@@ -368,31 +368,24 @@ void UserQuerySelect::submit() {

        if (!uberJobsEnabled) {
            std::function<void(util::CmdData*)> funcBuildJob =
-                   [this, sequence, job{move(job)}](util::CmdData*) { // references in captures cause races
+                   // &&&[this, sequence, job{move(job)}](util::CmdData*) { // references in captures cause races
+                   [this, job{move(job)}](util::CmdData*) { // references in captures cause races
                        QSERV_LOGCONTEXT_QUERY(_qMetaQueryId);
                        job->runJob();
                    };
            auto cmd = std::make_shared<qdisp::PriorityCommand>(funcBuildJob);
            _executive->queueJobStart(cmd);
-
        }
        ++sequence;
    }

    if (uberJobsEnabled) {
        vector<qdisp::UberJob::Ptr> uberJobs;
-       /* &&&
-       vector<czar::WorkerResource> workers; // &&& delete and replace with a real list of workers
-       throw Bug("&&&NEED_CODE to find all workers"); // workers = all workers found in database
-       for (auto&& worker:workers) {
-           worker.fillChunkIdSet();
-       }
-       */

        czar::WorkerResources workerResources;
-       workerResources.setMonoNodeTest(); // &&& TODO:UJ only good for mono-node test.
+       workerResources.setMonoNodeTest(); // &&& TODO:UJ only good for mono-node test. Need a real list of workers and their chunks. ******

-       // &&& make a map of all jobs in the executive.
+       // Make a map of all jobs in the executive.
        // &&& TODO:UJ for now, just using ints. At some point, need to check that ResourceUnit databases can be found for all databases in the query
        qdisp::Executive::ChunkIdJobMapType chunksInQuery = _executive->getChunkJobMapAndInvalidate();

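
Editor's note on the "references in captures cause races" comment in this hunk: funcBuildJob is only queued here and runs later on another thread, so anything captured by reference can point at loop-local state that has already been reused or destroyed by the time the command executes. The sketch below is not Qserv code; it is a minimal, self-contained illustration (hypothetical Job type, a plain std::thread standing in for the executive's priority queue) of why the shared_ptr is moved into the capture by value.

    #include <functional>
    #include <memory>
    #include <thread>
    #include <vector>

    struct Job {
        int chunkId;
        void run() { /* query one chunk */ }
    };

    int main() {
        std::vector<std::function<void()>> queue;
        for (int chunk = 0; chunk < 3; ++chunk) {
            auto job = std::make_shared<Job>(Job{chunk});
            // Move the shared_ptr into the closure. Capturing `job` (or `chunk`) by
            // reference would leave the callable pointing at loop-local state that is
            // reassigned or destroyed before the worker thread gets around to it.
            queue.push_back([job{std::move(job)}]() { job->run(); });
        }
        // The callables run later, on another thread, long after the loop above ended.
        std::thread worker([&queue] { for (auto& cmd : queue) cmd(); });
        worker.join();
        return 0;
    }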
@@ -405,13 +398,6 @@ void UserQuerySelect::submit() {
        /// make a map<worker, deque<chunkId>> that will be destroyed as chunks are checked/used
        map<string, deque<int>> tmpWorkerList = workerResources.getDequesFor(dbName);

-       /* &&&
-       list<std::reference_wrapper<czar::WorkerResource>> tmpWorkerList;
-       for(auto&& worker:workers) {
-           tmpWorkerList.push_back(worker);
-       }
-       */
-
        // TODO:UJ So UberJobIds don't conflict with chunk numbers or jobIds, start at a large number.
        // This could use some refinement.
        int uberJobId = qdisp::UberJob::getFirstIdNumber();
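
The loop that actually consumes tmpWorkerList and chunksInQuery falls between this hunk and the next, so as a reading aid only, here is a hypothetical sketch of the assignment idea: walk each worker's deque of chunk ids, claim the ones this query still needs, batch them into uber jobs of bounded size, and leave unclaimed chunks behind for the fallback path shown in the next hunk. The names assignChunks, UberJobPlan, and maxChunksPerUber are invented for the sketch and do not exist in Qserv; the real per-query collection maps chunkId to JobQuery*, not a set of ints.

    #include <cstddef>
    #include <deque>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    // One "uber job" here is just a batch of chunk ids destined for one worker.
    using UberJobPlan = std::map<std::string, std::vector<std::vector<int>>>;

    UberJobPlan assignChunks(std::map<std::string, std::deque<int>>& workerChunks,
                             std::set<int>& chunksInQuery, std::size_t maxChunksPerUber) {
        UberJobPlan plan;
        for (auto& [worker, chunks] : workerChunks) {
            std::vector<int> batch;
            for (int chunkId : chunks) {
                if (chunksInQuery.erase(chunkId) == 0) continue;  // this query does not need it
                batch.push_back(chunkId);
                if (batch.size() == maxChunksPerUber) {
                    plan[worker].push_back(std::move(batch));
                    batch.clear();
                }
            }
            if (!batch.empty()) plan[worker].push_back(std::move(batch));
        }
        // Whatever is still in chunksInQuery was on no worker's list; the caller
        // falls back to running those chunks as ordinary single-chunk jobs.
        return plan;
    }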
@@ -461,26 +447,49 @@ void UserQuerySelect::submit() {
                workerIter = tmpWorkerList.begin();
            }
        }
+       LOGS(_log, LOG_LVL_INFO, " &&& submit m");
        _executive->addUberJobs(uberJobs);
+       LOGS(_log, LOG_LVL_INFO, " &&& submit n");
        for (auto&& uJob:uberJobs) {
+           LOGS(_log, LOG_LVL_INFO, " &&& submit o");
            uJob->runUberJob();
+           LOGS(_log, LOG_LVL_INFO, " &&& submit p");
        }
-       _executive->startRemainingJobs();
+       LOGS(_log, LOG_LVL_INFO, " &&& submit q");
+       // If any chunks in the query were not found on a worker's list, run them individually.
+       // &&&_executive->startRemainingJobs(chunksInQuery); //&&& delete func in Executive.
+       for (auto& ciq:chunksInQuery) {
+           qdisp::JobQuery* jqRaw = ciq.second;
+           qdisp::JobQuery::Ptr job = _executive->getSharedPtrForRawJobPtr(jqRaw);
+           std::function<void(util::CmdData*)> funcBuildJob =
+                   [this, job{move(job)}](util::CmdData*) { // references in captures cause races
+                       QSERV_LOGCONTEXT_QUERY(_qMetaQueryId);
+                       job->runJob();
+                   };
+           auto cmd = std::make_shared<qdisp::PriorityCommand>(funcBuildJob);
+           _executive->queueJobStart(cmd);
+       }
+
+       LOGS(_log, LOG_LVL_INFO, " &&& submit r");
    }

    // attempt to restore original thread priority, requires root
    if (increaseThreadPriority) {
        threadPriority.restoreOriginalValues();
    }
+   LOGS(_log, LOG_LVL_INFO, " &&& submit s");

    LOGS(_log, LOG_LVL_DEBUG, " total jobs in query=" << sequence);
    _executive->waitForAllJobsToStart();
+   LOGS(_log, LOG_LVL_INFO, " &&& submit t");

    // we only care about per-chunk info for ASYNC queries
    if (_async) {
+       LOGS(_log, LOG_LVL_INFO, " &&& submit u");
        std::lock_guard<std::mutex> lock(chunksMtx);
        _qMetaAddChunks(chunks);
    }
+   LOGS(_log, LOG_LVL_INFO, " &&& submit v");
}
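
The new loop over chunksInQuery in this hunk replaces _executive->startRemainingJobs(): any chunk that no uber job claimed is still in the map and gets queued as an ordinary single-chunk job. Below is a minimal standalone sketch of that fallback pattern, with hypothetical names and plain standard-library containers standing in for the executive and its PriorityCommand queue.

    #include <deque>
    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    int main() {
        // Chunk ids mapped to the job that would scan them; chunk 42 was already
        // claimed by an uber job during assignment, so it is absent from the leftovers.
        std::map<int, std::string> leftoverChunks{{11, "jobA"}, {57, "jobC"}};
        std::deque<std::function<void()>> startQueue;

        for (auto& entry : leftoverChunks) {
            int chunkId = entry.first;
            std::string jobName = entry.second;
            // Capture by value: the queue is drained later, after this loop is done.
            startQueue.push_back([chunkId, jobName] {
                std::cout << "running leftover chunk " << chunkId << " as " << jobName << "\n";
            });
        }
        for (auto& cmd : startQueue) cmd();  // the real code hands these to queueJobStart()
        return 0;
    }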