@@ -99,9 +99,7 @@ void MPPTaskMonitorHelper::initAndAddself(MPPTaskManager * manager_, const Strin
 MPPTaskMonitorHelper::~MPPTaskMonitorHelper()
 {
     if (initialized)
-    {
         manager->removeMonitoredTask(task_unique_id);
-    }
 }
 
 MPPTask::MPPTask(const mpp::TaskMeta & meta_, const ContextPtr & context_)
@@ -121,9 +119,8 @@ MPPTask::~MPPTask()
 {
     /// MPPTask maybe destructed by different thread, set the query memory_tracker
     /// to current_memory_tracker in the destructor
-    auto * query_memory_tracker = getMemoryTracker();
-    if (query_memory_tracker != nullptr && current_memory_tracker != query_memory_tracker)
-        current_memory_tracker = query_memory_tracker;
+    if (process_list_entry != nullptr && current_memory_tracker != process_list_entry->get().getMemoryTrackerPtr().get())
+        current_memory_tracker = process_list_entry->get().getMemoryTrackerPtr().get();
     abortTunnels("", true);
     LOG_INFO(log, "finish MPPTask: {}", id.toString());
 }
@@ -190,8 +187,7 @@ void MPPTask::registerTunnels(const mpp::DispatchTaskRequest & task_request)
         if (unlikely(!task_meta.ParseFromString(exchange_sender.encoded_task_meta(i))))
             throw TiFlashException("Failed to decode task meta info in ExchangeSender", Errors::Coprocessor::BadRequest);
 
-        /// when the receiver task is root task, it should never be local tunnel
-        bool is_local = context->getSettingsRef().enable_local_tunnel && task_meta.task_id() != -1 && meta.address() == task_meta.address();
+        bool is_local = context->getSettingsRef().enable_local_tunnel && meta.address() == task_meta.address();
         bool is_async = !is_local && context->getSettingsRef().enable_async_server;
         MPPTunnelPtr tunnel = std::make_shared<MPPTunnel>(
             task_meta,
@@ -297,13 +293,6 @@ void MPPTask::setErrString(const String & message)
     err_string = message;
 }
 
-MemoryTracker * MPPTask::getMemoryTracker() const
-{
-    if (process_list_entry_holder.process_list_entry != nullptr)
-        return process_list_entry_holder.process_list_entry->get().getMemoryTrackerPtr().get();
-    return nullptr;
-}
-
 void MPPTask::unregisterTask()
 {
     auto [result, reason] = manager->unregisterTask(id);
@@ -313,19 +302,6 @@ void MPPTask::unregisterTask()
         LOG_WARNING(log, "task failed to unregister, reason: {}", reason);
 }
 
-void MPPTask::initProcessListEntry(MPPTaskManagerPtr & task_manager)
-{
-    /// all the mpp tasks of the same mpp query shares the same process list entry
-    auto [query_process_list_entry, aborted_reason] = task_manager->getOrCreateQueryProcessListEntry(id.query_id, context);
-    if (!aborted_reason.empty())
-        throw TiFlashException(fmt::format("MPP query is already aborted, aborted reason: {}", aborted_reason), Errors::Coprocessor::Internal);
-    assert(query_process_list_entry != nullptr);
-    process_list_entry_holder.process_list_entry = query_process_list_entry;
-    dag_context->setProcessListEntry(query_process_list_entry);
-    context->setProcessListElement(&query_process_list_entry->get());
-    current_memory_tracker = getMemoryTracker();
-}
-
 void MPPTask::prepare(const mpp::DispatchTaskRequest & task_request)
 {
     dag_req = getDAGRequestFromStringWithRetry(task_request.encoded_plan());
@@ -382,13 +358,13 @@ void MPPTask::prepare(const mpp::DispatchTaskRequest & task_request)
     dag_context->tidb_host = context->getClientInfo().current_address.toString();
 
     context->setDAGContext(dag_context.get());
-
-    auto task_manager = tmt_context.getMPPTaskManager();
-    initProcessListEntry(task_manager);
+    process_list_entry = setProcessListElement(*context, dag_context->dummy_query_string, dag_context->dummy_ast.get());
+    dag_context->setProcessListEntry(process_list_entry);
 
     injectFailPointBeforeRegisterTunnel(dag_context->isRootMPPTask());
     registerTunnels(task_request);
 
+    auto task_manager = tmt_context.getMPPTaskManager();
     LOG_DEBUG(log, "begin to register the task {}", id.toString());
 
     injectFailPointBeforeRegisterMPPTask(dag_context->isRootMPPTask());
@@ -429,7 +405,7 @@ void MPPTask::preprocess()
 void MPPTask::runImpl()
 {
     CPUAffinityManager::getInstance().bindSelfQueryThread();
-    RUNTIME_ASSERT(current_memory_tracker == getMemoryTracker(), log, "The current memory tracker is not set correctly for MPPTask::runImpl");
+    RUNTIME_ASSERT(current_memory_tracker == process_list_entry->get().getMemoryTrackerPtr().get(), log, "The current memory tracker is not set correctly for MPPTask::runImpl");
     if (!switchStatus(INITIALIZING, RUNNING))
     {
         LOG_WARNING(log, "task not in initializing state, skip running");
@@ -532,9 +508,9 @@ void MPPTask::runImpl()
         // todo when error happens, should try to update the metrics if it is available
         if (auto throughput = dag_context->getTableScanThroughput(); throughput.first)
             GET_METRIC(tiflash_storage_logical_throughput_bytes).Observe(throughput.second);
-        /// note that memory_tracker is shared by all the mpp tasks, the peak memory usage is not accurate
-        /// todo log executor level peak memory usage instead
-        auto peak_memory = getMemoryTracker()->getPeak();
+        auto process_info = context->getProcessListElement()->getInfo();
+        auto peak_memory = process_info.peak_memory_usage > 0 ? process_info.peak_memory_usage : 0;
+        GET_METRIC(tiflash_coprocessor_request_memory_usage, type_run_mpp_task).Observe(peak_memory);
         mpp_task_statistics.setMemoryPeak(peak_memory);
     }
 }