@@ -3,7 +3,7 @@ import { Worker, WorkerOptions } from 'bullmq';
 import { environmentConfig } from '../config/environment';
 import { SubmissionModel } from '../models/submission.model';
 import { SecureDockerExecutor } from '../executors/secureDockerExecutor';
-import { getPerformanceMonitor } from '../services/performanceMonitor';
+// import { getPerformanceMonitor } from '../services/performanceMonitor';
 import { logger } from '../utils/logger';
 
 const config = environmentConfig.getConfig();
@@ -15,7 +15,7 @@ interface ProcessingError extends Error {
 
 // Initialize the secure executor and performance monitor
 const executor = new SecureDockerExecutor();
-const performanceMonitor = getPerformanceMonitor(executor);
+// const performanceMonitor = getPerformanceMonitor(executor);
 
 // Enhanced worker configuration for high throughput
 const workerConfig: WorkerOptions = {
@@ -78,8 +78,8 @@ const processSubmission = async (submissionId: string) => {
     });
 
     // Record execution metrics for performance monitoring
-    const executionTime = Date.now() - startTime;
-    performanceMonitor.recordExecution(result.success, executionTime);
+    // const executionTime = Date.now() - startTime;
+    // performanceMonitor.recordExecution(result.success, executionTime);
 
     // Update submission with results
     submission.status = result.success ? 'completed' : 'failed';
@@ -118,7 +118,7 @@ const processSubmission = async (submissionId: string) => {
     return { submissionId, success: result.success };
   } catch (error) {
     const executionTime = Date.now() - startTime;
-    performanceMonitor.recordExecution(false, executionTime);
+    // performanceMonitor.recordExecution(false, executionTime);
 
     logger.error(`Failed to process submission ${submissionId}:`, error);
 
@@ -295,26 +295,26 @@ batchWorker.on('completed', (job) => {
 });
 
 // Health monitoring and load management
-setInterval(async () => {
-  try {
-    const health = await performanceMonitor.getHealthStatus();
-
-    if (health.status === 'critical') {
-      logger.warn('System under critical load - monitoring worker performance');
-
-      // Log current metrics for debugging
-      const metrics = performanceMonitor.getMetrics();
-      logger.warn('Current metrics:', {
-        totalExecutions: metrics.totalExecutions,
-        successRate: metrics.successRate,
-        currentLoad: metrics.currentLoad,
-        containerCount: metrics.containerCount,
-      });
-    }
-  } catch (error) {
-    logger.error('Health check failed in worker:', error);
-  }
-}, 60000); // Check every minute
+// setInterval(async () => {
+//   try {
+//     // const health = await performanceMonitor.getHealthStatus();
+
+//     if (health.status === 'critical') {
+//       // logger.warn('System under critical load - monitoring worker performance');
+
+//       // Log current metrics for debugging
+//       // const metrics = performanceMonitor.getMetrics();
+//       // logger.warn('Current metrics:', {
+//       //   totalExecutions: metrics.totalExecutions,
+//       //   successRate: metrics.successRate,
+//       //   currentLoad: metrics.currentLoad,
+//       //   containerCount: metrics.containerCount,
+//       // });
+//     }
+//   } catch (error) {
+//     logger.error('Health check failed in worker:', error);
+//   }
+// }, 60000); // Check every minute
 
 logger.info('👷 Enhanced workers running with high concurrency support...');
 logger.info(