|
| 1 | +const os = require('os'); |
| 2 | +const cluster = require('cluster'); |
| 3 | + |
1 | 4 | const logger = require('pelias-logger').get('api'); |
2 | 5 | const type_mapping = require('./helper/type_mapping'); |
3 | 6 |
|
// the express application (routes, middleware) is assembled in ./app
const app = require('./app');

// bind address is configurable via the environment;
// an undefined host makes `listen` bind all interfaces
const port = process.env.PORT || 3100;
const host = process.env.HOST || undefined;
7 | 10 |
|
// determine the number of processes to launch:
// by default launch only a single process, but if the CPUS environment
// variable is set, launch up to one process per CPU detected.
const envCpus = Number.parseInt(process.env.CPUS, 10);
// clamp to [1, availableParallelism] so an unset, malformed, zero or
// negative CPUS value can never leave us with no worker at all
const cpus = Math.max(1, Math.min(envCpus || 1, os.availableParallelism()));
| 16 | + |
// the express server handle; only set in processes that call startServer()
let server;
// set when shutdown is deliberate, so worker exits are not reported as crashes
let terminating = false;

logger.info('Starting Pelias API using %d CPUs', cpus);

// simple case where the cluster module is disabled with CPUS=1,
// or where this process is a worker — just run the web server directly
if (cpus === 1 || cluster.isWorker) {
  startServer();
// if using the cluster module, do more work to set up all the workers
// (isPrimary is the non-deprecated name for isMaster)
} else if (cluster.isPrimary) {
  // once the last expected worker has come online, announce the listen address
  cluster.on('online', (worker) => {
    if (Object.keys(cluster.workers).length === cpus) {
      logger.info( `pelias is now running on http://${host || `::`}:${port}` );
    }
  });

  // a worker exiting outside of a deliberate shutdown is unexpected — log it
  cluster.on('exit', (worker, code, signal) => {
    if (!terminating) {
      logger.error('[master] worker died', worker.process.pid);
    }
  });

  // log every fork so worker pids are visible in the logs
  // (the 'fork' event only supplies the worker argument)
  cluster.on('fork', (worker) => {
    logger.info('[master] worker forked', worker.process.pid);
  });

  // call fork to create the desired number of workers
  for (let c = 0; c < cpus; c++) {
    cluster.fork();
  }
}
| 52 | + |
| 53 | +// an exit handler that either closes the local Express server |
| 54 | +// or, if using the cluster module, forwards the signal to all workers |
// an exit handler that either closes the local Express server
// or, if using the cluster module, winds down all the workers
function exitHandler() {
  // flag the shutdown as deliberate so the cluster 'exit' listener
  // does not log expected worker exits as crashes
  terminating = true;

  if (cluster.isPrimary) {
    logger.info('Pelias API shutting down');
    // NOTE(review): no handler for the 'shutdown' message is visible in this
    // file — presumably workers rely on disconnect(); confirm before removing
    for (const worker of Object.values(cluster.workers)) {
      worker.send('shutdown');
      worker.disconnect();
    }
  }

  // only processes that ran startServer() have a server to close
  if (server) {
    server.close();
  }
}
| 70 | + |
// load Elasticsearch type_mapping, then start the web server.
// This has to be run on each forked worker because, unlike a "real"
// unix `fork`, cluster forks don't share memory with other workers.
function startServer() {
  type_mapping.load(() => {
    server = app.listen(port, host, () => {
      // ask server for the actual address and port it is listening on
      const listenAddress = server.address();
      // isPrimary (non-deprecated name for isMaster) is only true here in
      // single-process mode, since a clustered primary never calls startServer
      if (cluster.isPrimary) {
        logger.info( `pelias is now running on http://${listenAddress.address}:${listenAddress.port}` );
      } else {
        logger.info( `pelias worker ${process.pid} online` );
      }
    });
  });
}
24 | 87 |
|
// shut down gracefully on interrupt (Ctrl-C) and on termination;
// SIGTERM is what process managers and container runtimes send on stop
process.on('SIGINT', exitHandler);
process.on('SIGTERM', exitHandler);
|
0 commit comments