|
| 1 | +// Copyright (C) <2019> Intel Corporation |
| 2 | +// |
| 3 | +// SPDX-License-Identifier: Apache-2.0 |
| 4 | + |
| 5 | +'use strict'; |
| 6 | + |
| 7 | +const grpcTools = require('./grpcTools'); |
| 8 | +const packOption = grpcTools.packOption; |
| 9 | +const unpackNotification = grpcTools.unpackNotification; |
| 10 | + |
| 11 | +const makeRPC = require('./makeRPC').makeRPC; |
| 12 | +const log = require('./logger').logger.getLogger('RpcRequest'); |
| 13 | + |
// Whether to talk to cluster/agent/nodes over gRPC instead of the AMQP
// RPC channel. Read once at load time from the global service config.
const enableGrpc = global.config?.service?.enable_grpc || false;
// Per-call gRPC deadline in milliseconds (converted to an absolute
// Date for each request — gRPC deadlines are absolute timestamps).
const GRPC_TIMEOUT = 2000;
| 16 | + |
/**
 * RPC request helper for scheduling and managing worker nodes.
 *
 * Depending on the module-level `enableGrpc` flag, requests go either
 * through gRPC clients (created lazily and cached here) or through the
 * provided AMQP-style `rpcChannel`.
 *
 * @param {object} rpcChannel - channel exposing makeRPC(target, method, args).
 * @param {object} [listener] - optional object with processNotification(data),
 *     invoked for every notification streamed back from a worker node (gRPC
 *     mode only).
 * @returns {object} the request API: getWorkerNode, recycleWorkerNode,
 *     sendMsg, broadcast.
 */
const RpcRequest = function(rpcChannel, listener) {
  const that = {};
  // Lazily-created gRPC clients, cached by address so repeated calls
  // reuse an existing channel instead of dialing a new one.
  const grpcAgents = {}; // workerAgent address => grpcClient
  const grpcNode = {}; // workerNode address => grpcClient
  const nodeType = {}; // workerNode address => purpose the client was built for
  let clusterClient; // gRPC client to the cluster manager (gRPC mode only)
  // A fresh deadline must be computed per call; gRPC deadlines are
  // absolute points in time, not durations.
  const opt = () => ({deadline: new Date(Date.now() + GRPC_TIMEOUT)});

  /**
   * Schedule a worker agent via the cluster manager, then allocate a
   * worker node on that agent.
   * @param {string} clusterManager - cluster manager RPC id (AMQP) or
   *     gRPC address (gRPC mode).
   * @param {string} purpose - node type to schedule (e.g. 'audio', 'video').
   * @param {object} forWhom - task descriptor; forWhom.task is the task id.
   * @param {*} preference - scheduling preference data.
   * @returns {Promise<{agent: string, node: string}>}
   */
  that.getWorkerNode = function(clusterManager, purpose, forWhom, preference) {
    log.debug('getworker node:', purpose, forWhom, 'enable grpc:', enableGrpc, clusterManager);
    if (enableGrpc) {
      if (!clusterClient) {
        clusterClient = grpcTools.startClient(
          'clusterManager',
          clusterManager
        );
      }
      let agentAddress;
      return new Promise((resolve, reject) => {
        const req = {
          purpose,
          task: forWhom.task,
          preference, // Change data for some preference
          reserveTime: 30 * 1000
        };
        clusterClient.schedule(req, opt(), (err, result) => {
          if (err) {
            log.debug('Schedule node error:', err);
            reject(err);
          } else {
            resolve(result);
          }
        });
      }).then((workerAgent) => {
        agentAddress = workerAgent.info.ip + ':' + workerAgent.info.grpcPort;
        if (!grpcAgents[agentAddress]) {
          grpcAgents[agentAddress] = grpcTools.startClient(
            'nodeManager',
            agentAddress
          );
        }
        return new Promise((resolve, reject) => {
          grpcAgents[agentAddress].getNode({info: forWhom}, opt(), (err, result) => {
            if (err) {
              reject(err);
            } else {
              resolve(result.message);
            }
          });
        });
      }).then((workerNode) => {
        if (grpcNode[workerNode]) {
          // Already have a client (and notification subscription) for
          // this node; nothing more to set up.
          return {agent: agentAddress, node: workerNode};
        }
        log.debug('Start gRPC client:', purpose, workerNode);
        grpcNode[workerNode] = grpcTools.startClient(
          purpose,
          workerNode
        );
        nodeType[workerNode] = purpose;
        // Subscribe to the node's server-streamed notifications and
        // forward each unpacked message to the listener.
        const call = grpcNode[workerNode].listenToNotifications({id: ''});
        call.on('data', (notification) => {
          if (listener) {
            // Unpack notification.data
            const data = unpackNotification(notification);
            if (data) {
              listener.processNotification(data);
            }
          }
        });
        // FIX: stream 'end' events carry no error argument — the old
        // handler declared one and logged `undefined`.
        call.on('end', () => {
          log.debug('Call on end:', workerNode);
          if (grpcNode[workerNode]) {
            grpcNode[workerNode].close();
            delete grpcNode[workerNode];
            // FIX: also drop the type entry; it previously leaked here
            // (it was only cleaned up in recycleWorkerNode).
            delete nodeType[workerNode];
          }
        });
        call.on('error', (err) => {
          // Errors are logged only; an 'end' event follows and performs
          // the actual cleanup.
          log.debug('Call on error:', err);
        });
        return {agent: agentAddress, node: workerNode};
      });
    }

    // AMQP path: schedule an agent, then ask it for a node.
    return rpcChannel.makeRPC(clusterManager, 'schedule', [purpose, forWhom.task, preference, 30 * 1000])
      .then(function(workerAgent) {
        return rpcChannel.makeRPC(workerAgent.id, 'getNode', [forWhom])
          .then(function(workerNode) {
            return {agent: workerAgent.id, node: workerNode};
          });
      });
  };

  /**
   * Return a worker node to its agent and drop any cached gRPC client
   * for it.
   * @param {string} workerAgent - agent RPC id (AMQP) or gRPC address.
   * @param {string} workerNode - node id/address to recycle.
   * @param {object} forWhom - task descriptor the node was allocated for.
   * @returns {Promise<string>} resolves 'ok' on success (gRPC mode).
   */
  that.recycleWorkerNode = function(workerAgent, workerNode, forWhom) {
    if (grpcAgents[workerAgent]) {
      const req = {id: workerNode, info: forWhom};
      return new Promise((resolve, reject) => {
        grpcAgents[workerAgent].recycleNode(req, opt(), (err) => {
          // Close our local client either way — the node is being
          // recycled regardless of whether the RPC reported an error.
          if (grpcNode[workerNode]) {
            grpcNode[workerNode].close();
            delete grpcNode[workerNode];
            delete nodeType[workerNode];
          }
          if (err) {
            log.debug('Recycle node error:', err);
            // FIX: previously fell through and called resolve('ok')
            // right after reject(err).
            reject(err);
          } else {
            resolve('ok');
          }
        });
      });
    } else {
      return rpcChannel.makeRPC(workerAgent, 'recycleNode', [workerNode, forWhom]);
    }
  };

  /**
   * Notify a single participant through the portal (AMQP only; a no-op
   * in gRPC mode, where notifications flow over the node streams).
   */
  that.sendMsg = function(portal, participantId, event, data) {
    if (enableGrpc) {
      return Promise.resolve();
    }
    return rpcChannel.makeRPC(portal, 'notify', [participantId, event, data]);
  };

  /**
   * Broadcast an event to all participants of a controller except those
   * in excludeList (AMQP only; a no-op in gRPC mode).
   */
  that.broadcast = function(portal, controller, excludeList, event, data) {
    if (enableGrpc) {
      return Promise.resolve();
    }
    return rpcChannel.makeRPC(portal, 'broadcast', [controller, excludeList, event, data]);
  };

  return that;
};
| 151 | + |
| 152 | +module.exports = RpcRequest; |
| 153 | + |
0 commit comments