|
// server to implement a simple ZMQ REQ/REPLY synchronization barrier
// all clients with the same syncId get an (almost) synchronous reply after a timeout, which triggers once no new client with this id connects
| 3 | + |
#include <zmq.h>

#include <algorithm>
#include <chrono>
#include <cstring>
#include <iostream>
#include <mutex>
#include <stdexcept>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>

#include <InfoLogger/InfoLogger.hxx>
#include <InfoLogger/InfoLoggerMacros.hxx>
| 17 | + |
| 18 | +// definition of a global for logging |
| 19 | +using namespace AliceO2::InfoLogger; |
| 20 | +InfoLogger theLog; |
| 21 | + |
| 22 | + |
| 23 | +using namespace std; |
| 24 | +using namespace std::chrono; |
| 25 | + |
// One parked client: the ZMQ identity frame needed later to route the
// reply back through the ROUTER socket.
// NOTE: identity is a raw zmq_msg_t with manual ownership — whoever
// removes a Client must either send the frame or zmq_msg_close() it.
struct Client {
  zmq_msg_t identity;
};
| 29 | + |
// All clients currently waiting on one syncId, plus the timer state
// used by the watchdog thread for that id.
struct Group {
  vector<Client> clients;                // parked clients; all get the reply together
  time_point<steady_clock> lastActivity; // refreshed on each new registration; the timeout counts from here
  bool waiting = false;                  // true once a watchdog thread has been started for this group
};
| 35 | + |
// Shared state between the main receive loop and the per-group watchdog
// threads. groups_mutex guards `groups` (and its contained Group objects).
unordered_map<int, Group> groups;
mutex groups_mutex;
// NOTE(review): router_socket is written once at startup, then used for
// recv by the main thread and for send by watchdog threads. ZMQ sockets
// are not thread-safe; this relies on the recv side tolerating concurrent
// sends — confirm, or serialize all socket access on one thread.
void* router_socket = nullptr;
| 39 | + |
| 40 | +void send_reply_to_group(int syncId) { |
| 41 | + vector<Client> clientsToReply; |
| 42 | + |
| 43 | + { |
| 44 | + lock_guard lock(groups_mutex); |
| 45 | + auto& group = groups[syncId]; |
| 46 | + clientsToReply = move(group.clients); |
| 47 | + group.clients.clear(); |
| 48 | + group.waiting = false; |
| 49 | + } |
| 50 | + |
| 51 | + for (auto& client : clientsToReply) { |
| 52 | + zmq_msg_send(&client.identity, router_socket, ZMQ_SNDMORE); |
| 53 | + zmq_msg_close(&client.identity); |
| 54 | + |
| 55 | + zmq_msg_t empty; |
| 56 | + zmq_msg_init_size(&empty, 0); |
| 57 | + zmq_msg_send(&empty, router_socket, ZMQ_SNDMORE); |
| 58 | + zmq_msg_close(&empty); |
| 59 | + |
| 60 | + std::string reply = "SYNC for id " + std::to_string(syncId); |
| 61 | + zmq_msg_t msg; |
| 62 | + zmq_msg_init_size(&msg, reply.length()); |
| 63 | + memcpy(zmq_msg_data(&msg), reply.c_str(), reply.length()); |
| 64 | + zmq_msg_send(&msg, router_socket, 0); |
| 65 | + zmq_msg_close(&msg); |
| 66 | + } |
| 67 | + |
| 68 | + theLog.log(LogInfoDevel, "SYNC for id %d sent to %d clients", syncId, (int) clientsToReply.size()); |
| 69 | +} |
| 70 | + |
| 71 | + |
| 72 | +void start_group_timer(int syncId) { |
| 73 | + std::thread([syncId]() { |
| 74 | + using namespace std::chrono; |
| 75 | + constexpr auto timeout = seconds(5); |
| 76 | + |
| 77 | + while (true) { |
| 78 | + time_point<steady_clock> last; |
| 79 | + |
| 80 | + { |
| 81 | + std::lock_guard lock(groups_mutex); |
| 82 | + last = groups[syncId].lastActivity; |
| 83 | + } |
| 84 | + |
| 85 | + auto now = steady_clock::now(); |
| 86 | + auto elapsed = now - last; |
| 87 | + |
| 88 | + if (elapsed >= timeout) { |
| 89 | + send_reply_to_group(syncId); |
| 90 | + break; |
| 91 | + } |
| 92 | + |
| 93 | + auto remaining = timeout - elapsed; |
| 94 | + auto sleep_duration = std::min(duration_cast<milliseconds>(remaining), milliseconds(500)); |
| 95 | + std::this_thread::sleep_for(sleep_duration); |
| 96 | + } |
| 97 | + }).detach(); |
| 98 | +} |
| 99 | + |
| 100 | + |
| 101 | + |
| 102 | +int main() { |
| 103 | + void* context = zmq_ctx_new(); |
| 104 | + router_socket = zmq_socket(context, ZMQ_ROUTER); |
| 105 | + const char *address = "tcp://*:50003"; |
| 106 | + zmq_bind(router_socket, address); |
| 107 | + |
| 108 | + theLog.setContext(InfoLoggerContext({ { InfoLoggerContext::FieldName::Facility, (std::string) "readout/sync" } })); |
| 109 | + |
| 110 | + theLog.log(LogInfoDevel, "readout SYNC server started on %s", address); |
| 111 | + |
| 112 | + while (true) { |
| 113 | + zmq_msg_t identity; |
| 114 | + zmq_msg_init(&identity); |
| 115 | + zmq_msg_recv(&identity, router_socket, 0); |
| 116 | + |
| 117 | + zmq_msg_t empty; |
| 118 | + zmq_msg_init(&empty); |
| 119 | + zmq_msg_recv(&empty, router_socket, 0); |
| 120 | + zmq_msg_close(&empty); |
| 121 | + |
| 122 | + zmq_msg_t message; |
| 123 | + zmq_msg_init(&message); |
| 124 | + zmq_msg_recv(&message, router_socket, 0); |
| 125 | + |
| 126 | + string msg_str((char*)zmq_msg_data(&message), zmq_msg_size(&message)); |
| 127 | + int syncId = stoi(msg_str); |
| 128 | + zmq_msg_close(&message); |
| 129 | + |
| 130 | + lock_guard lock(groups_mutex); |
| 131 | + auto& group = groups[syncId]; |
| 132 | + |
| 133 | + Client c; |
| 134 | + zmq_msg_init(&c.identity); |
| 135 | + zmq_msg_copy(&c.identity, &identity); |
| 136 | + group.clients.push_back(move(c)); |
| 137 | + group.lastActivity = steady_clock::now(); |
| 138 | + |
| 139 | + theLog.log(LogInfoDevel, "New client waiting for sync id %d", syncId); |
| 140 | + |
| 141 | + if (!group.waiting) { |
| 142 | + group.waiting = true; |
| 143 | + start_group_timer(syncId); |
| 144 | + } |
| 145 | + |
| 146 | + zmq_msg_close(&identity); |
| 147 | + } |
| 148 | + |
| 149 | + zmq_close(router_socket); |
| 150 | + zmq_ctx_term(context); |
| 151 | + return 0; |
| 152 | +} |
0 commit comments