+///
+/// \file Kafka.cxx
+/// \author Adam Wegrzynek <[email protected]>
+///
+
+#include "Kafka.h"
+#include <chrono>
+#include <string>
+#include <boost/lexical_cast.hpp>
+
+namespace o2
+{
+/// ALICE O2 Monitoring system
+namespace monitoring
+{
+/// Monitoring backends
+namespace backends
+{
+
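+/// Creates a Kafka producer connected to the given broker
+/// \param host   Kafka broker hostname
+/// \param port   Kafka broker port
+/// \param topic  topic that serialized metrics are published to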
+Kafka::Kafka(const std::string& host, unsigned int port, const std::string& topic) :
+  mInfluxDB(), mTopic(topic)
+{
+  std::string errstr;
+  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+  conf->set("bootstrap.servers", host + ":" + std::to_string(port), errstr);
+  conf->set("request.required.acks", "0", errstr);
+  conf->set("message.send.max.retries", "0", errstr);
+  conf->set("queue.buffering.max.ms", "10", errstr);
+  conf->set("batch.num.messages", "1000", errstr);
+
+  producer = RdKafka::Producer::create(conf, errstr);
+  // create() does not take ownership of the configuration object; delete it to avoid a leak
+  delete conf;
+  if (!producer) {
+    MonLogger::Get() << "Failed to create producer: " << errstr << MonLogger::End();
+    exit(1);
+  }
+
+  MonLogger::Get() << "Kafka backend initialized"
+                   << " (" << host << ":" << port << ")" << MonLogger::End();
+}
+
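+/// Flushes pending messages and deletes the producer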
+Kafka::~Kafka()
+{
+  // Give librdkafka a chance to deliver queued messages before destroying the handle
+  producer->flush(500);
+  delete producer;
+}
+
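+/// Converts a time_point into nanoseconds since epoch, returned as a string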
+inline std::string Kafka::convertTimestamp(const std::chrono::time_point<std::chrono::system_clock>& timestamp)
+{
+  return std::to_string(std::chrono::duration_cast<std::chrono::nanoseconds>(
+    timestamp.time_since_epoch()
+  ).count());
+}
+
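+/// Sends multiple metrics grouped under a common measurement; currently a no-op for this backend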
+void Kafka::sendMultiple(std::string /*measurement*/, std::vector<Metric>&& /*metrics*/)
+{
+}
+
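+/// Sends a vector of metrics; currently a no-op for this backend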
+void Kafka::send(std::vector<Metric>&& /*metrics*/)
+{
+}
+
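+/// Serializes a metric into InfluxDB line protocol and publishes it to the configured topic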
+void Kafka::send(const Metric& metric)
+{
+  std::string influxLine = mInfluxDB.toInfluxLineProtocol(metric);
+  int32_t partition = RdKafka::Topic::PARTITION_UA;
+
+  RdKafka::ErrorCode resp = producer->produce(
+    mTopic, partition,
+    RdKafka::Producer::RK_MSG_COPY,
+    const_cast<char*>(influxLine.c_str()), influxLine.size(),
+    NULL, 0,   // no message key
+    0,         // timestamp: 0 = current time
+    NULL,      // no headers
+    NULL       // no per-message opaque pointer
+  );
+  if (resp != RdKafka::ERR_NO_ERROR) {
+    MonLogger::Get() << "Produce failed: " << RdKafka::err2str(resp) << MonLogger::End();
+  }
+  // Serve delivery callbacks without blocking
+  producer->poll(0);
+}
+
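+/// Appends a "name=value" pair to the comma-separated set of global tags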
+void Kafka::addGlobalTag(std::string_view name, std::string_view value)
+{
+  // string_view is not guaranteed to be null-terminated; construct the strings with an explicit length
+  std::string sName(name.data(), name.size());
+  std::string sValue(value.data(), value.size());
+  if (!tagSet.empty()) tagSet += ",";
+  tagSet += sName + "=" + sValue;
+}
+
+} // namespace backends
+} // namespace monitoring
+} // namespace o2