From 558f80eeec374ca74af592e33d7bc8829934f947 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Fri, 25 Apr 2025 16:10:35 +0200 Subject: [PATCH 01/32] v1 --- .../influxdb/influxdb-metrics-apiv2.lua | 463 ++++++++++++++++++ 1 file changed, 463 insertions(+) create mode 100644 centreon-certified/influxdb/influxdb-metrics-apiv2.lua diff --git a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua new file mode 100644 index 00000000..9c891da5 --- /dev/null +++ b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua @@ -0,0 +1,463 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker influxdb Connector Events +-------------------------------------------------------------------------------- + +local metrics = {} + +-- Libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") +local sc_metrics = require("centreon-stream-connectors-lib.sc_metrics") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue 
+---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "http_server_address", + "influxdb_username", + "influxdb_password", + "influxdb_database" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/infuxdb-metrics.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.http_server_address = params.http_server_address + self.sc_params.params.http_server_protocol = params.http_server_protocol or "http" + self.sc_params.params.http_server_port = params.http_server_port or 8086 + self.sc_params.params.influxdb_username = params.influxdb_username + self.sc_params.params.influxdb_password = params.influxdb_password + self.sc_params.params.influxdb_database = params.influxdb_database + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + self.sc_params.params.max_buffer_size = params.max_buffer_size or 5000 + self.sc_params.params.hard_only = params.hard_only or 0 + self.sc_params.params.enable_host_status_dedup = params.enable_host_status_dedup or 0 + self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0 + -- for cache problems, we need to check the datas in the database + 
self.sc_params.params.enable_broker_cache_counter_check = params.enable_broker_cache_counter_check or 0 + -- centreon database information (only used if you set enable_broker_cache_counter_check to 1) + self.sc_params.params.centreon_db_name = params.centreon_db_name or "centreon" + self.sc_params.params.centreon_db_address = params.centreon_db_address or "127.0.0.1" + self.sc_params.params.centreon_db_port = params.centreon_db_port or 3306 + self.sc_params.params.centreon_db_user = params.centreon_db_user or "centreon" + self.sc_params.params.centreon_db_password = params.centreon_db_password or "" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + } + } + + self.format_metric = { + [categories.neb.id] = { + [elements.host_status.id] = function (metric) return self:format_metric_host(metric) end, + [elements.service_status.id] = function (metric) return self:format_metric_service(metric) end + } + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_accepted_event method +-------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". 
If it is a not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event_host method +-------------------------------------------------------------------------------- +function EventQueue:format_event_host() + local event = self.sc_event.event + self.sc_logger:debug("[EventQueue:format_event_host]: call build_metric ") + self.sc_metrics:build_metric(self.format_metric[event.category][event.element]) +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event_service method +-------------------------------------------------------------------------------- +function EventQueue:format_event_service() + self.sc_logger:debug("[EventQueue:format_event_service]: call build_metric ") + local event = self.sc_event.event + self.sc_metrics:build_metric(self.format_metric[event.category][event.element]) +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_metric_host method +-- @param metric {table} a single metric data +-------------------------------------------------------------------------------- +function EventQueue:format_metric_host(metric) + self.sc_logger:debug("[EventQueue:format_metric_host]: start format_metric host") + self.sc_event.event.formated_event = metric.metric_name .. ",type=host," .. self:build_generic_tags(metric) .. " value=" .. metric.value .. " " .. 
self.sc_event.event.last_check + self:add() + self.sc_logger:debug("[EventQueue:format_metric_service]: end format_metric host") +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_metric_service method +-- @param metric {table} a single metric data +-------------------------------------------------------------------------------- +function EventQueue:format_metric_service(metric) + local params = self.sc_params.params + self.sc_logger:debug("[EventQueue:format_metric_service]: start format_metric service") + self.sc_event.event.formated_event = metric.metric_name .. ",type=service,service.name=" + .. self.sc_event.event.cache.service.description + .. "," .. self:build_generic_tags(metric) .. " value=" .. metric.value .. " " .. self.sc_event.event.last_check + self:add() + self.sc_logger:debug("[EventQueue:format_metric_service]: end format_metric service") +end + +-------------------------------------------------------------------------------- +---- EventQueue:build_tags method +-- @param metric {table} a single metric data +-- @return tags {table} a table with formated metadata +-------------------------------------------------------------------------------- +function EventQueue:build_generic_tags(metric) + local event = self.sc_event.event + local tags = 'host.name=' .. event.cache.host.name .. ',poller=' .. event.cache.poller + + local metric_key = tostring(event.host_id) .. ':' .. tostring(event.cache.service.service_id) .. ':' .. tostring(metric.metric_name) + broker_log:info(0, "METRIC_KEY: " .. metric_key) + if not metrics[metric_key] then + broker_log:error(0, "METRIC_ID not found for key: " .. metric_key) + else + broker_log:info(0, "METRIC_ID: " .. metrics[metric_key]) + tags = tags .. ',metric.id=' .. metrics[metric_key] + end + + -- add metric instance in tags + if metric.instance ~= "" then + tags = tags .. ',metric.instance=' .. 
metric.instance + end + + if metric.uom ~= "" then + tags = tags .. ',metric.unit=' .. metric.uom + end + + -- add metric subinstances in tags + if metric.subinstance[1] then + for subinstance_id, subinstance_name in ipairs(metric.subinstance) do + tags = tags .. ',subinstance_' .. subinstance_id .. '=' .. subinstance_name + end + end + + return tags +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + broker_log:info(0, "EventQueue:add()") + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. 
tostring(self.sc_params.params.max_buffer_size)) +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + broker_log:info(0, "EventQueue:build_payload()") + if not payload then + payload = event + else + payload = payload .. "\n" .. event + end + + return payload +end + +function EventQueue:send_data(payload, queue_metadata) + broker_log:info(0, "EventQueue:send_data()") + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + local params = self.sc_params.params + + local url = params.http_server_protocol .. "://" .. params.http_server_address .. ":" .. tostring(params.http_server_port) + .. "/write?u=" .. tostring(params.influxdb_username) + .. "&p=" .. tostring(params.influxdb_password) + .. "&db=" .. tostring(params.influxdb_database) + .. "&precision=s" + + queue_metadata.headers = { + "content-type: text/plain; charset=utf-8" + } + + self.sc_logger:log_curl_command(url, queue_metadata, params, payload) + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload)) + return true + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) + self.sc_logger:info("[EventQueue:send_data]: Influxdb address is: " .. tostring(url)) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. 
tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.verify_certificate) + :setopt(curl.OPT_HTTPHEADER,queue_metadata.headers) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- adding the HTTP POST data + http_request:setopt_postfields(payload) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() + + -- Handling the return code + local retval = false + -- https://docs.influxdata.com/influxdb/cloud/api/#operation/PostWrite other than 204 is not good + if http_response_code == 204 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) + + if payload then + self.sc_logger:error("[EventQueue:send_data]: sent payload was: " .. 
tostring(payload)) + end + end + + return retval +end + +function EventQueue:convert_metric_event(event) + local params = self.sc_params.params + -- drop the event if it is not a metric event from the storage category + if event.category ~= params.bbdo.categories["storage"].id then + return false + end + if event.element ~= params.bbdo.elements["metric"].id then + return false + end + + -- hack event to make stream connector lib think it is a standard status neb event. + event.perfdata = "'" .. event.name .. "'=" .. event.value .. ";;;;" + event.category = params.bbdo.categories["neb"].id + + if not event.ctime then + event.last_check = event.time + else + event.last_check = event.ctime + end + + if event.service_id and event.service_id ~= 0 then + event.element = params.bbdo.elements["service_status"].id + else + event.element = params.bbdo.elements["host_status"].id + end + + return event +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + broker_log:info(0, "EventQueue:init()") + queue = EventQueue.new(conf) +end + +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + --broker_log:info(0, "EventQueue:write()") + + if event._type == 196617 then + local metric_key = tostring(event.host_id) .. ':' .. tostring(event.service_id) .. ':' .. tostring(event.name) + -- check if the metric is already in the metrics table + if not metrics[metric_key] then + metrics[metric_key] = event.metric_id + broker_log:info(0, "ADD METRIC key: " .. metric_key .. " - value: " .. 
metrics[metric_key]) + end + end + + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return false + end + + -- initiate event object + queue.sc_metrics = sc_metrics.new(event, queue.sc_params.params, queue.sc_common, queue.sc_broker, queue.sc_logger) + queue.sc_event = queue.sc_metrics.sc_event + + if queue.sc_event:is_valid_category() then + if queue.sc_metrics:is_valid_bbdo_element() then + -- format event if it is validated + if queue.sc_metrics:is_valid_metric_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they 
were not ready to be send + return false +end \ No newline at end of file From 1982bbfc352d9fb19763358f9608f3dbfd9c5869 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Mon, 28 Apr 2025 23:25:08 +0200 Subject: [PATCH 02/32] update --- .../influxdb/influxdb-metrics-apiv2.lua | 111 ++++++------------ 1 file changed, 33 insertions(+), 78 deletions(-) diff --git a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua index 9c891da5..0c62bbb3 100644 --- a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua +++ b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua @@ -166,8 +166,15 @@ end -- @param metric {table} a single metric data -------------------------------------------------------------------------------- function EventQueue:format_metric_host(metric) - self.sc_logger:debug("[EventQueue:format_metric_host]: start format_metric host") - self.sc_event.event.formated_event = metric.metric_name .. ",type=host," .. self:build_generic_tags(metric) .. " value=" .. metric.value .. " " .. self.sc_event.event.last_check + self.sc_logger:debug("[EventQueue:format_metric_host]: start format_metric host") + local event = self.sc_event.event + local metric_key = tostring(event.host_id) .. ':' .. tostring(event.cache.service.service_id) .. ':' .. 
tostring(metric.metric_name) + self.sc_event.event.formated_event = { + metric_name = metric.metric_name, + metric_value = metric.value, + metric_key = metric_key, + last_check = self.sc_event.event.last_check, + } self:add() self.sc_logger:debug("[EventQueue:format_metric_service]: end format_metric host") end @@ -177,50 +184,17 @@ end -- @param metric {table} a single metric data -------------------------------------------------------------------------------- function EventQueue:format_metric_service(metric) - local params = self.sc_params.params self.sc_logger:debug("[EventQueue:format_metric_service]: start format_metric service") - self.sc_event.event.formated_event = metric.metric_name .. ",type=service,service.name=" - .. self.sc_event.event.cache.service.description - .. "," .. self:build_generic_tags(metric) .. " value=" .. metric.value .. " " .. self.sc_event.event.last_check - self:add() - self.sc_logger:debug("[EventQueue:format_metric_service]: end format_metric service") -end - --------------------------------------------------------------------------------- ----- EventQueue:build_tags method --- @param metric {table} a single metric data --- @return tags {table} a table with formated metadata --------------------------------------------------------------------------------- -function EventQueue:build_generic_tags(metric) local event = self.sc_event.event - local tags = 'host.name=' .. event.cache.host.name .. ',poller=' .. event.cache.poller - local metric_key = tostring(event.host_id) .. ':' .. tostring(event.cache.service.service_id) .. ':' .. tostring(metric.metric_name) - broker_log:info(0, "METRIC_KEY: " .. metric_key) - if not metrics[metric_key] then - broker_log:error(0, "METRIC_ID not found for key: " .. metric_key) - else - broker_log:info(0, "METRIC_ID: " .. metrics[metric_key]) - tags = tags .. ',metric.id=' .. metrics[metric_key] - end - - -- add metric instance in tags - if metric.instance ~= "" then - tags = tags .. 
',metric.instance=' .. metric.instance - end - - if metric.uom ~= "" then - tags = tags .. ',metric.unit=' .. metric.uom - end - - -- add metric subinstances in tags - if metric.subinstance[1] then - for subinstance_id, subinstance_name in ipairs(metric.subinstance) do - tags = tags .. ',subinstance_' .. subinstance_id .. '=' .. subinstance_name - end - end - - return tags + self.sc_event.event.formated_event = { + metric_name = metric.metric_name, + metric_value = metric.value, + metric_key = metric_key, + last_check = self.sc_event.event.last_check, + } + self:add() + self.sc_logger:debug("[EventQueue:format_metric_service]: end format_metric service") end -------------------------------------------------------------------------------- @@ -236,6 +210,7 @@ function EventQueue:add() .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_common:dumper(self.sc_event.event.formated_event) self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) @@ -251,11 +226,10 @@ end function EventQueue:build_payload(payload, event) broker_log:info(0, "EventQueue:build_payload()") if not payload then - payload = event + payload = {event} else - payload = payload .. "\n" .. 
event + table.insert(payload, event) end - return payload end @@ -274,7 +248,18 @@ function EventQueue:send_data(payload, queue_metadata) "content-type: text/plain; charset=utf-8" } - self.sc_logger:log_curl_command(url, queue_metadata, params, payload) + self.sc_common:dumper(payload) + local data_binary = '' + for index, event in ipairs(payload) do + if not metrics[event.metric_key] then + broker_log:error(0, "METRIC_ID not found for key: " .. event.metric_key) + else + broker_log:info(0, "METRIC_ID: " .. metrics[event.metric_key]) + data_binary = data_binary .. "\n" .. event.metric_name .. ",metric.id=" .. metrics[event.metric_key] .. " value=" .. event.metric_value .. " " .. event.last_check + end + end + + self.sc_logger:log_curl_command(url, queue_metadata, params, data_binary) -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then @@ -316,7 +301,7 @@ function EventQueue:send_data(payload, queue_metadata) end -- adding the HTTP POST data - http_request:setopt_postfields(payload) + http_request:setopt_postfields(data_binary) -- performing the HTTP request http_request:perform() @@ -343,35 +328,6 @@ function EventQueue:send_data(payload, queue_metadata) return retval end -function EventQueue:convert_metric_event(event) - local params = self.sc_params.params - -- drop the event if it is not a metric event from the storage category - if event.category ~= params.bbdo.categories["storage"].id then - return false - end - if event.element ~= params.bbdo.elements["metric"].id then - return false - end - - -- hack event to make stream connector lib think it is a standard status neb event. - event.perfdata = "'" .. event.name .. "'=" .. event.value .. 
";;;;" - event.category = params.bbdo.categories["neb"].id - - if not event.ctime then - event.last_check = event.time - else - event.last_check = event.ctime - end - - if event.service_id and event.service_id ~= 0 then - event.element = params.bbdo.elements["service_status"].id - else - event.element = params.bbdo.elements["host_status"].id - end - - return event -end - -------------------------------------------------------------------------------- -- Required functions for Broker StreamConnector -------------------------------------------------------------------------------- @@ -397,7 +353,6 @@ function write (event) -- check if the metric is already in the metrics table if not metrics[metric_key] then metrics[metric_key] = event.metric_id - broker_log:info(0, "ADD METRIC key: " .. metric_key .. " - value: " .. metrics[metric_key]) end end From aecc3f5d0f02d9b2768b4fd66b59e845f45e1c4d Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Wed, 30 Apr 2025 08:54:02 +0200 Subject: [PATCH 03/32] metric + status --- .../influxdb/influxdb-metrics-apiv2.lua | 50 ++++++++++++------- 1 file changed, 32 insertions(+), 18 deletions(-) diff --git a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua index 0c62bbb3..bc50b2a2 100644 --- a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua +++ b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua @@ -65,7 +65,7 @@ function EventQueue.new(params) self.sc_params.params.influxdb_database = params.influxdb_database self.sc_params.params.accepted_categories = params.accepted_categories or "neb" self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" - self.sc_params.params.max_buffer_size = params.max_buffer_size or 5000 + self.sc_params.params.max_buffer_size = params.max_buffer_size or 100 self.sc_params.params.hard_only = params.hard_only or 0 self.sc_params.params.enable_host_status_dedup = 
params.enable_host_status_dedup or 0 self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0 @@ -167,13 +167,15 @@ end -------------------------------------------------------------------------------- function EventQueue:format_metric_host(metric) self.sc_logger:debug("[EventQueue:format_metric_host]: start format_metric host") + local event = self.sc_event.event - local metric_key = tostring(event.host_id) .. ':' .. tostring(event.cache.service.service_id) .. ':' .. tostring(metric.metric_name) - self.sc_event.event.formated_event = { + local metric_key = tostring(event.host_id) .. ':0:' .. tostring(metric.metric_name) + event.formated_event = { metric_name = metric.metric_name, metric_value = metric.value, metric_key = metric_key, - last_check = self.sc_event.event.last_check, + last_check = event.last_check, + status = "status value=" .. tostring(event.state) .. ",host_id=" .. tostring(event.host_id) .. " " .. tostring(event.last_check) } self:add() self.sc_logger:debug("[EventQueue:format_metric_service]: end format_metric host") @@ -187,11 +189,12 @@ function EventQueue:format_metric_service(metric) self.sc_logger:debug("[EventQueue:format_metric_service]: start format_metric service") local event = self.sc_event.event local metric_key = tostring(event.host_id) .. ':' .. tostring(event.cache.service.service_id) .. ':' .. tostring(metric.metric_name) - self.sc_event.event.formated_event = { + event.formated_event = { metric_name = metric.metric_name, metric_value = metric.value, metric_key = metric_key, - last_check = self.sc_event.event.last_check, + last_check = event.last_check, + status = "status value=" .. tostring(event.state) .. ",host_id=" .. tostring(event.host_id) .. ",service_id=" .. tostring(event.cache.service.service_id) .. " " .. 
tostring(event.last_check) } self:add() self.sc_logger:debug("[EventQueue:format_metric_service]: end format_metric service") @@ -201,7 +204,6 @@ end -- EventQueue:add, add an event to the sending queue -------------------------------------------------------------------------------- function EventQueue:add() - broker_log:info(0, "EventQueue:add()") -- store event in self.events lists local category = self.sc_event.event.category local element = self.sc_event.event.element @@ -224,7 +226,6 @@ end -- @return payload {string} json encoded string -------------------------------------------------------------------------------- function EventQueue:build_payload(payload, event) - broker_log:info(0, "EventQueue:build_payload()") if not payload then payload = {event} else @@ -233,8 +234,9 @@ function EventQueue:build_payload(payload, event) return payload end +local events_retry = {} + function EventQueue:send_data(payload, queue_metadata) - broker_log:info(0, "EventQueue:send_data()") self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") local params = self.sc_params.params @@ -248,14 +250,27 @@ function EventQueue:send_data(payload, queue_metadata) "content-type: text/plain; charset=utf-8" } - self.sc_common:dumper(payload) local data_binary = '' - for index, event in ipairs(payload) do - if not metrics[event.metric_key] then - broker_log:error(0, "METRIC_ID not found for key: " .. event.metric_key) + for index, payload_event in ipairs(payload) do + if not metrics[payload_event.metric_key] then + payload_event.retry = 1 + table.insert(events_retry, payload_event) + else + data_binary = data_binary .. payload_event.metric_name .. ",metric.id=" .. metrics[payload_event.metric_key] .. " value=" .. payload_event.metric_value .. " " .. payload_event.last_check .. "\n" .. payload_event.status .. 
"\n" + end + end + + for index, retry_event in ipairs(events_retry) do + if not metrics[retry_event.metric_key] then + retry_event.retry = retry_event.retry + 1 + if retry_event.retry > 3 then + self.sc_logger:debug("Retry limit reached for key: " .. retry_event.metric_key) + data_binary = data_binary .. retry_event.metric_name .. " value=" .. retry_event.metric_value .. " " .. retry_event.last_check .. "\n" + table.remove(events_retry, index) + end else - broker_log:info(0, "METRIC_ID: " .. metrics[event.metric_key]) - data_binary = data_binary .. "\n" .. event.metric_name .. ",metric.id=" .. metrics[event.metric_key] .. " value=" .. event.metric_value .. " " .. event.last_check + data_binary = data_binary .. retry_event.metric_name .. ",metric.id=" .. metrics[retry_event.metric_key] .. " value=" .. retry_event.metric_value .. " " .. retry_event.last_check .. "\n" .. retry_event.status .. "\n" + table.remove(events_retry, index) end end @@ -267,7 +282,7 @@ function EventQueue:send_data(payload, queue_metadata) return true end - self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) + self.sc_logger:info("[EventQueue:send_data]: Going to send the following data " .. tostring(data_binary)) self.sc_logger:info("[EventQueue:send_data]: Influxdb address is: " .. tostring(url)) local http_response_body = "" @@ -336,7 +351,6 @@ local queue -- Fonction init() function init(conf) - broker_log:info(0, "EventQueue:init()") queue = EventQueue.new(conf) end @@ -346,7 +360,6 @@ end -- @return {boolean} -------------------------------------------------------------------------------- function write (event) - --broker_log:info(0, "EventQueue:write()") if event._type == 196617 then local metric_key = tostring(event.host_id) .. ':' .. tostring(event.service_id) .. ':' .. tostring(event.name) @@ -355,6 +368,7 @@ function write (event) metrics[metric_key] = event.metric_id end end + --broker_log:info(0, "METRICS: " .. 
broker.json_encode(metrics)) -- skip event if a mandatory parameter is missing if queue.fail then From 6cec1968a9630d5e55a3dd8eb0da5adce05a52de Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Wed, 30 Apr 2025 11:17:35 +0200 Subject: [PATCH 04/32] add cache patch from https://github.com/centreon/centreon-stream-connector-scripts/pull/237 --- .../canopsis/canopsis2x-events-apiv2.lua | 2 +- .../clickhouse/clickhouse-metrics-apiv2.lua | 2 +- .../datadog/datadog-events-apiv2.lua | 2 +- .../datadog/datadog-metrics-apiv2.lua | 2 +- .../elasticsearch/elastic-events-apiv2.lua | 2 +- .../elasticsearch/elastic-metrics-apiv2.lua | 2 +- .../google/bigquery-events-apiv2.lua | 2 +- .../influxdb/influxdb-metrics-apiv2.lua | 10 +- .../influxdb/influxdb2-metrics-apiv2.lua | 2 +- .../kafka/kafka-events-apiv2.lua | 2 +- centreon-certified/keep/keep-events-apiv2.lua | 2 +- .../logstash/logstash-events-apiv2.lua | 2 +- centreon-certified/omi/omi_events-apiv2.lua | 2 +- .../opsgenie/opsgenie-events-apiv2.lua | 2 +- .../pagerduty/pagerduty-events-apiv2.lua | 2 +- .../servicenow/servicenow-em-events-apiv2.lua | 2 +- .../servicenow-incident-events-apiv2.lua | 2 +- .../signl4/signl4-events-apiv2.lua | 2 +- .../splunk/splunk-events-apiv2.lua | 2 +- .../splunk/splunk-metrics-apiv2.lua | 3 +- .../sc_broker.lua | 151 ++++++++++++++---- .../sc_params.lua | 8 + .../acknowledgement_stream_connector.lua | 3 +- modules/tests/bam_stream_connector.lua | 3 +- modules/tests/downtime_stream_connector.lua | 3 +- modules/tests/neb_stream_connector.lua | 3 +- 26 files changed, 165 insertions(+), 55 deletions(-) diff --git a/centreon-certified/canopsis/canopsis2x-events-apiv2.lua b/centreon-certified/canopsis/canopsis2x-events-apiv2.lua index 7be345a5..97ab4307 100644 --- a/centreon-certified/canopsis/canopsis2x-events-apiv2.lua +++ b/centreon-certified/canopsis/canopsis2x-events-apiv2.lua @@ -54,7 +54,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = 
sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) self.bbdo_version = self.sc_common:get_bbdo_version() @@ -114,6 +113,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/clickhouse/clickhouse-metrics-apiv2.lua b/centreon-certified/clickhouse/clickhouse-metrics-apiv2.lua index 38f1668d..8890924a 100644 --- a/centreon-certified/clickhouse/clickhouse-metrics-apiv2.lua +++ b/centreon-certified/clickhouse/clickhouse-metrics-apiv2.lua @@ -50,7 +50,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -86,6 +85,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/datadog/datadog-events-apiv2.lua b/centreon-certified/datadog/datadog-events-apiv2.lua index 26dc34cd..24b27ff6 100644 --- a/centreon-certified/datadog/datadog-events-apiv2.lua +++ b/centreon-certified/datadog/datadog-events-apiv2.lua @@ -47,7 +47,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = 
sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -79,6 +78,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/datadog/datadog-metrics-apiv2.lua b/centreon-certified/datadog/datadog-metrics-apiv2.lua index efb62351..a58eadc1 100644 --- a/centreon-certified/datadog/datadog-metrics-apiv2.lua +++ b/centreon-certified/datadog/datadog-metrics-apiv2.lua @@ -48,7 +48,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -84,6 +83,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/elasticsearch/elastic-events-apiv2.lua b/centreon-certified/elasticsearch/elastic-events-apiv2.lua index d7f2e537..dc9c78c8 100644 --- a/centreon-certified/elasticsearch/elastic-events-apiv2.lua +++ b/centreon-certified/elasticsearch/elastic-events-apiv2.lua @@ -49,7 +49,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = 
sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -75,6 +74,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua b/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua index 34468070..fe2dbc4a 100644 --- a/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua +++ b/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua @@ -53,7 +53,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -103,6 +102,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/google/bigquery-events-apiv2.lua b/centreon-certified/google/bigquery-events-apiv2.lua index 5ff02c7e..46869c1c 100644 --- a/centreon-certified/google/bigquery-events-apiv2.lua +++ b/centreon-certified/google/bigquery-events-apiv2.lua @@ -32,8 +32,8 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = 
sc_params.new(self.sc_common, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) -- checking mandatory parameters and setting a fail flag if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then diff --git a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua index bc50b2a2..2d0fbc13 100644 --- a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua +++ b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua @@ -48,7 +48,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -90,6 +89,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements @@ -256,7 +256,8 @@ function EventQueue:send_data(payload, queue_metadata) payload_event.retry = 1 table.insert(events_retry, payload_event) else - data_binary = data_binary .. payload_event.metric_name .. ",metric.id=" .. metrics[payload_event.metric_key] .. " value=" .. payload_event.metric_value .. " " .. payload_event.last_check .. "\n" .. payload_event.status .. "\n" + data_binary = data_binary .. payload_event.metric_name .. ",metric_id=" .. metrics[payload_event.metric_key] .. " value=" .. payload_event.metric_value .. " " .. payload_event.last_check .. "\n" + data_binary = data_binary .. payload_event.status .. 
"\n" end end @@ -266,10 +267,12 @@ function EventQueue:send_data(payload, queue_metadata) if retry_event.retry > 3 then self.sc_logger:debug("Retry limit reached for key: " .. retry_event.metric_key) data_binary = data_binary .. retry_event.metric_name .. " value=" .. retry_event.metric_value .. " " .. retry_event.last_check .. "\n" + data_binary = data_binary .. retry_event.status .. "\n" table.remove(events_retry, index) end else - data_binary = data_binary .. retry_event.metric_name .. ",metric.id=" .. metrics[retry_event.metric_key] .. " value=" .. retry_event.metric_value .. " " .. retry_event.last_check .. "\n" .. retry_event.status .. "\n" + data_binary = data_binary .. retry_event.metric_name .. ",metric_id=" .. metrics[retry_event.metric_key] .. " value=" .. retry_event.metric_value .. " " .. retry_event.last_check .. "\n" + data_binary = data_binary .. retry_event.status .. "\n" table.remove(events_retry, index) end end @@ -368,7 +371,6 @@ function write (event) metrics[metric_key] = event.metric_id end end - --broker_log:info(0, "METRICS: " .. 
broker.json_encode(metrics)) -- skip event if a mandatory parameter is missing if queue.fail then diff --git a/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua b/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua index 00116580..37375b1f 100644 --- a/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua +++ b/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua @@ -51,7 +51,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -90,6 +89,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/kafka/kafka-events-apiv2.lua b/centreon-certified/kafka/kafka-events-apiv2.lua index 05b0513b..1c785654 100644 --- a/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/centreon-certified/kafka/kafka-events-apiv2.lua @@ -34,7 +34,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) self.sc_kafka_config = kafka_config.new() self.sc_kafka_topic_config = kafka_topic_config.new() @@ -86,6 +85,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = 
self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/keep/keep-events-apiv2.lua b/centreon-certified/keep/keep-events-apiv2.lua index 49f4e37e..def1a299 100644 --- a/centreon-certified/keep/keep-events-apiv2.lua +++ b/centreon-certified/keep/keep-events-apiv2.lua @@ -48,7 +48,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -82,6 +81,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/logstash/logstash-events-apiv2.lua b/centreon-certified/logstash/logstash-events-apiv2.lua index 5badf222..df345c3f 100644 --- a/centreon-certified/logstash/logstash-events-apiv2.lua +++ b/centreon-certified/logstash/logstash-events-apiv2.lua @@ -44,7 +44,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -74,6 +73,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff 
--git a/centreon-certified/omi/omi_events-apiv2.lua b/centreon-certified/omi/omi_events-apiv2.lua index 793d0c92..00dc21ff 100644 --- a/centreon-certified/omi/omi_events-apiv2.lua +++ b/centreon-certified/omi/omi_events-apiv2.lua @@ -73,7 +73,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -107,6 +106,7 @@ function EventQueue.new(params) end self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/opsgenie/opsgenie-events-apiv2.lua b/centreon-certified/opsgenie/opsgenie-events-apiv2.lua index 08f63454..c74eaf53 100644 --- a/centreon-certified/opsgenie/opsgenie-events-apiv2.lua +++ b/centreon-certified/opsgenie/opsgenie-events-apiv2.lua @@ -47,7 +47,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -92,6 +91,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/pagerduty/pagerduty-events-apiv2.lua b/centreon-certified/pagerduty/pagerduty-events-apiv2.lua index 
cf68a046..46906ce3 100644 --- a/centreon-certified/pagerduty/pagerduty-events-apiv2.lua +++ b/centreon-certified/pagerduty/pagerduty-events-apiv2.lua @@ -48,7 +48,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -81,6 +80,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/servicenow/servicenow-em-events-apiv2.lua b/centreon-certified/servicenow/servicenow-em-events-apiv2.lua index 0e35a554..8c4d8523 100644 --- a/centreon-certified/servicenow/servicenow-em-events-apiv2.lua +++ b/centreon-certified/servicenow/servicenow-em-events-apiv2.lua @@ -53,7 +53,6 @@ function EventQueue.new (params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) self.sc_params.params.instance = params.instance @@ -85,6 +84,7 @@ function EventQueue.new (params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua b/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua index 
2a4886f8..8f1a6a07 100644 --- a/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua +++ b/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua @@ -53,7 +53,6 @@ function EventQueue.new (params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) self.sc_params.params.instance = params.instance @@ -95,6 +94,7 @@ function EventQueue.new (params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/signl4/signl4-events-apiv2.lua b/centreon-certified/signl4/signl4-events-apiv2.lua index 1e27d0e0..3a268938 100644 --- a/centreon-certified/signl4/signl4-events-apiv2.lua +++ b/centreon-certified/signl4/signl4-events-apiv2.lua @@ -45,7 +45,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -76,6 +75,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/splunk/splunk-events-apiv2.lua b/centreon-certified/splunk/splunk-events-apiv2.lua index f2468bda..367e98ff 100644 --- 
a/centreon-certified/splunk/splunk-events-apiv2.lua +++ b/centreon-certified/splunk/splunk-events-apiv2.lua @@ -44,7 +44,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -74,6 +73,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/centreon-certified/splunk/splunk-metrics-apiv2.lua b/centreon-certified/splunk/splunk-metrics-apiv2.lua index 19e25b32..34e5f242 100644 --- a/centreon-certified/splunk/splunk-metrics-apiv2.lua +++ b/centreon-certified/splunk/splunk-metrics-apiv2.lua @@ -42,7 +42,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- checking mandatory parameters and setting a fail flag @@ -77,8 +76,8 @@ function EventQueue.new(params) end self.sc_params:build_accepted_elements_info() - self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements diff --git a/modules/centreon-stream-connectors-lib/sc_broker.lua b/modules/centreon-stream-connectors-lib/sc_broker.lua index 2d659f60..e4574755 100644 --- a/modules/centreon-stream-connectors-lib/sc_broker.lua +++ 
b/modules/centreon-stream-connectors-lib/sc_broker.lua @@ -11,12 +11,32 @@ local sc_logger = require("centreon-stream-connectors-lib.sc_logger") local ScBroker = {} -function sc_broker.new(logger) +function sc_broker.new(params, logger) local self = {} - self.logger = logger - if not self.logger then - self.logger = sc_logger.new() + self.sc_logger = logger + if not self.sc_logger then + self.sc_logger = sc_logger.new() + end + + self.params = params + + if params.enable_broker_cache_counter_check == 1 then + if pcall(require, "luasql.mysql") then + local db = require("luasql.mysql") + local db_driver = db.mysql() + local centreon_db, error = db_driver:connect(params.centreon_db_name, params.centreon_db_user, params.centreon_db_password, params.centreon_db_address, params.centreon_db_port) + + if not centreon_db then + self.sc_logger:error("[sc_broker:new]: couldn't connect to " .. tostring(params.centreon_db_name) .. ". Error is: " .. tostring(error) + .. "make sure that your parameters are valid: centreon_db_user: " .. tostring(params.centreon_db_user) .. ", centreon_db_address: " .. tostring(params.centreon_db_address) .. ", centreon_db_port: " .. tostring(params.centreon_db_port)) + else + self.centreon_db = centreon_db + end + else + self.sc_logger:error("[sc_broker:new]: couldn't load luasql.mysql module and you asked for it by using the enable_broker_cache_counter_check parameter." + .. " Make sure that you have installed this dependency. 
We are disabling the aformentioned parameter.") + end end setmetatable(self, { __index = ScBroker }) @@ -32,7 +52,7 @@ end function ScBroker:get_host_all_infos(host_id) -- return because host_id isn't valid if host_id == nil or host_id == "" then - self.logger:warning("[sc_broker:get_host_all_infos]: host id is nil") + self.sc_logger:warning("[sc_broker:get_host_all_infos]: host id is nil") return false end @@ -40,9 +60,39 @@ function ScBroker:get_host_all_infos(host_id) local host_info = broker_cache:get_host(host_id) -- return false only if no host information were found in broker cache - if not host_info then - self.logger:warning("[sc_broker:get_host_all_infos]: No host information found for host_id: " .. tostring(host_id) .. ". Restarting centengine should fix this.") + if not host_info and self.params.enable_broker_cache_counter_check ~= 1 then + self.sc_logger:warning("[sc_broker:get_host_all_infos]: No host information found for host_id: " .. tostring(host_id) .. ". Restarting centengine should fix this.") return false + + -- user is asking to also check in the database for the host. if we find it, we return a limited set of value (the most common ones) + elseif not host_info and self.params.enable_broker_cache_counter_check == 1 then + local query = [[ + SELECT h.host_id, + h.host_name AS name, + h.host_alias AS alias, + h.host_address AS address, + h.display_name, + ehi.ehi_notes AS notes, + ehi.ehi_notes_url AS notes_url, + ehi.ehi_action_url AS action_url, + nhr.nagios_server_id AS instance_id + FROM host h, + extended_host_information ehi, + ns_host_relation nhr + WHERE ehi.host_host_id = h.host_id + AND h.host_activate <> '0' + AND h.host_id = nhr.host_host_id + AND h.host_id = ]] .. tonumber(host_id) + + self.sc_logger:debug("[sc_broker:get_host_all_infos]: no information found in broker cache for host: " .. tostring(host_id) .. ", going to check in the centreon database with query: " .. 
tostring(query)) + + host_info = self:get_centreon_db_info(query) + + if not host_info then + self.sc_logger:error("[sc_broker:get_host_all_infos]: couldn't find host: " .. tostring(host_id) + .. " in your database. Maybe it has been disabled or removed. You should export your configuration.") + return false + end end return host_info @@ -50,13 +100,13 @@ end --- get_service_all_infos: retrieve informations from a service -- @param host_id (number) --- @params service_id (number) +-- @param service_id (number) -- @return false (boolean) if host id or service id aren't valid -- @return service (table) all the informations from the service function ScBroker:get_service_all_infos(host_id, service_id) -- return because host_id or service_id isn't valid if host_id == nil or host_id == "" or service_id == nil or service_id == "" then - self.logger:warning("[sc_broker:get_service_all_infos]: host id or service id is nil") + self.sc_logger:warning("[sc_broker:get_service_all_infos]: host id or service id is nil") return false end @@ -64,10 +114,34 @@ function ScBroker:get_service_all_infos(host_id, service_id) local service_info = broker_cache:get_service(host_id, service_id) -- return false only if no service information were found in broker cache - if not service_info then - self.logger:warning("[sc_broker:get_service_all_infos]: No service information found for host_id: " .. tostring(host_id) + if not service_info and self.params.enable_broker_cache_counter_check ~= 1 then + self.sc_logger:warning("[sc_broker:get_service_all_infos]: No service information found for host_id: " .. tostring(host_id) .. " and service_id: " .. tostring(service_id) .. ". 
Restarting centengine should fix this.") return false + elseif not service_info and self.params.enable_broker_cache_counter_check == 1 then + local query = [[ + SELECT s.service_id, + s.service_description AS description, + s.service_alias AS alias, + s.display_name, + esi.esi_notes AS notes, + esi.esi_notes_url AS notes_url, + esi.esi_action_url AS action_url + FROM service s, + extended_service_information esi + WHERE esi.service_service_id = s.service_id + AND s.service_activate <> '0' + AND s.service_id = ]] .. tonumber(service_id) + + self.sc_logger:debug("[sc_broker:get_host_all_infos]: no information found in broker cache for service: " .. tostring(service_id) .. ", going to check in the centreon database with query: " .. tostring(query)) + + service_info = self:get_centreon_db_info(query) + + if not service_info then + self.sc_logger:error("[sc_broker:get_host_all_infos]: couldn't find service: " .. tostring(service_id) + .. " in your database. Maybe it has been disabled or removed. You should export your configuration.") + return false + end end return service_info @@ -81,7 +155,7 @@ end function ScBroker:get_host_infos(host_id, info) -- return because host_id isn't valid if host_id == nil or host_id == "" then - self.logger:warning("[sc_broker:get_host_infos]: host id is nil") + self.sc_logger:warning("[sc_broker:get_host_infos]: host id is nil") return false end @@ -100,7 +174,7 @@ function ScBroker:get_host_infos(host_id, info) -- return host_id only if no host information were found in broker cache if not host_info then - self.logger:warning("[sc_broker:get_host_infos]: No host information found for host_id: " .. tostring(host_id) .. ". Restarting centengine should fix this.") + self.sc_logger:warning("[sc_broker:get_host_infos]: No host information found for host_id: " .. tostring(host_id) .. ". 
Restarting centengine should fix this.") return host end @@ -132,7 +206,7 @@ end function ScBroker:get_service_infos(host_id, service_id, info) -- return because host_id or service_id isn't valid if host_id == nil or host_id == "" or service_id == nil or service_id == "" then - self.logger:warning("[sc_broker:get_service_infos]: host id or service id is invalid") + self.sc_logger:warning("[sc_broker:get_service_infos]: host id or service id is invalid") return false end @@ -152,7 +226,7 @@ function ScBroker:get_service_infos(host_id, service_id, info) -- return host_id and service_id only if no host information were found in broker cache if not service_info then - self.logger:warning("[sc_broker:get_service_infos]: No service information found for host_id: " .. tostring(host_id) .. " and service_id: " .. tostring(service_id) + self.sc_logger:warning("[sc_broker:get_service_infos]: No service information found for host_id: " .. tostring(host_id) .. " and service_id: " .. tostring(service_id) .. ". 
Restarting centengine should fix this.") return service end @@ -183,7 +257,7 @@ end function ScBroker:get_hostgroups(host_id) -- return false if host id is invalid if host_id == nil or host_id == "" then - self.logger:warning("[sc_broker:get_hostgroup]: host id is nil or empty") + self.sc_logger:warning("[sc_broker:get_hostgroup]: host id is nil or empty") return false end @@ -206,7 +280,7 @@ end function ScBroker:get_servicegroups(host_id, service_id) -- return false if service id is invalid if host_id == nil or host_id == "" or service_id == nil or service_id == "" then - self.logger:warning("[sc_broker:get_servicegroups]: service id is nil or empty") + self.sc_logger:warning("[sc_broker:get_servicegroups]: service id is nil or empty") return false end @@ -229,7 +303,7 @@ end function ScBroker:get_severity(host_id, service_id) -- return false if host id is invalid if host_id == nil or host_id == "" then - self.logger:warning("[sc_broker:get_severity]: host id is nil or empty") + self.sc_logger:warning("[sc_broker:get_severity]: host id is nil or empty") return false end @@ -242,7 +316,7 @@ function ScBroker:get_severity(host_id, service_id) -- return false if no severity were found if not severity then - self.logger:warning("[sc_broker:get_severity]: no severity found in broker cache for host: " .. tostring(host_id)) + self.sc_logger:warning("[sc_broker:get_severity]: no severity found in broker cache for host: " .. tostring(host_id)) return false end @@ -254,7 +328,7 @@ function ScBroker:get_severity(host_id, service_id) -- return false if no severity were found if not severity then - self.logger:warning("[sc_broker:get_severity]: no severity found in broker cache for host id: " .. tostring(host_id) .. " and service id: " .. tostring(service_id)) + self.sc_logger:warning("[sc_broker:get_severity]: no severity found in broker cache for host id: " .. tostring(host_id) .. " and service id: " .. 
tostring(service_id)) return false end @@ -268,7 +342,7 @@ end function ScBroker:get_instance(instance_id) -- return false if instance_id is invalid if instance_id == nil or instance_id == "" then - self.logger:warning("[sc_broker:get_instance]: instance id is nil or empty") + self.sc_logger:warning("[sc_broker:get_instance]: instance id is nil or empty") return false end @@ -277,7 +351,7 @@ function ScBroker:get_instance(instance_id) -- return false if no instance name is found if not name then - self.logger:warning("[sc_broker:get_instance]: couldn't get instance name from broker cache for instance id: " .. tostring(instance_id)) + self.sc_logger:warning("[sc_broker:get_instance]: couldn't get instance name from broker cache for instance id: " .. tostring(instance_id)) return false end @@ -291,7 +365,7 @@ end function ScBroker:get_ba_infos(ba_id) -- return false if ba_id is invalid if ba_id == nil or ba_id == "" then - self.logger:warning("[sc_broker:get_ba_infos]: ba id is nil or empty") + self.sc_logger:warning("[sc_broker:get_ba_infos]: ba id is nil or empty") return false end @@ -300,7 +374,7 @@ function ScBroker:get_ba_infos(ba_id) -- return false if no informations are found if ba_info == nil then - self.logger:warning("[sc_broker:get_ba_infos]: couldn't get ba informations in cache for ba_id: " .. tostring(ba_id)) + self.sc_logger:warning("[sc_broker:get_ba_infos]: couldn't get ba informations in cache for ba_id: " .. tostring(ba_id)) return false end @@ -314,7 +388,7 @@ end function ScBroker:get_bvs_infos(ba_id) -- return false if ba_id is invalid if ba_id == nil or ba_id == "" then - self.logger:warning("[sc_broker:get_bvs]: ba id is nil or empty") + self.sc_logger:warning("[sc_broker:get_bvs]: ba id is nil or empty") return false end @@ -323,7 +397,7 @@ function ScBroker:get_bvs_infos(ba_id) -- return false if no bv id are found for ba_id if bvs_id == nil or bvs_id == "" then - self.logger:warning("[sc_broker:get_bvs]: couldn't get bvs for ba id: " .. 
tostring(ba_id)) + self.sc_logger:warning("[sc_broker:get_bvs]: couldn't get bvs for ba id: " .. tostring(ba_id)) return false end @@ -340,7 +414,7 @@ function ScBroker:get_bvs_infos(ba_id) table.insert(bvs,bv_infos) found_bv = true else - self.logger:warning("[sc_broker:get_bvs]: couldn't get bv information for bv id: " .. tostring(bv_id)) + self.sc_logger:warning("[sc_broker:get_bvs]: couldn't get bv information for bv id: " .. tostring(bv_id)) end end @@ -352,4 +426,27 @@ function ScBroker:get_bvs_infos(ba_id) return bvs end +--- get_centreon_db_info: run a query (that must return only one row) in the centreon database to build a cache from the db when asked to. If the query returns multiple rows, only the last one will be returned +-- @param query (string) the sql query that must be executed to build the cache +-- @return result (table or nil) the result of the query or nil +function ScBroker:get_centreon_db_info(query) + local result, error = self.centreon_db:execute(query) + + if not result then + self.sc_logger:error("[sc_broker:get_centreon_db_info]: query: " .. tostring(query) .. " failed\n error: " .. 
tostring(error)) + return nil + end + + local rows = result:fetch({}, "a") + local db_content + + -- queries are about a single object, we should never have multiple rows returned so we don't care about properly indexing results + while rows do + db_content = rows + rows = result:fetch(rows, "a") + end + + return db_content +end + return sc_broker diff --git a/modules/centreon-stream-connectors-lib/sc_params.lua b/modules/centreon-stream-connectors-lib/sc_params.lua index d4a1f8f8..df1ba98c 100644 --- a/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/modules/centreon-stream-connectors-lib/sc_params.lua @@ -78,6 +78,14 @@ function sc_params.new(common, logger) skip_anon_events = 1, skip_nil_id = 1, enable_bam_host = 0, + enable_broker_cache_counter_check = 0, + + -- centreon database information (only used if you set enable_broker_cache_counter_check to 1 + centreon_db_name = "centreon", + centreon_db_address = "127.0.0.1", + centreon_db_port = 3306, + centreon_db_user = "centreon", + centreon_db_password = "", -- enable or disable dedup enable_host_status_dedup = 1, diff --git a/modules/tests/acknowledgement_stream_connector.lua b/modules/tests/acknowledgement_stream_connector.lua index d965a0fc..3fdd4532 100644 --- a/modules/tests/acknowledgement_stream_connector.lua +++ b/modules/tests/acknowledgement_stream_connector.lua @@ -22,7 +22,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- initiate parameters dedicated to this stream connector @@ -42,6 +41,8 @@ function EventQueue.new(params) self.sc_params:param_override(params) self.sc_params:check_params() + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) + -- return EventQueue object setmetatable(self, { __index = EventQueue }) return self diff --git 
a/modules/tests/bam_stream_connector.lua b/modules/tests/bam_stream_connector.lua index 6908f2f2..dccbb3d7 100644 --- a/modules/tests/bam_stream_connector.lua +++ b/modules/tests/bam_stream_connector.lua @@ -22,7 +22,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- initiate parameters dedicated to this stream connector @@ -42,6 +41,8 @@ function EventQueue.new(params) self.sc_params:param_override(params) self.sc_params:check_params() + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) + -- return EventQueue object setmetatable(self, { __index = EventQueue }) return self diff --git a/modules/tests/downtime_stream_connector.lua b/modules/tests/downtime_stream_connector.lua index b81beb0f..f6e2647a 100644 --- a/modules/tests/downtime_stream_connector.lua +++ b/modules/tests/downtime_stream_connector.lua @@ -22,7 +22,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- initiate parameters dedicated to this stream connector @@ -42,6 +41,8 @@ function EventQueue.new(params) self.sc_params:param_override(params) self.sc_params:check_params() + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) + -- return EventQueue object setmetatable(self, { __index = EventQueue }) return self diff --git a/modules/tests/neb_stream_connector.lua b/modules/tests/neb_stream_connector.lua index 54ac4311..09a4d497 100644 --- a/modules/tests/neb_stream_connector.lua +++ b/modules/tests/neb_stream_connector.lua @@ -22,7 +22,6 @@ function EventQueue.new(params) -- initiate mandatory objects self.sc_logger 
= sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) - self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) -- initiate parameters dedicated to this stream connector @@ -42,6 +41,8 @@ function EventQueue.new(params) self.sc_params:param_override(params) self.sc_params:check_params() + self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) + -- return EventQueue object setmetatable(self, { __index = EventQueue }) return self From 7d6c6cdfc2dbdc430e5b17455fd7b59f29c0ec42 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Wed, 30 Apr 2025 11:23:57 +0200 Subject: [PATCH 05/32] update doc --- modules/docs/README.md | 59 ++++++++------- modules/docs/sc_broker.md | 154 ++++++++++++++++++++++++++------------ modules/docs/sc_param.md | 10 ++- 3 files changed, 146 insertions(+), 77 deletions(-) diff --git a/modules/docs/README.md b/modules/docs/README.md index 65eae402..3ff0c5b5 100644 --- a/modules/docs/README.md +++ b/modules/docs/README.md @@ -17,7 +17,7 @@ ## Libraries list | Lib name | Content | Usage | Documentation | -| ------------------------ | ------------------------------------------------ | ------------------------------------------------------------------------- | -------------------------------------------- | +|--------------------------| ------------------------------------------------ | ------------------------------------------------------------------------- | -------------------------------------------- | | sc_common | basic methods for lua | you can use it when you want to simplify your code | [Documentation](sc_common.md) | | sc_logger | methods that handle logging with centreon broker | When you want to log a message from your stream connector | [Documentation](sc_logger.md) | | sc_broker | wrapper methods for broker cache | when you need something from the broker cache | [Documentation](sc_broker.md) | @@ -31,25 +31,25 @@ ## sc_common methods -| 
Method name | Method description | Link | -| ---------------------------------- | --------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------- | -| ifnil_or_empty | check if a variable is empty or nil and replace it with a default value if it is the case | [Documentation](sc_common.md#ifnil_or_empty-method) | -| if_wrong_type | check the type of a variable, if it is wrong, replace the variable with a default value | [Documentation](sc_common.md#if_wrong_type-method) | -| boolean_to_number | change a true/false boolean to a 1/0 value | [Documentation](sc_common.md#boolean_to_number-method) | -| number_to_boolean | change a 0/1 number to a false/true value | [Documentation](sc_common.md#number_to_boolean-method) | -| check_boolean_number_option_syntax | make sure that a boolean is 0 or 1, if that's not the case, replace it with a default value | [Documentation](sc_common.md#check_boolean_number_option_syntax-method) | -| split | split a string using a separator (default is ",") and store each part in a table | [Documentation](sc_common.md#split-method) | -| compare_numbers | compare two numbers using the given mathematical operator and return true or false | [Documentation](sc_common.md#compare_numbers-method) | -| generate_postfield_param_string | convert a table of parameters into a URL encoded parameter string | [Documentation](sc_common.md#generate_postfield_param_string-method) | -| load_json_file | the method loads a json file and parses it | [Documentation](sc_common.md#load_json_file-method) | -| json_escape | escape json characters in a string | [Documentation](sc_common.md#json_escape-method) | -| xml_escape | escape xml characters in a string | [Documentation](sc_common.md#xml_escape-method) | -| lua_regex_escape | escape lua regex special characters in a string | [Documentation](sc_common.md#lua_regex_escape-method) | +| Method name 
| Method description | Link | +| ---------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------- | +| ifnil_or_empty | check if a variable is empty or nil and replace it with a default value if it is the case | [Documentation](sc_common.md#ifnil_or_empty-method) | +| if_wrong_type | check the type of a variable, if it is wrong, replace the variable with a default value | [Documentation](sc_common.md#if_wrong_type-method) | +| boolean_to_number | change a true/false boolean to a 1/0 value | [Documentation](sc_common.md#boolean_to_number-method) | +| number_to_boolean | change a 0/1 number to a false/true value | [Documentation](sc_common.md#number_to_boolean-method) | +| check_boolean_number_option_syntax | make sure that a boolean is 0 or 1, if that's not the case, replace it with a default value | [Documentation](sc_common.md#check_boolean_number_option_syntax-method) | +| split | split a string using a separator (default is ",") and store each part in a table | [Documentation](sc_common.md#split-method) | +| compare_numbers | compare two numbers using the given mathematical operator and return true or false | [Documentation](sc_common.md#compare_numbers-method) | +| generate_postfield_param_string | convert a table of parameters into a URL encoded parameter string | [Documentation](sc_common.md#generate_postfield_param_string-method) | +| load_json_file | the method loads a json file and parses it | [Documentation](sc_common.md#load_json_file-method) | +| json_escape | escape json characters in a string | [Documentation](sc_common.md#json_escape-method) | +| xml_escape | escape xml characters in a string | [Documentation](sc_common.md#xml_escape-method) | +| lua_regex_escape | escape lua regex special characters in a string | [Documentation](sc_common.md#lua_regex_escape-method) | | dumper | dump any 
variable for debug purposes | [Documentation](sc_common.md#dumper-method) | -| trim | trim spaces (or provided character) at the beginning and the end of a string | [Documentation](sc_common.md#trim-method) | -| get_bbdo_version | returns the first digit of the bbdo protocol version | [Documentation](sc_common.md#get_bbdo_version-method) | -| is_valid_pattern | check if a Lua pattern is valid | [Documentation](sc_common.md#is_valid_pattern-method) | -| sleep | wait a given number of seconds | [Documentation](sc_common.md#sleep-method) | +| trim | trim spaces (or provided character) at the beginning and the end of a string | [Documentation](sc_common.md#trim-method) | +| get_bbdo_version | returns the first digit of the bbdo protocol version | [Documentation](sc_common.md#get_bbdo_version-method) | +| is_valid_pattern | check if a Lua pattern is valid | [Documentation](sc_common.md#is_valid_pattern-method) | +| sleep | wait a given number of seconds | [Documentation](sc_common.md#sleep-method) | | create_sleep_counter_table | create a table to handle sleep counters. 
Useful when you want to log something less often after some repetitions | [Documentation](sc_common.md#create_sleep_counter_table-method) | ## sc_logger methods @@ -77,19 +77,20 @@ | get_instance | retrieve the name of the poller using the instance id from the broker cache | [Documentation](sc_broker.md#get_instance-method) | | get_ba_infos | retrieve the name and description of a BA from the broker cache | [Documentation](sc_broker.md#get_ba_infos-method) | | get_bvs_infos | retrieve the name and description of all BV linked to a BA | [Documentation](sc_broker.md#get_bvs_infos-method) | +| get_centreon_db_info | runs a query in the centreon database to build a cache from the db when asked to | [Documentation](sc_broker.md#get_centreon_db_info-method) | ## sc_param methods -| Method name | Method description | Link | -| ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| param_override | replace default values of params with the ones provided by users in the web configuration of the stream connector | [Documentation](sc_param.md#param_override-method) | +| Method name | Method description | Link | +| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| param_override | replace default values of params with the ones provided by users in the web configuration of the stream connector | [Documentation](sc_param.md#param_override-method) | | check_params | make sure that the default stream connector params provided by the user from the web configuration are valid. 
If not, uses the default value | [Documentation](sc_param.md#check_params-method) | -| is_mandatory_config_set | check that all mandatory parameters for a stream connector are set | [Documentation](sc_param.md#is_mandatory_config_set-method) | -| get_kafka_params | retreive Kafka dedicated parameters from the parameter list and put them in the provided kafka_config object | [Documentation](sc_param.md#get_kafka_params-method) | -| load_event_format_file | load a file that serves as a template for formatting events | [Documentation](sc_param.md#load_event_format_file-method) | -| build_accepted_elements_info | build a table that stores information about accepted elements | [Documentation](sc_param.md#build_accepted_elements_info-method) | -| validate_pattern_param | check if a parameter has a valid Lua pattern as a value | [Documentation](sc_param.md#validate_pattern_param-method) | -| build_and_validate_filters_pattern | build a table that stores information about patterns for compatible parameters | [Documentation](sc_param.md#build_and_validate_filters_pattern-method) | +| is_mandatory_config_set | check that all mandatory parameters for a stream connector are set | [Documentation](sc_param.md#is_mandatory_config_set-method) | +| get_kafka_params | retreive Kafka dedicated parameters from the parameter list and put them in the provided kafka_config object | [Documentation](sc_param.md#get_kafka_params-method) | +| load_event_format_file | load a file that serves as a template for formatting events | [Documentation](sc_param.md#load_event_format_file-method) | +| build_accepted_elements_info | build a table that stores information about accepted elements | [Documentation](sc_param.md#build_accepted_elements_info-method) | +| validate_pattern_param | check if a parameter has a valid Lua pattern as a value | [Documentation](sc_param.md#validate_pattern_param-method) | +| build_and_validate_filters_pattern | build a table that stores information about patterns for 
compatible parameters | [Documentation](sc_param.md#build_and_validate_filters_pattern-method) | ## sc_event methods @@ -214,4 +215,4 @@ | learn how to create a custom format using a format file | [Documentation](./templating.md) | | learn how to create custom code for your stream connector | [Documentation](./custom_code.md) | | have a look at all the available mappings and how to use them | [Documentation](./mappings.md) | -| have a look at the event structure | [Documentation](./broker_data_structure.md) and [Documentation](https://docs.centreon.com/docs/developer/developer-broker-mapping/) | +| have a look at the event structure | [Documentation](./broker_data_structure.md) and [Documentation](https://docs.centreon.com/docs/developer/developer-broker-mapping/) | \ No newline at end of file diff --git a/modules/docs/sc_broker.md b/modules/docs/sc_broker.md index d28faabe..a6e905d5 100644 --- a/modules/docs/sc_broker.md +++ b/modules/docs/sc_broker.md @@ -1,50 +1,54 @@ # Documentation of the sc_broker module -- [Documentation of the sc_broker module](#documentation-of-the-sc_broker-module) +- [Documentation of the sc\_broker module](#documentation-of-the-sc_broker-module) - [Introduction](#introduction) - [Module initialization](#module-initialization) - [Module constructor](#module-constructor) - [constructor: Example](#constructor-example) - - [get_host_all_infos method](#get_host_all_infos-method) - - [get_host_all_infos: parameters](#get_host_all_infos-parameters) - - [get_host_all_infos: returns](#get_host_all_infos-returns) - - [get_host_all_infos: example](#get_host_all_infos-example) - - [get_service_all_infos method](#get_service_all_infos-method) - - [get_service_all_infos: parameters](#get_service_all_infos-parameters) - - [get_service_all_infos: returns](#get_service_all_infos-returns) - - [get_service_all_infos: example](#get_service_all_infos-example) - - [get_host_infos method](#get_host_infos-method) - - [get_host_infos: 
parameters](#get_host_infos-parameters) - - [get_host_infos: returns](#get_host_infos-returns) - - [get_host_infos: example](#get_host_infos-example) - - [get_service_infos method](#get_service_infos-method) - - [get_service_infos: parameters](#get_service_infos-parameters) - - [get_service_infos: returns](#get_service_infos-returns) - - [get_service_infos: example](#get_service_infos-example) - - [get_hostgroups method](#get_hostgroups-method) - - [get_hostgroups: parameters](#get_hostgroups-parameters) - - [get_hostgroups: returns](#get_hostgroups-returns) - - [get_hostgroups: example](#get_hostgroups-example) - - [get_servicegroups method](#get_servicegroups-method) - - [get_servicegroups: parameters](#get_servicegroups-parameters) - - [get_servicegroups: returns](#get_servicegroups-returns) - - [get_servicegroups: example](#get_servicegroups-example) - - [get_severity method](#get_severity-method) - - [get_severity: parameters](#get_severity-parameters) - - [get_severity: returns](#get_severity-returns) - - [get_severity: example](#get_severity-example) - - [get_instance method](#get_instance-method) - - [get_instance: parameters](#get_instance-parameters) - - [get_instance: returns](#get_instance-returns) - - [get_instance: example](#get_instance-example) - - [get_ba_infos method](#get_ba_infos-method) - - [get_ba_infos: parameters](#get_ba_infos-parameters) - - [get_ba_infos: returns](#get_ba_infos-returns) - - [get_ba_infos: example](#get_ba_infos-example) - - [get_bvs_infos method](#get_bvs_infos-method) - - [get_bvs_infos: parameters](#get_bvs_infos-parameters) - - [get_bvs_infos: returns](#get_bvs_infos-returns) - - [get_bvs_infos: example](#get_bvs_infos-example) + - [get\_host\_all\_infos method](#get_host_all_infos-method) + - [get\_host\_all\_infos: parameters](#get_host_all_infos-parameters) + - [get\_host\_all\_infos: returns](#get_host_all_infos-returns) + - [get\_host\_all\_infos: example](#get_host_all_infos-example) + - [get\_service\_all\_infos 
method](#get_service_all_infos-method) + - [get\_service\_all\_infos: parameters](#get_service_all_infos-parameters) + - [get\_service\_all\_infos: returns](#get_service_all_infos-returns) + - [get\_service\_all\_infos: example](#get_service_all_infos-example) + - [get\_host\_infos method](#get_host_infos-method) + - [get\_host\_infos: parameters](#get_host_infos-parameters) + - [get\_host\_infos: returns](#get_host_infos-returns) + - [get\_host\_infos: example](#get_host_infos-example) + - [get\_service\_infos method](#get_service_infos-method) + - [get\_service\_infos: parameters](#get_service_infos-parameters) + - [get\_service\_infos: returns](#get_service_infos-returns) + - [get\_service\_infos: example](#get_service_infos-example) + - [get\_hostgroups method](#get_hostgroups-method) + - [get\_hostgroups: parameters](#get_hostgroups-parameters) + - [get\_hostgroups: returns](#get_hostgroups-returns) + - [get\_hostgroups: example](#get_hostgroups-example) + - [get\_servicegroups method](#get_servicegroups-method) + - [get\_servicegroups: parameters](#get_servicegroups-parameters) + - [get\_servicegroups: returns](#get_servicegroups-returns) + - [get\_servicegroups: example](#get_servicegroups-example) + - [get\_severity method](#get_severity-method) + - [get\_severity: parameters](#get_severity-parameters) + - [get\_severity: returns](#get_severity-returns) + - [get\_severity: example](#get_severity-example) + - [get\_instance method](#get_instance-method) + - [get\_instance: parameters](#get_instance-parameters) + - [get\_instance: returns](#get_instance-returns) + - [get\_instance: example](#get_instance-example) + - [get\_ba\_infos method](#get_ba_infos-method) + - [get\_ba\_infos: parameters](#get_ba_infos-parameters) + - [get\_ba\_infos: returns](#get_ba_infos-returns) + - [get\_ba\_infos: example](#get_ba_infos-example) + - [get\_bvs\_infos method](#get_bvs_infos-method) + - [get\_bvs\_infos: parameters](#get_bvs_infos-parameters) + - [get\_bvs\_infos: 
returns](#get_bvs_infos-returns) + - [get\_bvs\_infos: example](#get_bvs_infos-example) + - [get\_centreon\_db\_info method](#get_centreon_db_info-method) + - [get\_centreon\_db\_info: parameters](#get_centreon_db_info-parameters) + - [get\_centreon\_db\_info: returns](#get_centreon_db_info-returns) + - [get\_centreon\_db\_info: example](#get_centreon_db_info-example) ## Introduction @@ -56,18 +60,18 @@ Since this is OOP, it is required to initiate your module ### Module constructor -Constructor can be initialized with one parameter or it will use a default value. +Constructor can be initialized with two parameters. +- a params table. - sc_logger. This is an instance of the sc_logger module -If you don't provide this parameter it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization)) +If you don't provide the sc_logger parameter it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization)) ### constructor: Example ```lua -- load modules local sc_logger = require("centreon-stream-connectors-lib.sc_logger") -local sc_broker = require("centreon-stream-connectors-lib.sc_broker") -- initiate "mandatory" informations for the logger module local logfile = "/var/log/test_broker.log" @@ -76,8 +80,11 @@ local severity = 1 -- create a new instance of the sc_logger module local test_logger = sc_logger.new(logfile, severity) --- create a new instance of the sc_common module -local test_broker = sc_broker.new(test_logger) +-- create a new instance of the sc_param module +local test_param = sc_param.new(test_common, test_logger) + +-- create a new instance of the sc_broker module +local test_broker = sc_broker.new(test_param.params, test_logger) ``` ## get_host_all_infos method @@ -614,3 +621,58 @@ local result = test_broker:get_ba_infos(ba_id) --> result[2].bv_name is: "another-BV" --]] ``` + +## get_centreon_db_info method + +The 
**get_centreon_db_info** method runs a query (that must return only one row) in the centreon database to build a cache from the db when asked to. If the query returns multiple rows, only the last one will be returned + +This second cache method is currently only triggered when using the following sc_cache methods: + +- get_host_all_infos +- get_service_all_infos + +The cache that is built this way will not return the same amount of information. This feature's purpose is not to replace the broker cache. It is just a best-effort mechanism in case of a faulty broker cache. + +### get_centreon_db_info: parameters + +| parameter | type | optional | default value | +| ---------------- | ------ | -------- | ------------- | +| the query to run | string | no | | + +### get_centreon_db_info: returns + +| return | type | always | condition | +| ------------------------------- | ----- | ------ | -------------------------------------------------- | +| a table with the result of the query | table | no | it will return nil if the query failed or is empty | + +### get_centreon_db_info: example + +```lua +local host_id = 2712 +local query = [[ + SELECT h.host_id, + h.host_name AS name, + h.host_alias AS alias, + h.host_address AS address, + h.display_name, + ehi.ehi_notes AS notes, + ehi.ehi_notes_url AS notes_url, + ehi.ehi_action_url AS action_url + FROM host h, + extended_host_information ehi + WHERE ehi.host_host_id = h.host_id + AND h.host_activate <> '0' + AND h.host_id = ]] .. 
tonumber(host_id) + +local result = test_broker:get_centreon_db_info(query) +--[[ + --> result structure is: + { + "address" = "127.0.0.1", + "host_id" = "2712", + "name" = "bordeaux", + "alias" = "what a beautiful city", + "notes" = "you should go there when you have time" + } +]]-- +``` \ No newline at end of file diff --git a/modules/docs/sc_param.md b/modules/docs/sc_param.md index 0a21157f..d3447a4c 100644 --- a/modules/docs/sc_param.md +++ b/modules/docs/sc_param.md @@ -37,7 +37,7 @@ The sc_param module provides methods to help you handle parameters for your stre ### Default parameters | Parameter name | type | default value | description | default scope | additional information | -| --------------------------------------- | ------ | ----------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +|-----------------------------------------| ------ | ----------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | accepted_categories | string | neb,bam | each event is linked to a broker category that we can use to filter events | | it is a comma-separated list, can use "neb", "bam", "storage". Storage is deprecated, use "neb" to get metrics data [more information](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#event-categories) | | accepted_elements | string | host_status,service_status,ba_status | | each event is linked to a broker element that we can use to filter events | it is a comma-separated list, can use any type in the "neb", "bam" and "storage" tables [described here](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#neb) (you must use lower case and replace blank spaces with underscores. 
"Host status" becomes "host_status") | | host_status | string | 0,1,2 | comma-separated list of accepted host statuses (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | | | @@ -67,6 +67,12 @@ The sc_param module provides methods to help you handle parameters for your stre | skip_anon_events | number | 1 | filter out events if their name can't be found in the broker cache (use 0 to accept them) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | | skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | | enable_bam_host | number | 0 | filter out events if the host name matches "^_Module_BAM_*" (which is a fake host related to Business Activities from the Centreon BAM module) (use 1 to accept them). Those are very special events with the hostname being _Module_BAM_1 and the service name being ba_x where "x" is the ID of the business activity. Even if this parameter only applies to service_status(neb) events, you will need to use the **NEB and BAM** filter in your stream connector configuration (otherwise you won't have the BAM cache built by Centreon Broker and your event will not be sent) | service_status(neb) | | +| enable_broker_cache_counter_check | number | 0 | when enabled, if the stream connector is not able to find a host or a service in the broker cache, it will try to find it in the centreon database. (set to 1 to enable). 
| | You need to have the luasql.mysql dependency installed (stream connectors will automatically disable this option if it is not the case) | +| centreon_db_name | string | centreon | the name of the centreon database (only used when enable_broker_cache_counter_check is set to 1) | | | +| centreon_db_user | string | centreon | the user used to connect to the centreon database (only used when enable_broker_cache_counter_check is set to 1) | | | +| centreon_db_password | string | | the password of the centreon database user (only used when enable_broker_cache_counter_check is set to 1) | | | +| centreon_db_address | string | 127.0.0.1 | the address of the centreon database (only used when enable_broker_cache_counter_check is set to 1) | | | +| centreon_db_port | number | 3306 | the port of the centreon database (only used when enable_broker_cache_counter_check is set to 1) | | | | max_buffer_size | number | 1 | this is the number of events the stream connector is going to store before sending them. (bulk send is made using a value above 1). 
| | | | max_buffer_age | number | 5 | if no new event has been stored in the buffer in the past 5 seconds, all stored events are going to be sent even if the max_buffer_size hasn't been reached | | | | max_all_queues_age | number | 5 | if the last global flush was 5 seconds ago, it will force a flush of each queue | | | @@ -410,4 +416,4 @@ local param_list = {"accepted_hosts", "accepted_services"} test_param:build_and_validate_filters_pattern(param_name, param_value) --> it creates a test_param.params.accepted_hosts_pattern_list table and a test_param.params.accepted_services_pattern_list -``` +``` \ No newline at end of file From db9b57b19b8d39b5061699ef18d5225f751ed986 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Wed, 30 Apr 2025 12:02:05 +0200 Subject: [PATCH 06/32] removing unneeded params --- centreon-certified/influxdb/influxdb-metrics-apiv2.lua | 8 -------- 1 file changed, 8 deletions(-) diff --git a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua index 2d0fbc13..ba7b0904 100644 --- a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua +++ b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua @@ -68,14 +68,6 @@ function EventQueue.new(params) self.sc_params.params.hard_only = params.hard_only or 0 self.sc_params.params.enable_host_status_dedup = params.enable_host_status_dedup or 0 self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0 - -- for cache problems, we need to check the datas in the database - self.sc_params.params.enable_broker_cache_counter_check = params.enable_broker_cache_counter_check or 0 - -- centreon database information (only used if you set enable_broker_cache_counter_check to 1 - self.sc_params.params.centreon_db_name = params.centreon_db_name or "centreon" - self.sc_params.params.centreon_db_address = params.centreon_db_name or "127.0.0.1" - self.sc_params.params.centreon_db_port = params.centreon_db_name or 3306 - 
self.sc_params.params.centreon_db_user = params.centreon_db_name or "centreon" - self.sc_params.params.centreon_db_password = params.centreon_db_name or "" -- apply users params and check syntax of standard ones self.sc_params:param_override(params) From d2a24b75d59c555fac5c06049dee1989c93d7867 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Wed, 30 Apr 2025 16:23:44 +0200 Subject: [PATCH 07/32] add lua-sql-mysql dependency --- packaging/connectors-lib/centreon-stream-connectors-lib.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packaging/connectors-lib/centreon-stream-connectors-lib.yaml b/packaging/connectors-lib/centreon-stream-connectors-lib.yaml index 670e6ee6..356f5f1e 100644 --- a/packaging/connectors-lib/centreon-stream-connectors-lib.yaml +++ b/packaging/connectors-lib/centreon-stream-connectors-lib.yaml @@ -32,12 +32,14 @@ overrides: - lua-socket >= 3.0 - centreon-broker-core >= 22.04.0 - lua-curl >= 0.3.13-10 + - lua-sql-mysql - lua deb: depends: - "centreon-broker-core (>= 22.04.0)" - "lua-socket (>= 3.0~)" - "lua-curl (>= 0.3.13-10)" + - "lua-sql-mysql" - "lua5.3" rpm: From 3cf7bd2fca779a2b763632dccf961f0b323bac51 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Wed, 30 Apr 2025 16:26:50 +0200 Subject: [PATCH 08/32] add the ability to upload the packages as artifacts --- .github/actions/package-nfpm/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/package-nfpm/action.yml b/.github/actions/package-nfpm/action.yml index f0156ccb..1b46a31e 100644 --- a/.github/actions/package-nfpm/action.yml +++ b/.github/actions/package-nfpm/action.yml @@ -98,8 +98,8 @@ runs: path: ./*.${{ inputs.package_extension }} key: ${{ inputs.cache_key }} - # Update if condition to true to get packages as artifacts - - if: ${{ false }} + # Add to your PR the label upload-artifacts to get packages as artifacts + - if: ${{ contains(github.event.pull_request.labels.*.name, 'upload-artifacts') }} name: Upload package 
artifacts uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 with: From e6665d8c231ee836227e298ef753b7a3100b42a3 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Wed, 30 Apr 2025 16:59:35 +0200 Subject: [PATCH 09/32] update lib version --- .github/workflows/stream-connectors-lib.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stream-connectors-lib.yml b/.github/workflows/stream-connectors-lib.yml index 63b7f8f0..2f43afaf 100644 --- a/.github/workflows/stream-connectors-lib.yml +++ b/.github/workflows/stream-connectors-lib.yml @@ -65,7 +65,7 @@ jobs: with: nfpm_file_pattern: "packaging/connectors-lib/*.yaml" distrib: ${{ matrix.distrib }} - version: "3.7.0" # previous version:"3.6.1" + version: "3.7.1" # previous version:"3.7.0" release: "2" package_extension: ${{ matrix.package_extension }} arch: all From 1baf5e4fa165b1eb75e6055360e37baaa6ce25ca Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Wed, 30 Apr 2025 17:06:55 +0200 Subject: [PATCH 10/32] update release --- .github/workflows/stream-connectors-lib.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stream-connectors-lib.yml b/.github/workflows/stream-connectors-lib.yml index 2f43afaf..306d9185 100644 --- a/.github/workflows/stream-connectors-lib.yml +++ b/.github/workflows/stream-connectors-lib.yml @@ -66,7 +66,7 @@ jobs: nfpm_file_pattern: "packaging/connectors-lib/*.yaml" distrib: ${{ matrix.distrib }} version: "3.7.1" # previous version:"3.7.0" - release: "2" + release: "1" package_extension: ${{ matrix.package_extension }} arch: all commit_hash: ${{ github.sha }} From 19da298b39298ba4538acca2fa33ed3f988e37c9 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Mon, 12 May 2025 11:54:46 +0200 Subject: [PATCH 11/32] updates after client tests --- .../influxdb/influxdb-metrics-apiv2.lua | 107 +++-- .../sc_params.lua | 120 +++--- .../sc_storage.lua | 199 ++++++++++ .../sc_storage_sqlite.lua | 368 
++++++++++++++++++ 4 files changed, 715 insertions(+), 79 deletions(-) create mode 100644 modules/centreon-stream-connectors-lib/sc_storage.lua create mode 100644 modules/centreon-stream-connectors-lib/sc_storage_sqlite.lua diff --git a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua index ba7b0904..b3141ba0 100644 --- a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua +++ b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua @@ -7,6 +7,7 @@ local metrics = {} -- Libraries local curl = require "cURL" +local mime = require("mime") local sc_common = require("centreon-stream-connectors-lib.sc_common") local sc_logger = require("centreon-stream-connectors-lib.sc_logger") local sc_broker = require("centreon-stream-connectors-lib.sc_broker") @@ -15,6 +16,7 @@ local sc_params = require("centreon-stream-connectors-lib.sc_params") local sc_macros = require("centreon-stream-connectors-lib.sc_macros") local sc_flush = require("centreon-stream-connectors-lib.sc_flush") local sc_metrics = require("centreon-stream-connectors-lib.sc_metrics") +local sc_storage = require("centreon-stream-connectors-lib.sc_storage") -------------------------------------------------------------------------------- -- Classe event_queue @@ -63,12 +65,13 @@ function EventQueue.new(params) self.sc_params.params.influxdb_password = params.influxdb_password self.sc_params.params.influxdb_database = params.influxdb_database self.sc_params.params.accepted_categories = params.accepted_categories or "neb" - self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + self.sc_params.params.accepted_elements = params.accepted_elements or "service_status" self.sc_params.params.max_buffer_size = params.max_buffer_size or 100 self.sc_params.params.hard_only = params.hard_only or 0 self.sc_params.params.enable_host_status_dedup = params.enable_host_status_dedup or 0 
self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0 - + self.sc_params.params.metric_name_regex = params.metric_name_regex or "([, =])" + self.sc_params.params.metric_replacement_character = params.metric_replacement_character or "\\%1" -- apply users params and check syntax of standard ones self.sc_params:param_override(params) self.sc_params:check_params() @@ -82,30 +85,49 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) + self.sc_storage = sc_storage.new(self.sc_common, self.sc_logger, self.sc_params.params) + local rc, init_metrics = self.sc_storage:get_all_values_from_property("metric_id") + if type(init_metrics) == "boolean" or rc == false then + self.sc_logger:notice("no metric_id found in the sqlite db. That's probably because it is the first time the stream connector is executed") + else + metrics = init_metrics + end local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements self.format_event = { [categories.neb.id] = { - [elements.host_status.id] = function () return self:format_event_host() end, - [elements.service_status.id] = function () return self:format_event_service() end + [elements.host_status.id] = function() + return self:format_event_host() + end, + [elements.service_status.id] = function() + return self:format_event_service() + end } } self.format_metric = { [categories.neb.id] = { - [elements.host_status.id] = function (metric) return self:format_metric_host(metric) end, - [elements.service_status.id] = function (metric) return self:format_metric_service(metric) end + [elements.host_status.id] = function(metric) + return self:format_metric_host(metric) + end, + [elements.service_status.id] = function(metric) + return self:format_metric_service(metric) + end } } self.send_data_method = 
{ - [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + [1] = function(payload, queue_metadata) + return self:send_data(payload, queue_metadata) + end } self.build_payload_method = { - [1] = function (payload, event) return self:build_payload(payload, event) end + [1] = function(payload, event) + return self:build_payload(payload, event) + end } -- return EventQueue object @@ -125,9 +147,9 @@ function EventQueue:format_accepted_event() -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file if not self.format_event[category][element] then self.sc_logger:error("[format_event]: You are trying to format an event with category: " - .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " - .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) - .. ". If it is a not a misconfiguration, you should create a format file to handle this kind of element") + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". If it is a not a misconfiguration, you should create a format file to handle this kind of element") else self.format_event[category][element]() end @@ -161,12 +183,14 @@ function EventQueue:format_metric_host(metric) self.sc_logger:debug("[EventQueue:format_metric_host]: start format_metric host") local event = self.sc_event.event - local metric_key = tostring(event.host_id) .. ':0:' .. tostring(metric.metric_name) + local metric_key = "metric_" .. mime.b64(tostring(event.host_id) .. ':0:' .. tostring(metric.metric_name)) event.formated_event = { metric_name = metric.metric_name, metric_value = metric.value, metric_key = metric_key, last_check = event.last_check, + host_id = event.host_id, + service_id = 0, status = "status value=" .. tostring(event.state) .. ",host_id=" .. 
tostring(event.host_id) .. " " .. tostring(event.last_check) } self:add() @@ -180,12 +204,14 @@ end function EventQueue:format_metric_service(metric) self.sc_logger:debug("[EventQueue:format_metric_service]: start format_metric service") local event = self.sc_event.event - local metric_key = tostring(event.host_id) .. ':' .. tostring(event.cache.service.service_id) .. ':' .. tostring(metric.metric_name) + local metric_key = "metric_" .. mime.b64(tostring(event.host_id) .. ':' .. tostring(event.cache.service.service_id) .. ':' .. tostring(metric.metric_name)) event.formated_event = { metric_name = metric.metric_name, metric_value = metric.value, metric_key = metric_key, last_check = event.last_check, + host_id = event.host_id, + service_id = event.service_id, status = "status value=" .. tostring(event.state) .. ",host_id=" .. tostring(event.host_id) .. ",service_id=" .. tostring(event.cache.service.service_id) .. " " .. tostring(event.last_check) } self:add() @@ -201,14 +227,14 @@ function EventQueue:add() local element = self.sc_event.event.element self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) - .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) self.sc_common:dumper(self.sc_event.event.formated_event) self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event - self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. 
tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -------------------------------------------------------------------------------- @@ -219,7 +245,7 @@ end -------------------------------------------------------------------------------- function EventQueue:build_payload(payload, event) if not payload then - payload = {event} + payload = { event } else table.insert(payload, event) end @@ -245,11 +271,15 @@ function EventQueue:send_data(payload, queue_metadata) local data_binary = '' for index, payload_event in ipairs(payload) do if not metrics[payload_event.metric_key] then + if payload_event.host_id == 7423 and payload_event.service_id == 0 and payload_event.metric_name == "rtmin" then + self.sc_logger:notice("send_data: No metric_id found for: host_id:" .. tostring(payload_event.host_id) .. ", service_id: " .. tostring(payload_event.service_id) .. ", metric name: " .. tostring(payload_event.metric_name)) + end payload_event.retry = 1 + table.insert(events_retry, payload_event) else data_binary = data_binary .. payload_event.metric_name .. ",metric_id=" .. metrics[payload_event.metric_key] .. " value=" .. payload_event.metric_value .. " " .. payload_event.last_check .. "\n" - data_binary = data_binary .. payload_event.status .. "\n" + data_binary = data_binary .. payload_event.status .. "\n" end end @@ -257,7 +287,7 @@ function EventQueue:send_data(payload, queue_metadata) if not metrics[retry_event.metric_key] then retry_event.retry = retry_event.retry + 1 if retry_event.retry > 3 then - self.sc_logger:debug("Retry limit reached for key: " .. retry_event.metric_key) + self.sc_logger:warning("Retry limit reached for key: " .. retry_event.metric_key) data_binary = data_binary .. retry_event.metric_name .. " value=" .. retry_event.metric_value .. " " .. retry_event.last_check .. "\n" data_binary = data_binary .. retry_event.status .. 
"\n" table.remove(events_retry, index) @@ -282,15 +312,15 @@ function EventQueue:send_data(payload, queue_metadata) local http_response_body = "" local http_request = curl.easy() - :setopt_url(url) - :setopt_writefunction( - function (response) + :setopt_url(url) + :setopt_writefunction( + function(response) http_response_body = http_response_body .. tostring(response) end - ) - :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) - :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.verify_certificate) - :setopt(curl.OPT_HTTPHEADER,queue_metadata.headers) + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.verify_certificate) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) -- set proxy address configuration if (self.sc_params.params.proxy_address ~= '') then @@ -355,12 +385,25 @@ end -- @return {boolean} -------------------------------------------------------------------------------- function write (event) + if event._type == 196617 or event._type == 196609 then + local mname = event.name + local metric_key = "" + mname = string.gsub(mname, queue.sc_params.params.metric_name_regex, queue.sc_params.params.metric_replacement_character) + --if event.host_id == 7423 then + -- queue.sc_logger:notice("metric_key for host 7423: " .. tostring(metric_key) .. ", dumper write func: " .. queue.sc_common:dumper(event) ) + -- end + --local metric_key = tostring(event.host_id) .. ':' .. tostring(event.service_id) .. ':' .. tostring(event.name) + if not event.service_id or event.service_id == 0 then + metric_key = "metric_" .. mime.b64(tostring(event.host_id) .. ':0:' .. mname) + else + metric_key = "metric_" .. mime.b64(tostring(event.host_id) .. ':' .. event.service_id .. ':' .. mname) + end - if event._type == 196617 then - local metric_key = tostring(event.host_id) .. ':' .. tostring(event.service_id) .. ':' .. 
tostring(event.name) -- check if the metric is already in the metrics table if not metrics[metric_key] then + queue.sc_logger:notice("write: no metric_id found for 'metric_key': " .. tostring(metric_key) .. ", info: " .. tostring(event.host_id) .. ':' .. tostring(event.service_id) .. ':' .. mname .. ", going to save metric_id : " .. tostring(event.metric_id) .. " in sqlite db and memory") metrics[metric_key] = event.metric_id + queue.sc_storage:set(metric_key, "metric_id", event.metric_id) end end @@ -380,14 +423,14 @@ function write (event) if queue.sc_metrics:is_valid_metric_event() then queue:format_accepted_event() end - --- log why the event has been dropped + --- log why the event has been dropped else queue.sc_logger:debug("dropping event because element is not valid. Event element is: " - .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) end else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " - .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) end return flush() @@ -423,4 +466,4 @@ function flush() -- there are events in the queue but they were not ready to be send return false -end \ No newline at end of file +end diff --git a/modules/centreon-stream-connectors-lib/sc_params.lua b/modules/centreon-stream-connectors-lib/sc_params.lua index df1ba98c..37ce7074 100644 --- a/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/modules/centreon-stream-connectors-lib/sc_params.lua @@ -2,7 +2,7 @@ broker_api_version = 2 ---- +--- -- Module to help initiate a stream connector with all paramaters -- @module sc_params -- @alias sc_params @@ -22,20 +22,20 @@ function sc_params.new(common, logger) -- initiate mandatory libs self.logger = logger - if not self.logger then + if not self.logger then self.logger = sc_logger.new() end self.common = common -- get the version of the bbdo protocol (only the first digit, nothing else matters) self.bbdo_version = self.common:get_bbdo_version() - + -- initiate params self.params = { -- filter broker events accepted_categories = "neb,bam", -- could be: neb,storage,bam (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#event-categories) accepted_elements = "host_status,service_status,ba_status", -- could be: metric,host_status,service_status,ba_event,kpi_event" (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#neb) - + -- filter status host_status = "0,1,2", -- = ok, down, unreachable service_status = "0,1,2,3", -- = ok, warning, critical, unknown, @@ -44,13 +44,13 @@ function sc_params.new(common, logger) ack_service_status = "", -- will use service_status if empty dt_host_status = "", -- will use host_status if empty dt_service_status = "", -- will use service_status if empty - + -- filter state type hard_only = 1, acknowledged = 0, in_downtime = 0, flapping = 0, - + -- objects filter accepted_hostgroups = "", rejected_hostgroups = "", @@ 
-77,24 +77,15 @@ function sc_params.new(common, logger) -- filter anomalous events skip_anon_events = 1, skip_nil_id = 1, - enable_bam_host = 0, - enable_broker_cache_counter_check = 0, - - -- centreon database information (only used if you set enable_broker_cache_counter_check to 1 - centreon_db_name = "centreon", - centreon_db_address = "127.0.0.1", - centreon_db_port = 3306, - centreon_db_user = "centreon", - centreon_db_password = "", -- enable or disable dedup enable_host_status_dedup = 1, enable_service_status_dedup = 1, - + -- communication parameters max_buffer_size = 1, max_buffer_age = 5, --deprecated - max_all_queues_age = 5, + max_all_queues_age = 60, send_mixed_events = 1, -- connection parameters @@ -102,6 +93,15 @@ function sc_params.new(common, logger) allow_insecure_connection = 0, --deprecated (confusing naming) verify_certificate = 1, + enable_broker_cache_counter_check = 0, + + -- centreon database information (only used if you set enable_broker_cache_counter_check to 1 + centreon_db_name = "centreon", + centreon_db_address = "127.0.0.1", + centreon_db_port = 3306, + centreon_db_user = "centreon", + centreon_db_password = "", + -- proxy parameters proxy_address = "", proxy_port = "", @@ -119,6 +119,10 @@ function sc_params.new(common, logger) -- custom code parameters custom_code_file = "", + -- storage parameters + storage_backend = "broker", + ["sc_storage.sqlite.db_file"] = "/var/lib/centreon-broker/stream-connector-storage.sdb", + -- time parameters local_time_diff_from_utc = os.difftime(os.time(), os.time(os.date("!*t", os.time()))), timestamp_conversion_format = "%Y-%m-%d %X", -- will print 2021-06-11 10:43:38 @@ -134,6 +138,8 @@ function sc_params.new(common, logger) logfile = "", log_level = "", log_curl_commands = 0, + enable_trace = 0, + trace_host_id_list = "", -- metric metric_name_regex = "no_forbidden_character_to_replace", @@ -954,7 +960,7 @@ function ScParams:param_override(user_params) return end - local keywords_to_hide = 
{"pass", "key"} + local keywords_to_hide = { "pass", "key" } local logged_param_value for param_name, param_value in pairs(user_params) do @@ -987,7 +993,6 @@ function ScParams:check_params() self.params.flapping = self.common:check_boolean_number_option_syntax(self.params.flapping, 0) self.params.skip_anon_events = self.common:check_boolean_number_option_syntax(self.params.skip_anon_events, 1) self.params.skip_nil_id = self.common:check_boolean_number_option_syntax(self.params.skip_nil_id, 1) - self.params.enable_bam_host = self.common:check_boolean_number_option_syntax(self.params.enable_bam_host, 0) self.params.accepted_authors = self.common:if_wrong_type(self.params.accepted_authors, "string", "") self.params.rejected_authors = self.common:if_wrong_type(self.params.rejected_authors, "string", "") self.params.accepted_hostgroups = self.common:if_wrong_type(self.params.accepted_hostgroups, "string", "") @@ -1028,7 +1033,7 @@ function ScParams:check_params() self.params.metric_replacement_character = self.common:ifnil_or_empty(self.params.metric_replacement_character, "_") self.params.output_size_limit = self.common:if_wrong_type(self.params.output_size_limit, "number", "") self.params.delta_host_status_change_allow = self.common:if_wrong_type(self.params.delta_host_status_change_allow, "number", 20) - + if self.params.accepted_hostgroups ~= '' and self.params.rejected_hostgroups ~= '' then self.logger:error("[sc_params:check_params]: Parameters accepted_hostgroups and rejected_hostgroups cannot be used together. 
None will be used.") end @@ -1046,14 +1051,15 @@ function ScParams:check_params() end -- handle some dedicated parameters that can use lua pattern (such as accepted_hosts and accepted_services) - self:build_and_validate_filters_pattern({"accepted_hosts", "accepted_services"}) + self:build_and_validate_filters_pattern({ "accepted_hosts", "accepted_services" }) + self:build_trace_host_list(self.params.trace_host_id_list) end --- get_kafka_params: retrieve the kafka parameters and store them the self.params.kafka table -- @param kafka_config (object) object instance of kafka_config -- @param params (table) the list of parameters from broker web configuration function ScParams:get_kafka_params(kafka_config, params) - local keywords_to_hide = {"pass", "key"} + local keywords_to_hide = { "pass", "key" } local logged_param_value for param_name, param_value in pairs(params) do @@ -1062,14 +1068,14 @@ function ScParams:get_kafka_params(kafka_config, params) if string.find(param_name, "^_sc_kafka_") ~= nil then -- remove the _sc_kafka_ prefix and store the param in a dedicated kafka table kafka_config[string.gsub(param_name, "_sc_kafka_", "")] = param_value - + for _, must_be_hidden_param in pairs(keywords_to_hide) do if string.match(param_name, must_be_hidden_param) then logged_param_value = "******" end - - self.logger:notice("[sc_param:get_kafka_params]: " .. tostring(param_name) - .. " parameter with value " .. tostring(logged_param_value) .. " added to kafka_config") + + self.logger:notice("[sc_param:get_kafka_params]: " .. tostring(param_name) + .. " parameter with value " .. tostring(logged_param_value) .. " added to kafka_config") end end end @@ -1082,8 +1088,8 @@ end function ScParams:is_mandatory_config_set(mandatory_params, params) for index, mandatory_param in ipairs(mandatory_params) do if not params[mandatory_param] or params[mandatory_param] == "" then - self.logger:error("[sc_param:is_mandatory_config_set]: " .. tostring(mandatory_param) - .. 
" parameter is not set in the stream connector web configuration (or value is empty)") + self.logger:error("[sc_param:is_mandatory_config_set]: " .. tostring(mandatory_param) + .. " parameter is not set in the stream connector web configuration (or value is empty)") return false end @@ -1101,10 +1107,10 @@ function ScParams:load_event_format_file(json_string) -- return if there is no file configured if self.params.format_file == "" or self.params.format_file == nil then return false - end - + end + local retval, content = self.common:load_json_file(self.params.format_file) - + -- return if we couldn't load the json file if not retval then return false @@ -1115,16 +1121,16 @@ function ScParams:load_event_format_file(json_string) local elements = self.params.bbdo.elements local tpl_category local tpl_element - + -- store format template in their appropriate category/element table for cat_el, format in pairs(content) do tpl_category, tpl_element = string.match(cat_el, "^(%w+)_(.*)") - + -- convert back to json if if json_string then format = broker.json_encode(format) end - + self.params.format_template[categories[tpl_category].id][elements[tpl_element].id] = format end @@ -1138,14 +1144,14 @@ function ScParams:load_custom_code_file(custom_code_file) -- return if there is no file configured if self.params.custom_code_file == "" or self.params.custom_code_file == nil then return true - end - + end + local file = io.open(custom_code_file, "r") -- return false if we can't open the file if not file then self.logger:error("[sc_params:load_custom_code_file]: couldn't open file " - .. tostring(custom_code_file) .. ". Make sure your file is there and that it is readable by centreon-broker") + .. tostring(custom_code_file) .. ". 
Make sure your file is there and that it is readable by centreon-broker") return false end @@ -1157,11 +1163,11 @@ function ScParams:load_custom_code_file(custom_code_file) for return_value in string.gmatch(file_content, "return (.-)\n") do if return_value ~= "self, true" and return_value ~= "self, false" then self.logger:error("[sc_params:load_custom_code_file]: your custom code file: " .. tostring(custom_code_file) - .. " is returning wrong values (" .. tostring(return_value) .. "). It must only return 'self, true' or 'self, false'") + .. " is returning wrong values (" .. tostring(return_value) .. "). It must only return 'self, true' or 'self, false'") return false end end - + -- check if it is valid lua code local custom_code, error = loadfile(custom_code_file) @@ -1181,7 +1187,7 @@ function ScParams:build_accepted_elements_info() -- list all accepted elements for _, accepted_element in ipairs(self.common:split(self.params.accepted_elements, ",")) do -- try to find element in known categories - for category_name, category_info in pairs(categories) do + for category_name, category_info in pairs(categories) do if self.params.element_mapping[category_info.id][accepted_element] then -- if found, store information in a dedicated table self.params.accepted_elements_info[accepted_element] = { @@ -1202,7 +1208,7 @@ end function ScParams:validate_pattern_param(param_name, param_value) if not self.common:validate_pattern(param_value) then self.logger:error("[sc_params:validate_pattern_param]: couldn't validate Lua pattern: " .. tostring(param_value) - .. " for parameter: " .. tostring(param_name) .. ". The filter will be reset to an empty value.") + .. " for parameter: " .. tostring(param_name) .. ". 
The filter will be reset to an empty value.") return "" end @@ -1230,18 +1236,18 @@ function ScParams:build_and_validate_filters_pattern(param_list) -- this option is here to overcome the lack of alternation operator ("|" character in POSIX regex) in Lua regex if self.params[param_name .. "_enable_split_pattern"] == 1 then temp_pattern_table = self.common:split(self.params[param_name], self.params[param_name .. "_split_character"]) - + for index, temp_pattern in ipairs(temp_pattern_table) do -- each sub pattern must be a valid standalone pattern. We are not here to develop regex in Lua if self.common:is_valid_pattern(temp_pattern) then table.insert(self.params[param_name .. "_pattern_list"], temp_pattern) self.logger:notice("[sc_params:build_accepted_filters_pattern]: adding " .. tostring(temp_pattern) - .. " to the list of filtering patterns for parameter: " .. param_name) + .. " to the list of filtering patterns for parameter: " .. param_name) else -- if the sub pattern is not valid, just ignore it - self.logger:error("[sc_params:build_accepted_filters_pattern]: ignoring pattern for param: " - .. param_name .. " because after splitting the string:" .. param_name - .. ", we end up with the following pattern: " .. tostring(temp_pattern) .. " which is not a valid Lua pattern") + self.logger:error("[sc_params:build_accepted_filters_pattern]: ignoring pattern for param: " + .. param_name .. " because after splitting the string:" .. param_name + .. ", we end up with the following pattern: " .. tostring(temp_pattern) .. " which is not a valid Lua pattern") end end else @@ -1250,4 +1256,24 @@ function ScParams:build_and_validate_filters_pattern(param_list) end end -return sc_params \ No newline at end of file +function ScParams:build_trace_host_list(param_value) + if self.params.enable_trace == 1 then + if param_value == "" then + self.logger:notice("[sc_params:build_trace_host_list]: enable_trace param is set to 1 but no trace_host_id_list provided. 
Trace is going to be disabled") + self.params.enable_trace = 0 + return + end + local tmp_trace_list = self.common:split(param_value) + local trace_list = {} + local host_info + + for index, host_id in ipairs(tmp_trace_list) do + trace_list[tonumber(host_id)] = tonumber(host_id) + end + self.params.trace_host_id_list = trace_list + elseif self.params.trace_host_id_list ~= "" and self.params.enable_trace == 0 then + self.logger:notice("[sc_params:build_trace_host_list]: trace_host_id_list is not empty but enable_trace param is set to 0. trace_host_id_list param is going to be ignored") + end +end + +return sc_params diff --git a/modules/centreon-stream-connectors-lib/sc_storage.lua b/modules/centreon-stream-connectors-lib/sc_storage.lua new file mode 100644 index 00000000..056cb301 --- /dev/null +++ b/modules/centreon-stream-connectors-lib/sc_storage.lua @@ -0,0 +1,199 @@ +--- +-- a wrapper to handle any storage system for stream connectors +-- @module sc_storage +-- @module sc_storage + +local sc_storage = {} +local ScStorage = {} + +local sc_common = require("centreon-stream-connectors-lib.sc_common") + +--- sc_storage.new: sc_storage constructor +-- @param common (object) a sc_common instance +-- @param logger (object) a sc_logger instance +-- @param params (table) the params table of the stream connector +function sc_storage.new(common, logger, params) + local self = {} + + self.sc_common = common + self.sc_logger = logger + self.params = params + + -- list of lua patterns used to check if an object is a valid one + self.storage_objects = { + "host_%d+", + "service_%d+_%d+", + "ba_%d+", + "metric_.*" + } + + -- make sure we are able to load the desired storage backend. If not, fall back to the one provided by broker + if pcall(require, "centreon-stream-connectors-lib.storage_backends.sc_storage_" .. params.storage_backend) then + local storage_backend = require("centreon-stream-connectors-lib.storage_backends.sc_storage_" .. 
params.storage_backend) + self.storage_backend = storage_backend.new(self.sc_common, logger, params) + else + self.sc_logger:error("[sc_storage:new]: Couldn't load storage backend: " .. tostring(params.storage_backend) + .. ". Make sure that the file sc_storage_" .. tostring(params.storage_backend) .. ".lua exists on your server." + .. " The stream connector is going to use the broker storage backend.") + self.storage_backend = require("centreon-stream-connectors-lib.storage_backends.sc_storage_broker") + end + + setmetatable(self, { __index = ScStorage }) + return self +end + +--- is_valid_storage_object: make sure that the object that needs an interraction with the storage is an object that can have storage +-- @param object_id (string) the object that must be checked +-- @return (boolean) true if valid, false otherwise +function ScStorage:is_valid_storage_object(object_id) + for _, accepted_object_format in ipairs(self.storage_objects) do + if string.match(object_id, accepted_object_format) then + self.sc_logger:debug("[sc_storage:is_valid_storage_object]: object_id: " .. tostring(object_id) + .. " matched object format: " .. accepted_object_format) + return true + end + end + + self.sc_logger:error("[sc_storage:is_valid_storage_object]: object id: " .. tostring(object_id) + .. 
" is not a valid object_id.") + return false +end + +--- set: set an object property in the storage +-- @param object_id (string) the object with the property that must be set +-- @param property (string) the name of the property +-- @param value (string|number|boolean) the value of the property +-- @return (boolean) true if value properly set in storage, false otherwise +function ScStorage:set(object_id, property, value) + if not self:is_valid_storage_object(object_id) then + self.sc_logger:error("[sc_storage:set]: Object is invalid") + return false + end + + return self.storage_backend:set(object_id, property, value) +end + +--- set_multiple: set multiple object properties in the storage +-- @param object_id (string) the object with the property that must be set +-- @param properties (table) a table of properties and their values +-- @param value (string|number|boolean) the value of the property +-- @return (boolean) true if value properly set in storage, false otherwise +function ScStorage:set_multiple(object_id, properties) + if not self:is_valid_storage_object(object_id) then + self.sc_logger:error("[sc_storage:set_multiple]: Object is invalid") + return false + end + + if type(properties) ~= "table" then + self.sc_logger:error("[sc_storage:set_multiple]: properties parameter is not a table" + .. ". Received properties: " .. 
self.sc_common:dumper(properties)) + return false + end + + return self.storage_backend:set_multiple(object_id, properties) +end + +--- get: get an object property that is stored in the storage +-- @param object_id (string) the object with the property that must be retrieved +-- @param property (string) the name of the property +-- @return (boolean) true if value properly retrieved from storage, false otherwise +-- @return (string) empty string if status false, value otherwise +function ScStorage:get(object_id, property) + if not self:is_valid_storage_object(object_id) then + self.sc_logger:error("[sc_storage:get]: Object is invalid") + return false + end + + local status, value = self.storage_backend:get(object_id, property) + + if not status then + self.sc_logger:error("[sc_storage:get]: couldn't get property in storage. Object id: " .. tostring(object_id) + .. ", property name: " .. tostring(property)) + end + + return status, value +end + +--- get_multiple: retrieve a list of properties for an object +-- @param object_id (string) the object with the property that must be retrieved +-- @param properties (table) a list of properties +-- @return (boolean) true if value properly retrieved from storage, false otherwise +-- @return (table) empty table if status false, table of properties and their value otherwise +function ScStorage:get_multiple(object_id, properties) + if not self:is_valid_storage_object(object_id) then + self.sc_logger:error("[sc_storage:get]: Object is invalid") + return false + end + + if type(properties) ~= "table" then + self.sc_logger:error("[sc_storage:get_multiple]: properties parameter is not a table" + .. ". Received properties: " .. self.sc_common:dumper(properties)) + return false + end + + local status, value = self.storage_backend:get_multiple(object_id, properties) + + if not status then + self.sc_logger:error("[sc_storage:get]: couldn't get property in storage. Object id: " .. tostring(object_id) + .. ", property name: " .. 
self.sc_common:dumper(properties)) + end + + return status, value +end + +--- delete: delete an object property in the storage +-- @param object_id (string) the object with the property that must be deleted +-- @param property (string) the name of the property +-- @return (boolean) true if value properly deleted in storage, false otherwise +function ScStorage:delete(object_id, property) + if not self:is_valid_storage_object(object_id) then + self.sc_logger:error("[sc_storage:delete]: Object is invalid") + return false + end + + return self.storage_backend:delete(object_id, property) +end + +--- delete_multiple: delete an object properties in the storage +-- @param object_id (string) the object with the property that must be deleted +-- @param properties (table) a list of properties +-- @return (boolean) true if values properly deleted in storage, false otherwise +function ScStorage:delete_multiple(object_id, properties) + if not self:is_valid_storage_object(object_id) then + self.sc_logger:error("[sc_storage:delete]: Object is invalid") + return false + end + + if type(properties) ~= "table" then + self.sc_logger:error("[sc_storage:delete_multiple]: properties parameter is not a table" + .. ". Received properties: " .. 
self.sc_common:dumper(properties)) + return false + end + + return self.storage_backend:delete_multiple(object_id, property) +end + +--- show: show (in the log file) all stored properties of an object +-- @param object_id (string) the object with the property that must be shown +-- @return (boolean) true if object properties are retrieved, false otherwise +function ScStorage:show(object_id) + if not self:is_valid_storage_object(object_id) then + self.sc_logger:error("[sc_storage:show]: Object is invalid") + return false + end + + return self.storage_backend:show(object_id) +end + +--- clear: delete all stored information in storage +-- @return (boolean) true if storage has been deleted, false otherwise +function ScStorage:clear() + return self.storage_backend:clear() +end + +function ScStorage:get_all_values_from_property(property) + return self.storage_backend:get_all_values_from_property(property) +end + +--- TODO dump to extract the whole storage +return sc_storage diff --git a/modules/centreon-stream-connectors-lib/sc_storage_sqlite.lua b/modules/centreon-stream-connectors-lib/sc_storage_sqlite.lua new file mode 100644 index 00000000..4e7d251d --- /dev/null +++ b/modules/centreon-stream-connectors-lib/sc_storage_sqlite.lua @@ -0,0 +1,368 @@ +--- +-- a storage module that is using LuaSqlite3 +-- @module sc_storage_sqlite +-- @module sc_storage_sqlite + +local sc_storage_sqlite = {} +local ScStorageSqlite = {} + +local sqlite = require("lsqlite3") +local sc_common = require("centreon-stream-connectors-lib.sc_common") + +--- sc_storage_sqlite.new: sc_storage_sqlite constructor +-- @param common (object) a sc_common instance +-- @param logger (object) a sc_logger instance +-- @param params (table) the params table of the stream connector +function sc_storage_sqlite.new(common, logger, params) + local self = {} + + self.sc_common = common + self.sc_logger = logger + self.params = params + + self.sqlite = sqlite.open(params["sc_storage.sqlite.db_file"]) + + if not 
self.sqlite:isopen() then + self.sc_logger:error("[sc_storage_sqlite:new]: couldn't open sqlite database: " .. tostring(params["sc_storage.sqlite.db_file"])) + else + self.sc_logger:notice("[sc_storage_sqlite:new]: successfully loaded sqlite storage database: " .. tostring(params["sc_storage.sqlite.db_file"]) + .. ". Status is: " .. tostring(self.sqlite:isopen())) + end + + self.last_query_result = {} + + self.callback_functions = { + get_query_result = function(convert_data, column_count, column_value, column_name) + return self:get_query_result(convert_data, column_count, column_value, column_name) end + } + + -- every functions that can be used to convert data retrieved from sc_storage table + self.convert_data_type = { + string = function(data) return tostring(data) end, + number = function(data) return tonumber(data) end, + boolean = function(data) + if data == "true" then + return true + end + + return false + end, + table = function(data) return broker.json_decode(data) end + } + + -- when you want to convert a data stored in the sdb, you need a column with the value to convert and another telling the expected data type + self.required_columns_for_data_type_conversion = { + value_column = "value", + type_column = "data_type" + } + + setmetatable(self, { __index = ScStorageSqlite }) + self:check_storage_table() + return self +end + +--- sc_storage_sqlite:get_query_result: this is a callback function. It is called for each row found by a sql query +-- @param convert_data (boolean) When set to true, values from the column "value" will have their type converted according to the "data_type" column. Query must be compatible with that. 
+-- @param column_count (number) the number of columns from the sql query +-- @param column_value (string) the value of a column +-- @param column_name (string) the name of the column +-- @return 0 (number) this is the required return code otherwise the sqlite:exec function will stop calling this callback function +function ScStorageSqlite:get_query_result(convert_data, column_count, column_value, column_name) + local row = {} + + for i = 1, column_count do + row[column_name[i]] = column_value[i] + end + + -- only convert data when possible + if convert_data + and self.convert_data_type[row.data_type] + and row[self.required_columns_for_data_type_conversion.value_column] + and row[self.required_columns_for_data_type_conversion.type_column] + then + row.value = self.convert_data_type[row.data_type](row.value) + end + + -- store results in a "global" variable + self.last_query_result[#self.last_query_result + 1] = row + return 0 +end + +--- sc_storage_sqlite:check_storage_table: check if the sc_storage table exists and, if not, create it. +function ScStorageSqlite:check_storage_table() + local query = "SELECT name FROM sqlite_master WHERE type='table' AND name='sc_storage';" + + self:run_query(query, true, false) + + if #self.last_query_result == 1 then + self.sc_logger:debug("[sc_storage_sqlite:check_storage_table]: sqlite table sc_storage exists") + else + self.sc_logger:notice("[sc_storage_sqlite:check_storage_table]: sqlite table sc_storage does not exist. We are going to create it") + self:create_storage_table() + end +end + +--- sc_storage_sqlite:create_storage_table: create the sc_storage table. 
+function ScStorageSqlite:create_storage_table() + local query = [[ + CREATE TABLE sc_storage ( + object_id TEXT, + property TEXT, + value TEXT, + data_type TEXT, + PRIMARY KEY (object_id, property) + ) + ]] + + self.sqlite:exec(query) +end + +--- sc_storage_sqlite:run_query: execute the given query +-- @param query (string) the query that must be run +-- @param get_result (boolean) When set to true, the query results will be stored in the self.last_query_result table +-- @param convert_data (boolean) When set to true, values from the column "value" will have their type converted according to the "data_type" column. Query must be compatible with that. +-- @return (boolean) false if query failed, true otherwise +function ScStorageSqlite:run_query(query, get_result, convert_data) + -- flush old stored query results + self.last_query_result = {} + + if not get_result then + self.sqlite:exec(query) + else + self.sqlite:exec(query, self.callback_functions.get_query_result, convert_data) + end + + if self.sqlite:errcode() ~= 0 then + self.sc_logger:error("[sc_storage_sqlite:run_query]: couldn't run query: " .. tostring(query) + .. ". [SQL ERROR CODE]: " .. self.sqlite:errcode() .. ". [SQL ERROR MESSAGE]: " .. tostring(self.sqlite:errmsg())) + return false + else + self.sc_logger:debug("[sc_storage_sqlite:run_query]: successfully executed query: " .. tostring(query)) + end + + return true +end + +--- sc_storage_sqlite:set: insert or update an object property value in the sc_storage table +-- @param object_id (string) the object identifier. 
+-- @param property (string) the name of the property +-- @param value (string, number, boolean, table) the value of the property +-- @return (boolean) false if we couldn't store the information in the storage, true otherwise +function ScStorageSqlite:set(object_id, property, value) + local data_type = type(value) + + if data_type == "table" then + value = broker.json_encode(value) + end + + value = string.gsub(tostring(value), "'", " ") + local query = "INSERT OR REPLACE INTO sc_storage VALUES ('" .. object_id .. "', '" .. property .. "', '" .. value .. "', '" .. data_type .. "');" + + if not self:run_query(query) then + self.sc_logger:error("[sc_storage_sqlite:set]: couldn't insert property in storage. Object id: " .. tostring(object_id) + .. ", property name: " .. tostring(property) .. ", property value: " .. tostring(value)) + return false + end + + return true +end + +--- sc_storage_sqlite:set_multiple: insert or update multiple object properties value in the sc_storage table +-- @param object_id (string) the object identifier. +-- @param properties (table) a table of properties and their values +-- @return (boolean) false if we couldn't store the information in the storage, true otherwise +function ScStorageSqlite:set_multiple(object_id, properties) + local counter = 0 + local sql_values = "" + local data_type + + for property, value in pairs(properties) do + data_type = type(value) + + if data_type == "table" then + value = broker.json_encode(value) + end + + value = string.gsub(tostring(value), "'", " ") + + if counter == 0 then + sql_values = "('" .. object_id .. "', '" .. property .. "', '" .. value .. "', '" .. data_type .. "')" + counter = counter + 1 + else + sql_values = sql_values .. ", " .. "('" .. object_id .. "', '" .. property .. "', '" .. value .. "', '" .. data_type .. "')" + end + end + + local query = "INSERT OR REPLACE INTO sc_storage VALUES " .. sql_values .. 
";" + + if not self:run_query(query) then + self.sc_logger:error("[sc_storage_sqlite:set_multiple]: couldn't insert properties in storage. Object id: " .. tostring(object_id) + .. ", properties: " .. self.sc_common:dumper(properties)) + return false + end + + return true +end + +--- sc_storage_sqlite:get: retrieve a single property value of an object +-- @param object_id (string) the object identifier. +-- @param property (string) the name of the property +-- @return (boolean) false if we couldn't get the information from the storage, true otherwise +-- @return value (string, number, boolean) the value of the property (an empty string when first return is false or if we didn't find a value for this object property) +function ScStorageSqlite:get(object_id, property) + local query = "SELECT value, data_type FROM sc_storage WHERE property = '" .. property .. "' AND object_id = '" .. object_id .. "';" + + if not self:run_query(query, true, true) then + self.sc_logger:error("[sc_storage_sqlite:get]: couldn't get property in storage. Object id: " .. tostring(object_id) + .. ", property name: " .. tostring(property)) + return false, "" + end + + local value = "" + + -- if we didn't already store information in the storage, the last_query_result could be an empty table + if self.last_query_result[1] then + value = self.last_query_result[1].value + end + + return true, value +end + +--- sc_storage_sqlite:get_multiple: retrieve a list of properties for an object +-- @param object_id (string) the object identifier. 
+-- @param properties (table) a table of properties to retreive +-- @return (boolean) false if we couldn't get the information from the storage, true otherwise +-- @return values (table) a table of properties and their value if true, empty table otherwise +function ScStorageSqlite:get_multiple(object_id, properties) + local counter = 0 + local sql_properties_value = "" + + for _, property in ipairs(properties) do + if counter == 0 then + sql_properties_value = "'" .. property .. "'" + counter = counter + 1 + else + sql_properties_value = sql_properties_value .. ", '" .. property .. "'" + end + end + + local query = "SELECT property, value, data_type FROM sc_storage WHERE property IN (" .. sql_properties_value .. ") AND object_id = '" .. object_id .. "';" + + if not self:run_query(query, true, true) then + self.sc_logger:error("[sc_storage_sqlite:get_multiple]: couldn't get properties in storage. Object id: " .. tostring(object_id) + .. ", properties: " .. self.sc_common:dumper(properties)) + return false, {} + end + + local values = {} + + -- if we didn't already store information in the storage, the last_query_result could be an empty table + if self.last_query_result[1] then + for index, stored_data in pairs(self.last_query_result) do + values[stored_data.property] = stored_data.value + end + end + + return true, values +end + +--- sc_storage_sqlite:delete: delete a single property of an object +-- @param object_id (string) the object identifier. +-- @param property (string) the name of the property +-- @return (boolean) false if we couldn't delete the information from the storage, true otherwise +function ScStorageSqlite:delete(object_id, property) + local query = "DELETE FROM sc_storage WHERE property = '" .. property .. "' AND object_id = '" .. object_id .. "';" + + if not self:run_query(query) then + self.sc_logger:error("[sc_storage_sqlite:delete]: couldn't delete property in storage. Object id: " .. tostring(object_id) + .. ", property name: " .. 
tostring(property)) + return false + end + + self.sc_logger:debug("[sc_storage_sqlite:delete]: successfully deleted property in storage for object id: " .. tostring(object_id) + .. ", property name: " .. tostring(property)) + + return true +end + +--- sc_storage_sqlite:delete_multiple: delete a multiple properties of an object +-- @param object_id (string) the object identifier. +-- @param properties (table) a table of properties to retreive +-- @return (boolean) false if we couldn't delete the information from the storage, true otherwise +function ScStorageSqlite:delete_multiple(object_id, properties) + local sql_properties_value = "" + + for _, property in ipairs(properties) do + if counter == 0 then + sql_properties_value = "'" .. property .. "'" + counter = counter + 1 + else + sql_properties_value = sql_properties_value .. ", '" .. property .. "'" + end + end + + local query = "DELETE FROM sc_storage WHERE property IN (" .. sql_properties_value .. ") AND object_id = '" .. object_id .. "';" + + if not self:run_query(query) then + self.sc_logger:error("[sc_storage_sqlite:delete_multiple]: couldn't delete property in storage. Object id: " .. tostring(object_id) + .. ", properties: " .. self.sc_common:dumper(properties)) + return false + end + + self.sc_logger:debug("[sc_storage_sqlite:delete_multiple]: successfully deleted property in storage for object id: " .. tostring(object_id) + .. ", properties: " .. self.sc_common:dumper(properties)) + + return true +end + +--- sc_storage_sqlite:show: display all property values of a given object in the stream connector log file. +-- @param object_id (string) the object identifier. +-- @return (boolean) false if we couldn't display the information from the storage, true otherwise +function ScStorageSqlite:show(object_id) + local query = "SELECT * FROM sc_storage WHERE object_id = '" .. object_id .. 
"';" + + if not self:run_query(query, true) then + self.sc_logger:error("[sc_storage_sqlite:show]: couldn't show stored properties for object id: " .. tostring(object_id)) + return false + end + + self.sc_logger:notice("[sc_storage_sqlite:show]: stored properties for object id: " .. tostring(object_id) + .. ": " .. broker.json_encode(self.last_query_result)) + + return true +end + +--- sc_storage_sqlite:clear: delete everything stored in the sc_storage table. +-- @return (boolean) false if we couldn't delete data stored in the sc_storage table, true otherwise +function ScStorageSqlite:clear() + local query = "DELETE FROM sc_storage;" + + if not self:run_query(query) then + self.sc_logger:error("[sc_storage_sqlite:CLEAR]: couldn't delete storage stored in the sc_storage table") + return false + end + + return true +end + +function ScStorageSqlite:get_all_values_from_property(property) + local query = "SELECT object_id,value, data_type FROM sc_storage WHERE property='" .. tostring(property) .. "'" + if not self:run_query(query, true, true) then + self.sc_logger:error("[sc_storage_sqlite:get_all_values_from_property]: couldn't get stored properties for property: " .. tostring(property) .. ", query is: " .. 
tostring(query)) + return false + end + + local values = {} + + -- if we didn't already store information in the storage, the last_query_result could be an empty table + if self.last_query_result[1] then + for index, stored_data in pairs(self.last_query_result) do + values[stored_data.object_id] = stored_data.value + end + end + + return true, values + +end + +return sc_storage_sqlite From 45438a8f302e3f2d64d8b65078a106d4a6a041d0 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Mon, 12 May 2025 15:47:50 +0200 Subject: [PATCH 12/32] Add the ability to provide an artifact name --- .github/actions/package-nfpm/action.yml | 16 +++++++++++++++- .github/workflows/stream-connectors.yml | 1 + 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/.github/actions/package-nfpm/action.yml b/.github/actions/package-nfpm/action.yml index 1b46a31e..37600e90 100644 --- a/.github/actions/package-nfpm/action.yml +++ b/.github/actions/package-nfpm/action.yml @@ -38,6 +38,9 @@ inputs: stability: description: "Branch stability (stable, testing, unstable, canary)" required: true + artifact_name: + description: The name of the uploaded artifact + required: false runs: using: composite @@ -99,10 +102,21 @@ runs: key: ${{ inputs.cache_key }} # Add to your PR the label upload-artifacts to get packages as artifacts + - if: ${{ contains(github.event.pull_request.labels.*.name, 'upload-artifacts') }} + name: Get artifact name + id: get-artifact-name + run: | + if [ -z "${{ inputs.artifact_name }}" ]; then + echo "artifact_name=packages-${{ inputs.distrib }}" >> $GITHUB_OUTPUT + else + echo "artifact_name=${{ inputs.artifact_name }}" >> $GITHUB_OUTPUT + fi + shell: bash + - if: ${{ contains(github.event.pull_request.labels.*.name, 'upload-artifacts') }} name: Upload package artifacts uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 with: - name: packages-${{ inputs.distrib }} + name: ${{ steps.get-artifact-name.outputs.artifact_name }} path: ./*.${{ 
inputs.package_extension}} retention-days: 1 diff --git a/.github/workflows/stream-connectors.yml b/.github/workflows/stream-connectors.yml index 010c7c12..e5dc46c5 100644 --- a/.github/workflows/stream-connectors.yml +++ b/.github/workflows/stream-connectors.yml @@ -133,6 +133,7 @@ jobs: rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} stability: ${{ needs.get-environment.outputs.stability }} + artifact_name: "package-${{ matrix.connector_path }}-${{ matrix.distrib }}" deliver-rpm: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} From c530adc1703fc746e4909fe6398c62e2714b5830 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Tue, 13 May 2025 15:57:18 +0200 Subject: [PATCH 13/32] Works for customer --- .github/actions/package-nfpm/action.yml | 2 +- .../influxdb/influxdb-metrics-apiv2.lua | 55 ++--- .../sc_broker.lua | 82 ++++---- .../sc_common.lua | 131 +++--------- .../sc_event.lua | 197 ++++++++---------- .../sc_flush.lua | 44 ++-- .../sc_logger.lua | 97 +++++++-- .../sc_macros.lua | 54 ++--- .../sc_metrics.lua | 71 +++++-- .../sc_params.lua | 18 +- .../sc_storage.lua | 18 +- .../sc_storage_sqlite.lua | 28 +-- 12 files changed, 394 insertions(+), 403 deletions(-) rename modules/centreon-stream-connectors-lib/{ => storage_backends}/sc_storage_sqlite.lua (94%) diff --git a/.github/actions/package-nfpm/action.yml b/.github/actions/package-nfpm/action.yml index 37600e90..295bf030 100644 --- a/.github/actions/package-nfpm/action.yml +++ b/.github/actions/package-nfpm/action.yml @@ -105,7 +105,7 @@ runs: - if: ${{ contains(github.event.pull_request.labels.*.name, 'upload-artifacts') }} name: Get artifact name id: get-artifact-name - run: | + run: | if [ -z "${{ inputs.artifact_name }}" ]; then echo "artifact_name=packages-${{ inputs.distrib }}" >> $GITHUB_OUTPUT else diff --git 
a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua index b3141ba0..549fa0c9 100644 --- a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua +++ b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua @@ -98,36 +98,24 @@ function EventQueue.new(params) self.format_event = { [categories.neb.id] = { - [elements.host_status.id] = function() - return self:format_event_host() - end, - [elements.service_status.id] = function() - return self:format_event_service() - end + [elements.host_status.id] = function() return self:format_event_host() end, + [elements.service_status.id] = function() return self:format_event_service() end } } self.format_metric = { [categories.neb.id] = { - [elements.host_status.id] = function(metric) - return self:format_metric_host(metric) - end, - [elements.service_status.id] = function(metric) - return self:format_metric_service(metric) - end + [elements.host_status.id] = function(metric) return self:format_metric_host(metric) end, + [elements.service_status.id] = function(metric) return self:format_metric_service(metric) end } } self.send_data_method = { - [1] = function(payload, queue_metadata) - return self:send_data(payload, queue_metadata) - end + [1] = function(payload, queue_metadata) return self:send_data(payload, queue_metadata) end } self.build_payload_method = { - [1] = function(payload, event) - return self:build_payload(payload, event) - end + [1] = function(payload, event) return self:build_payload(payload, event) end } -- return EventQueue object @@ -147,9 +135,9 @@ function EventQueue:format_accepted_event() -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file if not self.format_event[category][element] then self.sc_logger:error("[format_event]: You are trying to format an event with category: " - .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " - .. 
tostring(self.sc_params.params.reverse_element_mapping[category][element]) - .. ". If it is a not a misconfiguration, you should create a format file to handle this kind of element") + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". If it is a not a misconfiguration, you should create a format file to handle this kind of element") else self.format_event[category][element]() end @@ -227,14 +215,14 @@ function EventQueue:add() local element = self.sc_event.event.element self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) - .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) self.sc_common:dumper(self.sc_event.event.formated_event) self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -------------------------------------------------------------------------------- @@ -259,10 +247,10 @@ function EventQueue:send_data(payload, queue_metadata) local params = self.sc_params.params local url = params.http_server_protocol .. "://" .. params.http_server_address .. ":" .. tostring(params.http_server_port) - .. "/write?u=" .. tostring(params.influxdb_username) - .. "&p=" .. tostring(params.influxdb_password) - .. "&db=" .. 
tostring(params.influxdb_database) - .. "&precision=s" + .. "/write?u=" .. tostring(params.influxdb_username) + .. "&p=" .. tostring(params.influxdb_password) + .. "&db=" .. tostring(params.influxdb_database) + .. "&precision=s" queue_metadata.headers = { "content-type: text/plain; charset=utf-8" @@ -287,7 +275,8 @@ function EventQueue:send_data(payload, queue_metadata) if not metrics[retry_event.metric_key] then retry_event.retry = retry_event.retry + 1 if retry_event.retry > 3 then - self.sc_logger:warning("Retry limit reached for key: " .. retry_event.metric_key) + self.sc_logger:error("send_data: retry limit reached for metric_key: " .. retry_event.metric_key .. " ; metric name ='" .. retry_event.metric_name .. "' ; metric value='" .. retry_event.metric_value .. "'") + --self.sc_logger:error("Retry limit reached for key: " .. retry_event.metric_key) data_binary = data_binary .. retry_event.metric_name .. " value=" .. retry_event.metric_value .. " " .. retry_event.last_check .. "\n" data_binary = data_binary .. retry_event.status .. "\n" table.remove(events_retry, index) @@ -314,9 +303,9 @@ function EventQueue:send_data(payload, queue_metadata) local http_request = curl.easy() :setopt_url(url) :setopt_writefunction( - function(response) - http_response_body = http_response_body .. tostring(response) - end + function(response) + http_response_body = http_response_body .. tostring(response) + end ) :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.verify_certificate) @@ -426,11 +415,11 @@ function write (event) --- log why the event has been dropped else queue.sc_logger:debug("dropping event because element is not valid. Event element is: " - .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + .. 
tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) end else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " - .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) end return flush() diff --git a/modules/centreon-stream-connectors-lib/sc_broker.lua b/modules/centreon-stream-connectors-lib/sc_broker.lua index e4574755..5d263949 100644 --- a/modules/centreon-stream-connectors-lib/sc_broker.lua +++ b/modules/centreon-stream-connectors-lib/sc_broker.lua @@ -1,6 +1,6 @@ #!/usr/bin/lua ---- +--- -- Module with Centreon broker related methods for easier usage -- @module sc_broker -- @alias sc_broker @@ -14,6 +14,8 @@ local ScBroker = {} function sc_broker.new(params, logger) local self = {} + broker_api_version = 2 + self.sc_logger = logger if not self.sc_logger then self.sc_logger = sc_logger.new() @@ -40,7 +42,6 @@ function sc_broker.new(params, logger) end setmetatable(self, { __index = ScBroker }) - return self end @@ -55,39 +56,42 @@ function ScBroker:get_host_all_infos(host_id) self.sc_logger:warning("[sc_broker:get_host_all_infos]: host id is nil") return false end - + -- get host information from broker cache local host_info = broker_cache:get_host(host_id) -- return false only if no host information were found in broker cache if not host_info and self.params.enable_broker_cache_counter_check ~= 1 then self.sc_logger:warning("[sc_broker:get_host_all_infos]: No host information found for host_id: " .. tostring(host_id) .. ". Restarting centengine should fix this.") + --self.sc_logger:notice("[sc_broker:get_host_all_infos]: No host information found for host_id: " .. tostring(host_id) .. ". Restarting centengine should fix this.") return false - -- user is asking to also check in the database for the host. 
if we find it, we return a limited set of value (the most common ones) - elseif not host_info and self.params.enable_broker_cache_counter_check == 1 then + -- user is asking to also check in the database for the host. if we find it, we return a limited set of value (the most common ones) + elseif (not host_info or not host_info.name) and self.params.enable_broker_cache_counter_check == 1 then + --self.sc_logger:notice("going to check host info in the database") local query = [[ - SELECT h.host_id, - h.host_name AS name, - h.host_alias AS alias, - h.host_address AS address, + SELECT h.host_id, + h.host_name AS name, + h.host_alias AS alias, + h.host_address AS address, h.display_name, ehi.ehi_notes AS notes, ehi.ehi_notes_url AS notes_url, ehi.ehi_action_url AS action_url, - nhr.nagios_server_id AS instance_id + nhr.nagios_server_id as instance_id FROM host h, extended_host_information ehi, - ns_host_relation nhr + ns_host_relation nhr WHERE ehi.host_host_id = h.host_id AND h.host_activate <> '0' - AND h.host_id = nhr.host_host_id AND h.host_id = ]] .. tonumber(host_id) + .. [[ AND nhr.host_host_id = ]] .. tonumber(host_id) self.sc_logger:debug("[sc_broker:get_host_all_infos]: no information found in broker cache for host: " .. tostring(host_id) .. ", going to check in the centreon database with query: " .. tostring(query)) + --- self.sc_logger:notice("[sc_broker:get_host_all_infos]: no information found in broker cache for host: " .. tostring(host_id) .. ", going to check in the centreon database with query: " .. tostring(query)) host_info = self:get_centreon_db_info(query) - + -- self.sc_logger:notice("found host info in db cacheç " .. self.sc_common:dumper(host_info) if not host_info then self.sc_logger:error("[sc_broker:get_host_all_infos]: couldn't find host: " .. tostring(host_id) .. " in your database. Maybe it has been disabled or removed. 
You should export your configuration.") @@ -109,7 +113,7 @@ function ScBroker:get_service_all_infos(host_id, service_id) self.sc_logger:warning("[sc_broker:get_service_all_infos]: host id or service id is nil") return false end - + -- get service information from broker cache local service_info = broker_cache:get_service(host_id, service_id) @@ -117,16 +121,18 @@ function ScBroker:get_service_all_infos(host_id, service_id) if not service_info and self.params.enable_broker_cache_counter_check ~= 1 then self.sc_logger:warning("[sc_broker:get_service_all_infos]: No service information found for host_id: " .. tostring(host_id) .. " and service_id: " .. tostring(service_id) .. ". Restarting centengine should fix this.") + self.sc_logger:notice("[sc_broker:get_service_all_infos]: No service information found for host_id: " .. tostring(host_id) + .. " and service_id: " .. tostring(service_id) .. ". Restarting centengine should fix this.") return false - elseif not service_info and self.params.enable_broker_cache_counter_check == 1 then + elseif (not service_info or not service_info.description) and self.params.enable_broker_cache_counter_check == 1 then local query = [[ - SELECT s.service_id, - s.service_description AS description, - s.service_alias AS alias, + SELECT s.service_id, + s.service_description AS description, + s.service_alias AS alias, s.display_name, esi.esi_notes AS notes, esi.esi_notes_url AS notes_url, - esi.esi_action_url AS action_url + esi.esi_action_url AS action_url FROM service s, extended_service_information esi WHERE esi.service_service_id = s.service_id @@ -134,6 +140,8 @@ function ScBroker:get_service_all_infos(host_id, service_id) AND s.service_id = ]] .. tonumber(service_id) self.sc_logger:debug("[sc_broker:get_host_all_infos]: no information found in broker cache for service: " .. tostring(service_id) .. ", going to check in the centreon database with query: " .. 
tostring(query)) + --- self.sc_logger:notice("[sc_broker:get_host_all_infos]: no information found in broker cache for service: " .. tostring(service_id) .. ", going to check in the centreon database with query: " .. tostring(query)) + service_info = self:get_centreon_db_info(query) @@ -149,7 +157,7 @@ end --- get_host_infos: retrieve the the desired host informations -- @param host_id (number) --- @params info (string|table) the name of the wanted host parameter or a table of all wanted host parameters +-- @param info (string|table) the name of the wanted host parameter or a table of all wanted host parameters -- @return false (boolean) if host_id is nil or empty -- @return host (any) a table of all wanted host params if input param is a table. The single parameter if input param is a string function ScBroker:get_host_infos(host_id, info) @@ -158,7 +166,7 @@ function ScBroker:get_host_infos(host_id, info) self.sc_logger:warning("[sc_broker:get_host_infos]: host id is nil") return false end - + -- prepare return table with host information local host = { host_id = host_id @@ -174,7 +182,7 @@ function ScBroker:get_host_infos(host_id, info) -- return host_id only if no host information were found in broker cache if not host_info then - self.sc_logger:warning("[sc_broker:get_host_infos]: No host information found for host_id: " .. tostring(host_id) .. ". Restarting centengine should fix this.") + self.sc_logger:warning("[sc_broker:get_host_infos]: No host information found for host_id: " .. tostring(host_id) .. ". 
Restarting centengine should fix this.") return host end @@ -200,7 +208,7 @@ end --- get_service_infos: retrieve the the desired service informations -- @param host_id (number) -- @param service_id (number) --- @params info (string|table) the name of the wanted host parameter or a table of all wanted service parameters +-- @param info (string|table) the name of the wanted host parameter or a table of all wanted service parameters -- @return false (boolean) if host_id and/or service_id are nil or empty -- @return service (any) a table of all wanted service params if input param is a table. A single parameter if input param is a string function ScBroker:get_service_infos(host_id, service_id, info) @@ -209,7 +217,7 @@ function ScBroker:get_service_infos(host_id, service_id, info) self.sc_logger:warning("[sc_broker:get_service_infos]: host id or service id is invalid") return false end - + -- prepare return table with service information local service = { host_id = host_id, @@ -226,8 +234,8 @@ function ScBroker:get_service_infos(host_id, service_id, info) -- return host_id and service_id only if no host information were found in broker cache if not service_info then - self.sc_logger:warning("[sc_broker:get_service_infos]: No service information found for host_id: " .. tostring(host_id) .. " and service_id: " .. tostring(service_id) - .. ". Restarting centengine should fix this.") + self.sc_logger:warning("[sc_broker:get_service_infos]: No service information found for host_id: " .. tostring(host_id) .. " and service_id: " .. tostring(service_id) + .. ". 
Restarting centengine should fix this.") return service end @@ -256,7 +264,7 @@ end -- @return hostgroups (table) a table of all hostgroups for the host function ScBroker:get_hostgroups(host_id) -- return false if host id is invalid - if host_id == nil or host_id == "" then + if host_id == nil or host_id == "" then self.sc_logger:warning("[sc_broker:get_hostgroup]: host id is nil or empty") return false end @@ -268,7 +276,7 @@ function ScBroker:get_hostgroups(host_id) if not hostgroups then return false end - + return hostgroups end @@ -279,7 +287,7 @@ end -- @return servicegroups (table) a table of all servicegroups for the service function ScBroker:get_servicegroups(host_id, service_id) -- return false if service id is invalid - if host_id == nil or host_id == "" or service_id == nil or service_id == "" then + if host_id == nil or host_id == "" or service_id == nil or service_id == "" then self.sc_logger:warning("[sc_broker:get_servicegroups]: service id is nil or empty") return false end @@ -291,7 +299,7 @@ function ScBroker:get_servicegroups(host_id, service_id) if not servicegroups then return false end - + return servicegroups end @@ -302,7 +310,7 @@ end -- @return severity (table) all the severity from the host or the service function ScBroker:get_severity(host_id, service_id) -- return false if host id is invalid - if host_id == nil or host_id == "" then + if host_id == nil or host_id == "" then self.sc_logger:warning("[sc_broker:get_severity]: host id is nil or empty") return false end @@ -364,7 +372,7 @@ end -- @return ba_info (table) a table with the name and description of the ba function ScBroker:get_ba_infos(ba_id) -- return false if ba_id is invalid - if ba_id == nil or ba_id == "" then + if ba_id == nil or ba_id == "" then self.sc_logger:warning("[sc_broker:get_ba_infos]: ba id is nil or empty") return false end @@ -387,7 +395,7 @@ end -- @return bvs (table) name and description of all the bvs function ScBroker:get_bvs_infos(ba_id) -- return false 
if ba_id is invalid - if ba_id == nil or ba_id == "" then + if ba_id == nil or ba_id == "" then self.sc_logger:warning("[sc_broker:get_bvs]: ba id is nil or empty") return false end @@ -411,9 +419,9 @@ function ScBroker:get_bvs_infos(ba_id) -- add bv information to the list if bv_infos then - table.insert(bvs,bv_infos) + table.insert(bvs, bv_infos) found_bv = true - else + else self.sc_logger:warning("[sc_broker:get_bvs]: couldn't get bv information for bv id: " .. tostring(bv_id)) end end @@ -427,8 +435,8 @@ function ScBroker:get_bvs_infos(ba_id) end --- get_centreon_db_info: run a query (that must return only one row) in the centreon database to build a cache from the db when asking to. If the query return multiple rows, only the last one will be returned --- @param query (string) the sql query that must be executed to build the cache --- @return result (table or nil) the result of the query or nil +-- @param query (string) the sql query that must be executed to build the cache +-- @return result (table or nil) the result of the query or nil function ScBroker:get_centreon_db_info(query) local result, error = self.centreon_db:execute(query) diff --git a/modules/centreon-stream-connectors-lib/sc_common.lua b/modules/centreon-stream-connectors-lib/sc_common.lua index 835be9c8..29e76878 100644 --- a/modules/centreon-stream-connectors-lib/sc_common.lua +++ b/modules/centreon-stream-connectors-lib/sc_common.lua @@ -1,6 +1,6 @@ #!/usr/bin/lua ---- +--- -- Module with common methods for Centreon Stream Connectors -- @module sc_common -- @alias sc_common @@ -25,9 +25,9 @@ local ScCommon = {} function sc_common.new(sc_logger) local self = {} - + self.sc_logger = sc_logger - if not self.sc_logger then + if not self.sc_logger then self.sc_logger = sc_logger.new() end @@ -227,13 +227,13 @@ function ScCommon:json_escape(string) end return string:gsub('\\', '\\\\') - :gsub('\t', '\\t') - :gsub('\n', '\\n') - :gsub('\b', '\\b') - :gsub('\r', '\\r') - :gsub('\f', '\\f') - 
:gsub('/', '\\/') - :gsub('"', '\\"') + :gsub('\t', '\\t') + :gsub('\n', '\\n') + :gsub('\b', '\\b') + :gsub('\r', '\\r') + :gsub('\f', '\\f') + :gsub('/', '\\/') + :gsub('"', '\\"') end --- xml_escape: escape xml special characters in a string @@ -246,10 +246,10 @@ function ScCommon:xml_escape(string) end return string:gsub('&', '&') - :gsub('<', '$lt;') - :gsub('>', '>') - :gsub('"', '"') - :gsub("'", "'") + :gsub('<', '$lt;') + :gsub('>', '>') + :gsub('"', '"') + :gsub("'", "'") end --- lua_regex_escape: escape lua regex special characters in a string @@ -262,17 +262,17 @@ function ScCommon:lua_regex_escape(string) end return string:gsub('%%', '%%%%') - :gsub('%.', '%%.') - :gsub("%*", "%%*") - :gsub("%-", "%%-") - :gsub("%(", "%%(") - :gsub("%)", "%%)") - :gsub("%[", "%%[") - :gsub("%]", "%%]") - :gsub("%$", "%%$") - :gsub("%^", "%%^") - :gsub("%+", "%%+") - :gsub("%?", "%%?") + :gsub('%.', '%%.') + :gsub("%*", "%%*") + :gsub("%-", "%%-") + :gsub("%(", "%%(") + :gsub("%)", "%%)") + :gsub("%[", "%%[") + :gsub("%]", "%%]") + :gsub("%$", "%%$") + :gsub("%^", "%%^") + :gsub("%+", "%%+") + :gsub("%?", "%%?") end --- dumper: dump variables for debug purpose @@ -345,9 +345,9 @@ function ScCommon:get_bbdo_version() local bbdo_version if broker.bbdo_version ~= nil then - _, _, bbdo_version = string.find(broker.bbdo_version(), "(%d+).%d+.%d+") + _, _, bbdo_version = string.find(broker.bbdo_version(), "(%d+).%d+.%d+") else - bbdo_version = 2 + bbdo_version = 2 end return tonumber(bbdo_version) @@ -366,81 +366,4 @@ function ScCommon:is_valid_pattern(pattern) return status end ---- sleep: wait a given number of seconds --- @param seconds (number) the number of seconds you need to wait -function ScCommon:sleep(seconds) - local default_value = 1 - - if type(seconds) == "number" then - os.execute("sleep " .. seconds) - else - self.sc_logger:error("[sc_common:sleep]: given parameter is not a valid second value. Parameter value: " .. tostrin(seconds) - .. ". 
This will default to: " .. tostring(default_value)) - os.execute("sleep " .. default_value) - end -end - ---- create_sleep_counter: create a table to handle sleep counters. Useful when you want to log something less often after some repetitions --- @param sleep_table (table) an empty table that will be returned with all the desired data structure --- @param min (number) the minimum value of the counter --- @param max (number) the maximum value of the counter --- @param step (number) the value by whitch the counter will be incremented --- @param init_value (number) [optional] the value of the counter when you create the table. When not provided, it will use the min --- @return sleep_table (table) a table with all values set and some functions in order to interact with the table more easily -function ScCommon:create_sleep_counter_table(sleep_table, min, max, step, init_value) - local default_min = 0 - local default_max = 300 - local default_step = 10 - - if type(min) ~= "number" - or type(max) ~= "number" - or type(step) ~= "number" - then - self.sc_logger:error("[sc_common:create_sleep_counter_table]: min, max or step are not numbers: " .. tostring(min) - .. ", " .. tostring(max) .. ", " .. tostring(step) .. ". We will use default values instead") - min = default_min - max = default_max - step = default_step - end - - if max < min then - self.sc_logger:error("[sc_common:create_sleep_counter_table]: max is below min." .. tostring(max) .. " < " .. tostring(min) - .. ". 
We will use default values instead") - min = default_min - max = default_max - end - - if not init_value or type(init_value) ~= "number" then - init_value = min - end - - sleep_table.min = min - sleep_table.max = max - sleep_table.value = init_value - sleep_table.step = step - sleep_table.reset = function () sleep_table.value = sleep_table.min end - sleep_table.increment = function () - if sleep_table.value < sleep_table.max then - sleep_table.value = sleep_table.value + sleep_table.step - end - end - sleep_table.is_max_reached = function () - if sleep_table.value < sleep_table.max then - return false - else - return true - end - end - sleep_table.sleep = function () - if not sleep_table:is_max_reached() then - self:sleep(sleep_table.value) - sleep_table:increment() - else - self:sleep(sleep_table.value) - end - end - - return sleep_table -end - return sc_common diff --git a/modules/centreon-stream-connectors-lib/sc_event.lua b/modules/centreon-stream-connectors-lib/sc_event.lua index 807f39b3..80ea3933 100644 --- a/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/modules/centreon-stream-connectors-lib/sc_event.lua @@ -1,6 +1,6 @@ #!/usr/bin/lua ---- +--- -- Module to help handle events from Centreon broker -- @module sc_event -- @alias sc_event @@ -14,27 +14,20 @@ local sc_broker = require("centreon-stream-connectors-lib.sc_broker") local ScEvent = {} -function sc_event.new(broker_event, params, common, logger, broker) +function sc_event.new(event, params, common, logger, broker) local self = {} self.sc_logger = logger - if not self.sc_logger then + if not self.sc_logger then self.sc_logger = sc_logger.new() end self.sc_common = common self.params = params - self.broker_event = broker_event + self.event = event self.sc_broker = broker self.bbdo_version = self.sc_common:get_bbdo_version() - -- we create our event table - self.event = { - cache = {} - } - - -- create the meta table for the self.event table - local event_meta = { __index = function (tbl, 
key) return self.broker_event[key] end} - setmetatable(self.event, event_meta) + self.event.cache = {} setmetatable(self, { __index = ScEvent }) return self @@ -73,7 +66,7 @@ end -- @return true|false (boolean) function ScEvent:is_valid_event() local is_valid_event = false - + -- run validation tests depending on the category of the event if self.event.category == self.params.bbdo.categories.neb.id then is_valid_event = self:is_valid_neb_event() @@ -91,7 +84,7 @@ function ScEvent:is_valid_event() -- run custom code if self.params.custom_code and type(self.params.custom_code) == "function" then self, is_valid_event = self.params.custom_code(self) - end + end return is_valid_event end @@ -100,13 +93,13 @@ end -- @return true|false (boolean) function ScEvent:is_valid_neb_event() local is_valid_event = false - + -- run validation tests depending on the element type of the neb event if self.event.element == self.params.bbdo.elements.host_status.id then is_valid_event = self:is_valid_host_status_event() elseif self.event.element == self.params.bbdo.elements.service_status.id then is_valid_event = self:is_valid_service_status_event() - elseif self.event.element == self.params.bbdo.elements.acknowledgement.id then + elseif self.event.element == self.params.bbdo.elements.acknowledgement.id then is_valid_event = self:is_valid_acknowledgement_event() elseif self.event.element == self.params.bbdo.elements.downtime.id then is_valid_event = self:is_valid_downtime_event() @@ -123,10 +116,10 @@ function ScEvent:is_valid_host_status_event() self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " hasn't been validated") return false end - + -- return false if event status is not accepted if not self:is_valid_event_status(self.params.host_status) then - self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. 
tostring(self.event.host_id) + self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " do not have a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.state])) return false end @@ -180,7 +173,7 @@ end function ScEvent:is_valid_service_status_event() -- return false if we can't get hostname or host id is nil if not self:is_valid_host() then - self.sc_logger:warning("[sc_event:is_valid_service_status_event]: host_id: " .. tostring(self.event.host_id) + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: host_id: " .. tostring(self.event.host_id) .. " hasn't been validated for service with id: " .. tostring(self.event.service_id)) return false end @@ -193,7 +186,7 @@ function ScEvent:is_valid_service_status_event() -- return false if event status is not accepted if not self:is_valid_event_status(self.params.service_status) then - self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service with id: " .. tostring(self.event.service_id) + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service with id: " .. tostring(self.event.service_id) .. " hasn't a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.state])) return false end @@ -213,32 +206,32 @@ function ScEvent:is_valid_service_status_event() -- return false if host is not monitored from an accepted poller if not self:is_valid_poller() then - self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service id: " .. tostring(self.event.service_id) + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service id: " .. tostring(self.event.service_id) .. ". host_id: " .. tostring(self.event.host_id) .. 
" is not monitored from an accepted poller") return false end -- return false if host has not an accepted severity if not self:is_valid_host_severity() then - self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service id: " .. tostring(self.event.service_id) + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service id: " .. tostring(self.event.service_id) .. ". host_id: " .. tostring(self.event.host_id) .. ". Host has not an accepted severity") return false end -- return false if service has not an accepted severity if not self:is_valid_service_severity() then - self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service id: " .. tostring(self.event.service_id) + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service id: " .. tostring(self.event.service_id) .. ". host_id: " .. tostring(self.event.host_id) .. ". Service has not an accepted severity") return false end -- return false if host is not in an accepted hostgroup if not self:is_valid_hostgroup() then - self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service_id: " .. tostring(self.event.service_id) + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service_id: " .. tostring(self.event.service_id) .. " is not in an accepted hostgroup. Host ID is: " .. tostring(self.event.host_id)) return false end - + -- return false if service is not in an accepted servicegroup if not self:is_valid_servicegroup() then self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service_id: " .. tostring(self.event.service_id) .. " is not in an accepted servicegroup") @@ -272,8 +265,10 @@ function ScEvent:is_valid_host() -- return false if we can't get hostname if (not self.event.cache.host and self.params.skip_anon_events == 1) then - self.sc_logger:warning("[sc_event:is_valid_host]: No name for host with id: " .. 
tostring(self.event.host_id) + self.sc_logger:warning("[sc_event:is_valid_host]: No name for host with id: " .. tostring(self.event.host_id) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) + --self.sc_logger:notice("[sc_event:is_valid_host]: No name for host with id: " .. tostring(self.event.host_id) + --.. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) return false elseif (not self.event.cache.host and self.params.skip_anon_events == 0) then self.event.cache.host = { @@ -287,7 +282,7 @@ function ScEvent:is_valid_host() end -- return false if event is coming from fake bam host - if string.find(self.event.cache.host.name, "^_Module_BAM_*") and self.params.enable_bam_host == 0 then + if string.find(self.event.cache.host.name, "^_Module_BAM_*") then self.sc_logger:debug("[sc_event:is_valid_host]: Host is a BAM fake host: " .. tostring(self.event.cache.host.name)) return false end @@ -308,9 +303,9 @@ function ScEvent:is_valid_host() end if not is_valid_pattern then - self.sc_logger:info("[sc_event:is_valid_host]: Host: " .. tostring(self.event.cache.host.name) - .. " doesn't match accepted_hosts pattern: " .. tostring(self.params.accepted_hosts) - .. " or any of the sub-patterns if accepted_hosts_enable_split_pattern is enabled") + self.sc_logger:info("[sc_event:is_valid_host]: Host: " .. tostring(self.event.cache.host.name) + .. " doesn't match accepted_hosts pattern: " .. tostring(self.params.accepted_hosts) + .. " or any of the sub-patterns if accepted_hosts_enable_split_pattern is enabled") return false end @@ -331,7 +326,7 @@ function ScEvent:is_valid_service() -- return false if we can't get service description if (not self.event.cache.service and self.params.skip_anon_events == 1) then - self.sc_logger:warning("[sc_event:is_valid_service]: Invalid description for service with id: " .. 
tostring(self.event.service_id) + self.sc_logger:warning("[sc_event:is_valid_service]: Invalid description for service with id: " .. tostring(self.event.service_id) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) return false elseif (not self.event.cache.service and self.params.skip_anon_events == 0) then @@ -361,24 +356,12 @@ function ScEvent:is_valid_service() end if not is_valid_pattern then - self.sc_logger:info("[sc_event:is_valid_service]: Service: " .. tostring(self.event.cache.service.description) .. " from host: " .. tostring(self.event.cache.host.name) - .. " doesn't match accepted_services pattern: " .. tostring(self.params.accepted_services) - .. " or any of the sub-patterns if accepted_services_enable_split_pattern is enabled") + self.sc_logger:info("[sc_event:is_valid_service]: Service: " .. tostring(self.event.cache.service.description) .. " from host: " .. tostring(self.event.cache.host.name) + .. " doesn't match accepted_services pattern: " .. tostring(self.params.accepted_services) + .. " or any of the sub-patterns if accepted_services_enable_split_pattern is enabled") return false end - -- if we want to send BA status using the service status mecanism, we need to use the ba_description instead of host name - if string.find(self.event.cache.host.name, "^_Module_BAM_*") and self.params.enable_bam_host == 1 then - self.sc_logger:debug("[sc_event:is_valid_service]: Host is a fake BAM host. Therefore, host name: " - .. tostring(self.event.cache.host.name) .. " must be replaced by the name of the BA.") - self.event.ba_id = string.gsub(self.event.cache.service.description, "ba_", "") - self.event.ba_id = tonumber(self.event.ba_id) - self:is_valid_ba() - self.sc_logger:debug("[sc_event:is_valid_service]: replacing host name: " - .. tostring(self.event.cache.host.name) .. " by BA name: " .. 
tostring(self.event.cache.ba.ba_name)) - self.event.cache.host.name = self.event.cache.ba.ba_name - end - return true end @@ -413,7 +396,7 @@ end -- @return true|false (boolean) function ScEvent:is_valid_event_status(accepted_status_list) local status_list = self.sc_common:split(accepted_status_list, ",") - + if not status_list then self.sc_logger:error("[sc_event:is_valid_event_status]: accepted_status list is nil or empty") return false @@ -430,20 +413,20 @@ function ScEvent:is_valid_event_status(accepted_status_list) -- end compat patch for _, status_id in ipairs(status_list) do - if tostring(self.event.state) == status_id then + if tostring(self.event.state) == status_id then return true end end -- handle downtime event specific case for logging if (self.event.category == self.params.bbdo.categories.neb.id and self.event.element == self.params.bbdo.elements.downtime.id) then - self.sc_logger:warning("[sc_event:is_valid_event_status] event has an invalid state. Current state: " + self.sc_logger:warning("[sc_event:is_valid_event_status] event has an invalid state. Current state: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.type][self.event.state]) .. ". Accepted states are: " .. tostring(accepted_status_list)) return false end -- log for everything else - self.sc_logger:warning("[sc_event:is_valid_event_status] event has an invalid state. Current state: " + self.sc_logger:warning("[sc_event:is_valid_event_status] event has an invalid state. Current state: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.state]) .. ". Accepted states are: " .. 
tostring(accepted_status_list)) return false end @@ -452,7 +435,7 @@ end -- @return true|false (boolean) function ScEvent:is_valid_event_state_type() if not self.sc_common:compare_numbers(self.event.state_type, self.params.hard_only, ">=") then - self.sc_logger:warning("[sc_event:is_valid_event_state_type]: event is not in an valid state type. Event state type must be above or equal to " .. tostring(self.params.hard_only) + self.sc_logger:warning("[sc_event:is_valid_event_state_type]: event is not in an valid state type. Event state type must be above or equal to " .. tostring(self.params.hard_only) .. ". Current state type: " .. tostring(self.event.state_type)) return false end @@ -473,7 +456,7 @@ function ScEvent:is_valid_event_acknowledge_state() end if not self.sc_common:compare_numbers(self.params.acknowledged, self.sc_common:boolean_to_number(self.event.acknowledged), ">=") then - self.sc_logger:warning("[sc_event:is_valid_event_acknowledge_state]: event is not in an valid ack state. Event ack state must be below or equal to " .. tostring(self.params.acknowledged) + self.sc_logger:warning("[sc_event:is_valid_event_acknowledge_state]: event is not in an valid ack state. Event ack state must be below or equal to " .. tostring(self.params.acknowledged) .. ". Current ack state: " .. tostring(self.sc_common:boolean_to_number(self.event.acknowledged))) return false end @@ -485,12 +468,12 @@ end -- @return true|false (boolean) function ScEvent:is_valid_event_downtime_state() -- patch compat bbdo 3 => bbdo 2 - if (not self.event.scheduled_downtime_depth and self.event.downtime_depth) then + if (not self.event.scheduled_downtime_depth and self.event.downtime_depth) then self.event.scheduled_downtime_depth = self.event.downtime_depth end if not self.sc_common:compare_numbers(self.params.in_downtime, self.event.scheduled_downtime_depth, ">=") then - self.sc_logger:warning("[sc_event:is_valid_event_downtime_state]: event is not in an valid downtime state. 
Event downtime state must be below or equal to " .. tostring(self.params.in_downtime) + self.sc_logger:warning("[sc_event:is_valid_event_downtime_state]: event is not in an valid downtime state. Event downtime state must be below or equal to " .. tostring(self.params.in_downtime) .. ". Current downtime state: " .. tostring(self.sc_common:boolean_to_number(self.event.scheduled_downtime_depth))) return false end @@ -502,7 +485,7 @@ end -- @return true|false (boolean) function ScEvent:is_valid_event_flapping_state() if not self.sc_common:compare_numbers(self.params.flapping, self.sc_common:boolean_to_number(self.event.flapping), ">=") then - self.sc_logger:warning("[sc_event:is_valid_event_flapping_state]: event is not in an valid flapping state. Event flapping state must be below or equal to " .. tostring(self.params.flapping) + self.sc_logger:warning("[sc_event:is_valid_event_flapping_state]: event is not in an valid flapping state. Event flapping state must be below or equal to " .. tostring(self.params.flapping) .. ". Current flapping state: " .. tostring(self.sc_common:boolean_to_number(self.event.flapping))) return false end @@ -526,11 +509,11 @@ function ScEvent:is_valid_hostgroup() if not self.event.cache.hostgroups then if accepted_hostgroups_isnotempty then self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id) - .. " is not linked to a hostgroup. Accepted hostgroups are: " .. self.params.accepted_hostgroups ..".") + .. " is not linked to a hostgroup. Accepted hostgroups are: " .. self.params.accepted_hostgroups .. ".") return false elseif rejected_hostgroups_isnotempty then self.sc_logger:debug("[sc_event:is_valid_hostgroup]: accepting event because host with id: " .. tostring(self.event.host_id) - .. " is not linked to a hostgroup. Rejected hostgroups are: " .. self.params.rejected_hostgroups ..".") + .. " is not linked to a hostgroup. Rejected hostgroups are: " .. 
self.params.rejected_hostgroups .. ".") return true end end @@ -540,11 +523,11 @@ function ScEvent:is_valid_hostgroup() -- return false if the host is not in a valid hostgroup if accepted_hostgroups_isnotempty and not accepted_hostgroup_name then - self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id) + self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id) .. " is not in an accepted hostgroup. Accepted hostgroups are: " .. self.params.accepted_hostgroups) return false elseif rejected_hostgroups_isnotempty and rejected_hostgroup_name then - self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id) + self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id) .. " is in a rejected hostgroup. Rejected hostgroups are: " .. self.params.rejected_hostgroups) return false else @@ -595,11 +578,11 @@ function ScEvent:is_valid_servicegroup() if not self.event.cache.servicegroups then if accepted_servicegroups_isnotempty then self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id) - .. " is not linked to a servicegroup. Accepted servicegroups are: " .. self.params.accepted_servicegroups ..".") + .. " is not linked to a servicegroup. Accepted servicegroups are: " .. self.params.accepted_servicegroups .. ".") return false elseif rejected_servicegroups_isnotempty then self.sc_logger:debug("[sc_event:is_valid_servicegroup]: accepting event because service with id: " .. tostring(self.event.service_id) - .. " is not linked to a servicegroup. Rejected servicegroups are: " .. self.params.rejected_servicegroups ..".") + .. " is not linked to a servicegroup. Rejected servicegroups are: " .. self.params.rejected_servicegroups .. 
".") return true end end @@ -609,15 +592,15 @@ function ScEvent:is_valid_servicegroup() -- return false if the service is not in a valid servicegroup if accepted_servicegroups_isnotempty and not accepted_servicegroup_name then - self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id) + self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id) .. " is not in an accepted servicegroup. Accepted servicegroups are: " .. self.params.accepted_servicegroups) return false elseif rejected_servicegroups_isnotempty and rejected_servicegroup_name then - self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id) + self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id) .. " is in an rejected servicegroup. Rejected servicegroups are: " .. self.params.rejected_servicegroups) return false end - + local debug_msg = "[sc_event:is_valid_servicegroup]: event for service with id: " .. tostring(self.event.service_id) if accepted_servicegroups_isnotempty then debug_msg = debug_msg .. " matched servicegroup: " .. tostring(accepted_servicegroup_name) @@ -642,7 +625,7 @@ function ScEvent:find_servicegroup_in_list(servicegroups_list) return servicegroup_name end end - end + end end return false end @@ -679,7 +662,7 @@ function ScEvent:is_valid_bam_event() self.sc_logger:warning("[sc_event:is_valid_bam_event]: ba_id: " .. tostring(self.event.ba_id) .. 
" is not in an accepted BV") return false end - + return true end @@ -694,13 +677,13 @@ function ScEvent:is_valid_ba() end self.event.cache.ba = self.sc_broker:get_ba_infos(self.event.ba_id) - + -- return false if we can't get ba name if (not self.event.cache.ba.ba_name and self.params.skip_anon_events == 1) then self.sc_logger:warning("[sc_event:is_valid_ba]: Invalid BA with id: " .. tostring(self.event.ba_id) .. ". Found BA name is: " .. tostring(self.event.cache.ba.ba_name) .. ". And skip anon event param is set to: " .. tostring(self.params.skip_anon_events)) return false - elseif (not self.event.cache.ba.ba_name and self.params.skip_anon_events == 0) then + elseif (not self.event.cache.ba.ba_name and self.params.skip_anon_events == 0) then self.event.cache.ba = { ba_name = self.event.ba_id } @@ -713,8 +696,8 @@ end -- @return true|false (boolean) function ScEvent:is_valid_ba_status_event() if not self:is_valid_event_status(self.params.ba_status) then - self.sc_logger:warning("[sc_event:is_valid_ba]: Invalid BA status for BA id: " .. tostring(self.event.ba_id) .. ". State is: " - .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.state]) .. ". Acceptes states are: " .. tostring(self.params.ba_status)) + self.sc_logger:warning("[sc_event:is_valid_ba]: Invalid BA status for BA id: " .. tostring(self.event.ba_id) .. ". State is: " + .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.state]) .. ". Acceptes states are: " .. tostring(self.params.ba_status)) return false end @@ -725,7 +708,7 @@ end -- @return true|false (boolean) function ScEvent:is_valid_ba_downtime_state() if not self.sc_common:compare_numbers(self.params.in_downtime, self.sc_common:boolean_to_number(self.event.in_downtime), ">=") then - self.sc_logger:warning("[sc_event:is_valid_ba]: Invalid BA downtime state for BA id: " .. tostring(self.event.ba_id) .. " downtime state is : " .. 
tostring(self.event.in_downtime) + self.sc_logger:warning("[sc_event:is_valid_ba]: Invalid BA downtime state for BA id: " .. tostring(self.event.ba_id) .. " downtime state is : " .. tostring(self.event.in_downtime) .. " and accepted downtime state must be below or equal to: " .. tostring(self.params.in_downtime)) return false end @@ -754,16 +737,16 @@ function ScEvent:is_valid_bv() if (not accepted_bvs_isnotempty and not rejected_bvs_isnotempty) or (accepted_bvs_isnotempty and rejected_bvs_isnotempty) then return true end - + -- return false if no bvs were found if not self.event.cache.bvs then if accepted_bvs_isnotempty then self.sc_logger:debug("[sc_event:is_valid_bv]: dropping event because host with id: " .. tostring(self.event.host_id) - .. " is not linked to a BV. Accepted BVs are: " .. self.params.accepted_bvs ..".") + .. " is not linked to a BV. Accepted BVs are: " .. self.params.accepted_bvs .. ".") return false elseif rejected_bvs_isnotempty then self.sc_logger:debug("[sc_event:is_valid_bv]: accepting event because host with id: " .. tostring(self.event.host_id) - .. " is not linked to a BV. Rejected BVs are: " .. self.params.rejected_bvs ..".") + .. " is not linked to a BV. Rejected BVs are: " .. self.params.rejected_bvs .. ".") return true end end @@ -796,7 +779,7 @@ function ScEvent:find_bv_in_list(bvs_list) if bvs_list == nil or bvs_list == "" then return false else - for _, bv_name in ipairs(self.sc_common:split(bvs_list,",")) do + for _, bv_name in ipairs(self.sc_common:split(bvs_list, ",")) do for _, event_bv in pairs(self.event.cache.bvs) do if bv_name == event_bv.bv_name then return bv_name @@ -849,7 +832,7 @@ function ScEvent:is_valid_poller() -- return false if the host is not monitored from a valid poller if accepted_pollers_isnotempty and not accepted_poller_name then - self.sc_logger:debug("[sc_event:is_valid_poller]: dropping event because host with id: " .. 
tostring(self.event.host_id) + self.sc_logger:debug("[sc_event:is_valid_poller]: dropping event because host with id: " .. tostring(self.event.host_id) .. " is not linked to an accepted poller. Host is monitored from: " .. tostring(self.event.cache.poller) .. ". Accepted pollers are: " .. self.params.accepted_pollers) return false elseif rejected_pollers_isnotempty and rejected_poller_name then @@ -900,7 +883,7 @@ function ScEvent:is_valid_host_severity() -- return false if host severity doesn't match if not self.sc_common:compare_numbers(self.params.host_severity_threshold, self.event.cache.severity.host, self.params.host_severity_operator) then self.sc_logger:debug("[sc_event:is_valid_host_severity]: dropping event because host with id: " .. tostring(self.event.host_id) .. " has an invalid severity. Severity is: " - .. tostring(self.event.cache.severity.host) .. ". host_severity_threshold (" .. tostring(self.params.host_severity_threshold) .. ") is " .. self.params.host_severity_operator + .. tostring(self.event.cache.severity.host) .. ". host_severity_threshold (" .. tostring(self.params.host_severity_threshold) .. ") is " .. self.params.host_severity_operator .. " to the severity of the host (" .. tostring(self.event.cache.severity.host) .. ")") return false end @@ -929,7 +912,7 @@ function ScEvent:is_valid_service_severity() -- return false if service severity doesn't match if not self.sc_common:compare_numbers(self.params.service_severity_threshold, self.event.cache.severity.service, self.params.service_severity_operator) then self.sc_logger:debug("[sc_event:is_valid_service_severity]: dropping event because service with id: " .. tostring(self.event.service_id) .. " has an invalid severity. Severity is: " - .. tostring(self.event.cache.severity.service) .. ". service_severity_threshold (" .. tostring(self.params.service_severity_threshold) .. ") is " .. self.params.service_severity_operator + .. tostring(self.event.cache.severity.service) .. ". 
service_severity_threshold (" .. tostring(self.params.service_severity_threshold) .. ") is " .. self.params.service_severity_operator .. " to the severity of the host (" .. tostring(self.event.cache.severity.service) .. ")") return false end @@ -949,11 +932,11 @@ function ScEvent:is_valid_acknowledgement_event() -- check if ack author is valid if not self:is_valid_author() then self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: acknowledgement on host: " .. tostring(self.event.host_id) - .. "and service: " .. tostring(self.event.service_id) .. "(0 means ack is on host) is not made by a valid author. Author is: " + .. "and service: " .. tostring(self.event.service_id) .. "(0 means ack is on host) is not made by a valid author. Author is: " .. tostring(self.event.author) .. " Accepted authors are: " .. self.params.accepted_authors) return false end - + -- return false if host is not monitored from an accepted poller if not self:is_valid_poller() then self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: host_id: " .. tostring(self.event.host_id) .. " is not monitored from an accepted poller") @@ -962,7 +945,7 @@ function ScEvent:is_valid_acknowledgement_event() -- return false if host has not an accepted severity if not self:is_valid_host_severity() then - self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service id: " .. tostring(self.event.service_id) + self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service id: " .. tostring(self.event.service_id) .. ". host_id: " .. tostring(self.event.host_id) .. ". Host has not an accepted severity") return false end @@ -975,12 +958,12 @@ function ScEvent:is_valid_acknowledgement_event() -- return false if event status is not accepted if not self:is_valid_event_status(event_status) then - self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: host_id: " .. 
tostring(self.event.host_id) + self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: host_id: " .. tostring(self.event.host_id) .. " do not have a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.params.bbdo.elements.host_status.id][self.event.state])) return false end - -- service_id != 0 means ack is on a service - else + -- service_id != 0 means ack is on a service + else -- return false if we can't get service description of service id is nil if not self:is_valid_service() then self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service with id: " .. tostring(self.event.service_id) .. " hasn't been validated") @@ -992,14 +975,14 @@ function ScEvent:is_valid_acknowledgement_event() -- return false if event status is not accepted if not self:is_valid_event_status(event_status) then - self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service with id: " .. tostring(self.event.service_id) + self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service with id: " .. tostring(self.event.service_id) .. " hasn't a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.params.bbdo.elements.service_status.id][self.event.state])) return false end -- return false if service has not an accepted severity if not self:is_valid_service_severity() then - self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service id: " .. tostring(self.event.service_id) + self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service id: " .. tostring(self.event.service_id) .. ". host_id: " .. tostring(self.event.host_id) .. ". Service has not an accepted severity") return false end @@ -1013,11 +996,11 @@ function ScEvent:is_valid_acknowledgement_event() -- return false if host is not in an accepted hostgroup if not self:is_valid_hostgroup() then - self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service_id: " .. 
tostring(self.event.service_id) + self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service_id: " .. tostring(self.event.service_id) .. " is not in an accepted hostgroup. Host ID is: " .. tostring(self.event.host_id)) return false end - + return true end @@ -1053,10 +1036,10 @@ function ScEvent:is_valid_downtime_event() if self.event.type == 2 then -- store the result in the self.event.state because doing that allow us to use the is_valid_event_status method self.event.state = self:get_downtime_host_status() - + -- checks if the current host downtime state is an accpeted status if not self:is_valid_event_status(self.params.dt_host_status) then - self.sc_logger:warning("[sc_event:is_valid_downtime_event]: host_id: " .. tostring(self.event.host_id) + self.sc_logger:warning("[sc_event:is_valid_downtime_event]: host_id: " .. tostring(self.event.host_id) .. " do not have a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.type][self.event.state]) .. " Accepted states are: " .. tostring(self.params.dt_host_status)) return false @@ -1070,10 +1053,10 @@ function ScEvent:is_valid_downtime_event() -- store the result in the self.event.state because doing that allow us to use the is_valid_event_status method self.event.state = self:get_downtime_service_status() - + -- return false if event status is not accepted if not self:is_valid_event_status(self.params.dt_service_status) then - self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service with id: " .. tostring(self.event.service_id) + self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service with id: " .. tostring(self.event.service_id) .. " hasn't a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.type][self.event.state]) .. " Accepted states are: " .. 
tostring(self.params.dt_service_status)) return false @@ -1081,7 +1064,7 @@ function ScEvent:is_valid_downtime_event() -- return false if service has not an accepted severity if not self:is_valid_service_severity() then - self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service id: " .. tostring(self.event.service_id) + self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service id: " .. tostring(self.event.service_id) .. ". host_id: " .. tostring(self.event.host_id) .. ". Service has not an accepted severity") return false end @@ -1095,7 +1078,7 @@ function ScEvent:is_valid_downtime_event() -- return false if host is not in an accepted hostgroup if not self:is_valid_hostgroup() then - self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service_id: " .. tostring(self.event.service_id) + self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service_id: " .. tostring(self.event.service_id) .. " is not in an accepted hostgroup. Host ID is: " .. tostring(self.event.host_id)) return false end @@ -1106,7 +1089,7 @@ end --- is_valid_author: check if the author of a comment is valid based on contact alias in Centreon -- return true|false (boolean) function ScEvent:is_valid_author() - -- return true if options are not set or if both options are set + -- return true if options are not set or if both options are set local accepted_authors_isnotempty = self.params.accepted_authors ~= "" local rejected_authors_isnotempty = self.params.rejected_authors ~= "" if (not accepted_authors_isnotempty and not rejected_authors_isnotempty) or (accepted_authors_isnotempty and rejected_authors_isnotempty) then @@ -1117,7 +1100,7 @@ function ScEvent:is_valid_author() local accepted_author_name = self:find_author_in_list(self.params.accepted_authors) local rejected_author_name = self:find_author_in_list(self.params.rejected_authors) if accepted_authors_isnotempty and not accepted_author_name then - self.sc_logger:debug("[sc_event:is_valid_author]: dropping 
event because author: " .. tostring(self.event.author) + self.sc_logger:debug("[sc_event:is_valid_author]: dropping event because author: " .. tostring(self.event.author) .. " is not in an accepted authors list. Accepted authors are: " .. self.params.accepted_authors) return false elseif rejected_authors_isnotempty and rejected_author_name then @@ -1166,11 +1149,11 @@ end -- return status (number) the status code of the service function ScEvent:get_downtime_service_status() -- if cache is not filled we can't get the state of the service - if - not self.event.cache.service.last_time_ok - or not self.event.cache.service.last_time_warning - or not self.event.cache.service.last_time_critical - or not self.event.cache.service.last_time_unknown + if + not self.event.cache.service.last_time_ok + or not self.event.cache.service.last_time_warning + or not self.event.cache.service.last_time_critical + or not self.event.cache.service.last_time_unknown then return "N/A" end @@ -1196,7 +1179,7 @@ function ScEvent:get_most_recent_status_code(timestamp) highest_timestamp = 0, status = nil } - + -- compare all status timestamp and keep the most recent one and the corresponding status code for status_code, status_timestamp in ipairs(timestamp) do if status_timestamp > status_info.highest_timestamp then @@ -1221,7 +1204,7 @@ function ScEvent:is_service_status_event_duplicated() if self.event.last_hard_state_change == self.event.last_check or self.event.last_hard_state_change == self.event.last_update then return false end - + return true --[[ IT LOOKS LIKE THIS PIECE OF CODE IS USELESS @@ -1293,7 +1276,7 @@ function ScEvent:is_downtime_event_useless() if self:is_valid_downtime_event_start() then return true end - + -- return false if downtime event is not a valid end of downtime event if self:is_valid_downtime_event_end() then return true @@ -1349,7 +1332,7 @@ function ScEvent:is_valid_downtime_event_end() return true end - + -- any other downtime event is not about the actual end 
of a downtime so we return false self.sc_logger:debug("[sc_event:is_valid_downtime_event_end]: deletion_time not found in the downtime event or equal to -1. The downtime event is not about the end of a downtime") return false @@ -1376,7 +1359,7 @@ function ScEvent:build_outputs() if self.params.use_long_output == 0 and short_output then self.event.output = short_output - -- replace line break if asked to and we are not already using a short output + -- replace line break if asked to and we are not already using a short output elseif not short_output and self.params.remove_line_break_in_output == 1 then self.event.output = string.gsub(self.event.output, "\n", self.params.output_line_break_replacement_character) end @@ -1394,4 +1377,4 @@ function ScEvent:is_valid_storage_event() return true end -return sc_event \ No newline at end of file +return sc_event diff --git a/modules/centreon-stream-connectors-lib/sc_flush.lua b/modules/centreon-stream-connectors-lib/sc_flush.lua index 71ab72d3..e7db0cdf 100644 --- a/modules/centreon-stream-connectors-lib/sc_flush.lua +++ b/modules/centreon-stream-connectors-lib/sc_flush.lua @@ -1,6 +1,6 @@ #!/usr/bin/lua ---- +--- -- Module that handles data queue for stream connectors -- @module sc_flush -- @alias sc_flush @@ -16,10 +16,10 @@ local ScFlush = {} -- @param [opt] sc_logger (object) a sc_logger object function sc_flush.new(params, logger) local self = {} - + -- create a default logger if it is not provided self.sc_logger = logger - if not self.sc_logger then + if not self.sc_logger then self.sc_logger = sc_logger.new() end @@ -37,7 +37,7 @@ function sc_flush.new(params, logger) [categories.bam.id] = {}, global_queues_metadata = {} } - + -- link events queues to their respective categories and elements for element_name, element_info in pairs(self.params.accepted_elements_info) do self.queues[element_info.category_id][element_info.element_id] = { @@ -62,7 +62,7 @@ function ScFlush:add_queue_metadata(category_id, element_id, 
metadata) self.sc_logger:warning("[ScFlush:add_queue_metadata]: can't add queue metadata for category: " .. self.params.reverse_category_mapping[category_id] .. " (id: " .. category_id .. ") and element: " .. self.params.reverse_element_mapping[category_id][element_id] .. " (id: " .. element_id .. ")." .. ". metadata name: " .. tostring(metadata_name) .. ", metadata value: " .. tostring(metadata_value) - .. ". You need to accept this category with the parameter 'accepted_categories'.") + .. ". You need to accept this category with the parameter 'accepted_categories'.") return end @@ -156,14 +156,14 @@ function ScFlush:flush_mixed_payload(build_payload_method, send_method) -- all events have been sent return true -end +end --- flush_homogeneous_payload: flush a payload that contains a single type of events (services with services only and hosts with hosts only for example) -- @return boolean (boolean) true or false depending on the success of the operation function ScFlush:flush_homogeneous_payload(build_payload_method, send_method) local counter = 0 local payload = nil - + -- get all queues for _, element_info in pairs(self.params.accepted_elements_info) do -- get events from queues @@ -171,17 +171,17 @@ function ScFlush:flush_homogeneous_payload(build_payload_method, send_method) -- add event to the payload payload = build_payload_method(payload, event) counter = counter + 1 - + -- send events if max buffer size is reached if counter >= self.params.max_buffer_size then if not self:flush_payload( - send_method, - payload, + send_method, + payload, self.queues[element_info.category_id][element_info.element_id].queue_metadata ) then return false end - + -- reset payload and counter because events have been sent counter = 0 payload = nil @@ -190,8 +190,8 @@ function ScFlush:flush_homogeneous_payload(build_payload_method, send_method) -- make sure there are no events left inside a specific queue if not self:flush_payload( - send_method, - payload, + send_method, + 
payload, self.queues[element_info.category_id][element_info.element_id].queue_metadata ) then return false @@ -210,21 +210,13 @@ end -- @param metadata (table) all metadata for the payload -- @return boolean (boolean) true or false depending on the success of the operation function ScFlush:flush_payload(send_method, payload, metadata) - -- when the payload doesn't exist or is empty, we just tell broker that everything is fine on the stream connector side - if not payload or payload == "" then - return true - end - - local pcall_status, result = pcall(send_method, payload, metadata) - - self.sc_logger:debug("[sc_flush:flush_payload]: tried to send payload protected by pcall. Status: " .. tostring(pcall_status) .. ", Message: " .. tostring(result)) - - if not pcall_status then - self.sc_logger:error("[sc_flush:flush_payload]: could not send payload because of an internal error. pcall status: " .. tostring(pcall_status) .. ", error message: " .. tostring(result)) - return false + if payload then + if not send_method(payload, metadata) then + return false + end end - return result + return true end return sc_flush \ No newline at end of file diff --git a/modules/centreon-stream-connectors-lib/sc_logger.lua b/modules/centreon-stream-connectors-lib/sc_logger.lua index e5026530..2622cf4f 100644 --- a/modules/centreon-stream-connectors-lib/sc_logger.lua +++ b/modules/centreon-stream-connectors-lib/sc_logger.lua @@ -1,6 +1,6 @@ #!/usr/bin/lua ---- +--- -- Logging module for centreon stream connectors -- @module sc_logger -- @alias sc_logger @@ -9,12 +9,12 @@ local sc_logger = {} --- build_message: prepare log message -- @param severity (string) the severity of the message (WARNING, CRITIAL...) --- @param message (string) the log message +-- @param message (string) the log message -- @return ouput (string) the formated log message local function build_message(severity, message) local date = os.date("%a %b %d %H:%M:%S %Y") local output = date .. ": " .. severity .. ": " .. 
message .. "\n" - + return output end @@ -41,7 +41,7 @@ local ScLogger = {} --- sc_logger.new: sc_logger constructor -- @param [opt] logfile (string) output file for logs --- @param [opt] severity (integer) the accepted severity level +-- @param [opt] severity (integer) the accepted severity level function sc_logger.new(logfile, severity) local self = {} self.severity = severity @@ -52,12 +52,24 @@ function sc_logger.new(logfile, severity) self.logfile = logfile or "/var/log/centreon-broker/stream-connector.log" broker_log:set_parameters(self.severity, self.logfile) - + + self.trace_action = "" + self.params = params + setmetatable(self, { __index = ScLogger }) return self end +function ScLogger:set_params(params) + self.params = params + self:notice(self.params.enable_broker_cache_counter_check) +end + +function ScLogger:set_common_object(sc_common) + self.sc_common = sc_common +end + --- error: write an error message -- @param message (string) the message that will be written function ScLogger:error(message) @@ -79,7 +91,7 @@ end -- info: write an informational message -- @param message (string) the message that will be written function ScLogger:info(message) - broker_log:info(2,message) + broker_log:info(2, message) end --- debug: write a debug message @@ -104,18 +116,18 @@ function ScLogger:log_curl_command(url, metadata, params, data, basic_auth) .. params.proxy_address .. ", port: " .. params.proxy_port .. ", user: " .. params.proxy_username .. ", password: " .. tostring(params.proxy_password)) local proxy_url - - if params.proxy_address ~= "" then + + if params.proxy_address ~= "" then if params.proxy_username ~= "" then proxy_url = params.proxy_protocol .. "://" .. params.proxy_username .. ":" .. params.proxy_password .. "@" .. params.proxy_address .. ":" .. params.proxy_port else proxy_url = params.proxy_protocol .. "://" .. params.proxy_address .. ":" .. params.proxy_port end - + curl_string = curl_string .. " --proxy '" .. proxy_url .. 
"'" end - + -- handle certificate verification -- It's false because of this part: Tell libcurl to not verify the peer. With libcurl you disable this with curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE); if params.verify_certificate == false then @@ -130,16 +142,16 @@ function ScLogger:log_curl_command(url, metadata, params, data, basic_auth) else curl_string = curl_string .. " -X GET" end - + -- handle headers if metadata.headers then for _, header in ipairs(metadata.headers) do curl_string = curl_string .. " -H '" .. tostring(header) .. "'" end end - + curl_string = curl_string .. " '" .. tostring(url) .. "'" - + -- handle curl data if data and data ~= "" then curl_string = curl_string .. " -d '" .. data .. "'" @@ -149,11 +161,66 @@ function ScLogger:log_curl_command(url, metadata, params, data, basic_auth) if basic_auth then curl_string = curl_string .. " -u '" .. basic_auth.username .. ":" .. basic_auth.password .. "'" end - + self:notice("[sc_logger:log_curl_command]: " .. curl_string) else self:debug("[sc_logger:log_curl_command]: curl command not logged because log_curl_commands param is set to: " .. params.log_curl_commands) end end -return sc_logger \ No newline at end of file +function ScLogger:log_trace(step, host_id, flush) + if not self.params then + return + end + + if self.params.enable_trace == 0 then + return + end + + if not host_id and not flush then + return + end + + if not self.params.trace_host_id_list[host_id] and not flush then + return + end + + if type(self.trace_action) ~= "table" then + self.trace_action = {} + end + + -- self:notice(self.sc_common:dumper(self.trace_action)) + + if not self.trace_action[host_id] and host_id then + self.trace_action[host_id] = {} + end + + -- self:notice(self.sc_common:dumper(self.trace_action)) + + if host_id then + self.trace_action[host_id][step] = true + end + -- local log_string = "[TRACE][" .. tostring(func_name) .. "][" .. tostring(step) .. "]: " .. 
tostring(self.params.trace_host_id_list[host_id]) .. " " .. tostring(action) + -- self:notice(log_string) + if flush and type(self.trace_action) == "table" then + local msg = "| host id | action | data |\n| -- | -- | -- |\n" + local h_name + -- self:notice(self.sc_common:dumper(self.trace_action)) + for host_id, trace_info in pairs(self.trace_action) do + h_name = broker_cache:get_hostname(host_id) + if not h_name then + h_name = host_id + end + + for step_name, value in pairs(trace_info) do + msg = msg .. "| " .. tostring(h_name) .. " | " .. tostring(step_name) .. " | " .. tostring(value) .. " |\n" + end + end + + msg = msg .. "\n\n| sent payload | result |\n| -- | -- |\n| " .. tostring(flush.payload) .. " | " .. tostring(flush.result) .. " |\n" + self.trace_action = "" + self:notice(msg) + end +end + +return sc_logger diff --git a/modules/centreon-stream-connectors-lib/sc_macros.lua b/modules/centreon-stream-connectors-lib/sc_macros.lua index 5f72e532..44d2938b 100644 --- a/modules/centreon-stream-connectors-lib/sc_macros.lua +++ b/modules/centreon-stream-connectors-lib/sc_macros.lua @@ -1,6 +1,6 @@ #!/usr/bin/lua ---- +--- -- Module to handle centreon macros (e.g: $HOSTADDRESS$) and sc macros (e.g: {cache.host.address}) -- @module sc_macros -- @alias sc_macros @@ -20,7 +20,7 @@ function sc_macros.new(params, logger, common) -- initiate mandatory libs self.sc_logger = logger - if not self.sc_logger then + if not self.sc_logger then self.sc_logger = sc_logger.new() end @@ -47,12 +47,12 @@ function sc_macros.new(params, logger, common) -- mapping of macro that we will convert if asked self.transform_macro = { - date = function (macro_value) return self:transform_date(macro_value) end, - type = function (macro_value) return self:transform_type(macro_value) end, - short = function (macro_value) return self:transform_short(macro_value) end, - state = function (macro_value, event) return self:transform_state(macro_value, event) end, - number = function (macro_value) 
return self:transform_number(macro_value) end, - string = function (macro_value) return self:transform_string(macro_value) end + date = function(macro_value) return self:transform_date(macro_value) end, + type = function(macro_value) return self:transform_type(macro_value) end, + short = function(macro_value) return self:transform_short(macro_value) end, + state = function(macro_value, event) return self:transform_state(macro_value, event) end, + number = function(macro_value) return self:transform_number(macro_value) end, + string = function(macro_value) return self:transform_string(macro_value) end } -- mapping of centreon standard macros to their stream connectors counterparts @@ -77,7 +77,7 @@ function sc_macros.new(params, logger, common) -- HOSTDURATION doesn't exist -- HOSTDURATIONSEC doesn't exist HOSTDOWNTIME = "{cache.host.scheduled_downtime_depth}", - HOSTPERCENTCHANGE = "{percent_state_change}" , -- will be replaced by the service percent_state_change if event is about a service + HOSTPERCENTCHANGE = "{percent_state_change}", -- will be replaced by the service percent_state_change if event is about a service -- HOSTGROUPNAME doesn't exist -- HOSTGROUPNAMES doesn't exist LASTHOSTCHECK = "{cache.host.last_check_value}", @@ -207,17 +207,17 @@ function ScMacros:replace_sc_macro(string, event, json_string) -- will generate two macros {cache.host.name} and {host_id}) for macro in string.gmatch(string, "{[%w_.%(%),%%%+%-%*%?%[%]%^%$]+}") do self.sc_logger:debug("[sc_macros:replace_sc_macro]: found a macro, name is: " .. 
tostring(macro)) - + -- check if macro is in the cache cache_macro_value = self:get_cache_macro(macro, event) - + -- replace all cache macro such as {cache.host.name} with their values if cache_macro_value then converted_string = self:build_converted_string_for_cache_and_event_macro(cache_macro_value, macro, converted_string) else -- if not in cache, try to find a matching value in the event itself event_macro_value = self:get_event_macro(macro, event) - + -- replace all event macro such as {host_id} with their values if event_macro_value then converted_string = self:build_converted_string_for_cache_and_event_macro(event_macro_value, macro, converted_string) @@ -229,11 +229,11 @@ function ScMacros:replace_sc_macro(string, event, json_string) if group_macro_value then group_macro_value = broker.json_encode(group_macro_value) macro = self.sc_common:lua_regex_escape(macro) - + self.sc_logger:debug("[sc_macros:replace_sc_macro]: macro is a group macro. Macro name: " .. tostring(macro) .. ", value is: " .. tostring(group_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string) .. ". Applied format is: " .. tostring(format)) - + if string.match(converted_string, '"' .. macro .. '"') then converted_string = string.gsub(converted_string, '"' .. macro .. 
'"', group_macro_value) else @@ -285,7 +285,7 @@ function ScMacros:get_cache_macro(raw_macro, event) if event.cache[cache_type] then -- check if it is asked to transform the macro and if so, separate the real macro from the transformation flag local macro_value, flag = self:get_transform_flag(macro) - + -- check if the macro is in the cache if event.cache[cache_type][macro_value] then if flag then @@ -314,12 +314,12 @@ function ScMacros:get_event_macro(macro, event) -- check if it is asked to transform the macro and if so, separate the real macro from the transformation flag local macro_value, flag = self:get_transform_flag(macro) - + -- check if the macro is in the event if event[macro_value] then if flag then self.sc_logger:info("[sc_macros:get_event_macro]: macro has a flag associated. Flag is: " .. tostring(flag) - .. ", a macro value conversion will be done. Macro value is: " .. tostring(macro_value)) + .. ", a macro value conversion will be done. Macro value is: " .. tostring(macro_value)) -- convert the found value according to the flag that has been sent return self.transform_macro[flag](event[macro_value], event) else @@ -351,7 +351,7 @@ function ScMacros:get_group_macro(macro, event) if not code then self.sc_logger:error("[sc_macros:get_group_macro]: couldn't convert data for group type: " .. tostring(group_type) .. ". Desired format: " .. tostring(format) .. ". Filtering using regex: " .. tostring(regex)) - return false + return false end return converted_data, format @@ -395,7 +395,7 @@ function ScMacros:build_group_macro_value(data, index_name, format, regex) table.insert(result, group_info[index_name]) end end - + if not self.group_macro_format[format] then self.sc_logger:error("[sc_macros:build_group_macro_value]: unknown format for group macro. Format provided: " .. 
tostring(format)) return false @@ -437,7 +437,7 @@ function ScMacros:convert_centreon_macro(string, event) local centreon_macro = false local sc_macro_value = false local converted_string = string - + -- get all standard macros for macro in string.gmatch(string, "$%w$") do self.sc_logger:debug("[sc_macros:convert_centreon_macro]: found a macro, name is: " .. tostring(macro)) @@ -447,11 +447,11 @@ function ScMacros:convert_centreon_macro(string, event) -- if the macro has been found, try to get its value if centreon_macro then sc_macro_value = self:replace_sc_macro(centreon_macro, event) - + -- if a value has been found, replace the macro with the value if sc_macro_value then self.sc_logger:debug("[sc_macros:replace_sc_macro]: macro is a centreon macro. Macro name: " - .. tostring(macro) .. ", value is: " .. tostring(sc_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string)) + .. tostring(macro) .. ", value is: " .. tostring(sc_macro_value) .. ", trying to replace it in the string: " .. 
tostring(converted_string)) converted_string = string.gsub(converted_string, centreon_macro, sc_macro_value) end else @@ -478,7 +478,7 @@ end function ScMacros:get_transform_flag(macro) -- separate macro and flag local macro_value, flag = string.match(macro, "(.*)_sc(%w+)$") - + -- if there was a flag in the macro name, return the real macro name and its flag if macro_value then return macro_value, flag @@ -499,7 +499,7 @@ end -- @param macro_value (string) the string that needs to be shortened -- @return string (string) the input string with only the first lne function ScMacros:transform_short(macro_value) - return string.match(macro_value, "^(.*)\n") or macro_value + return string.match(macro_value, "^(.*)\n") end --- transform_type: convert a 0, 1 value into SOFT or HARD @@ -553,10 +553,10 @@ function ScMacros:build_converted_string_for_cache_and_event_macro(macro_value, -- need to escape % characters or else it will break the string.gsub that is done later local clean_macro_value, _ = string.gsub(macro_value, "%%", "%%%%") local clean_macro_value_json = "" - + self.sc_logger:debug("[sc_macros:build_converted_string_for_cache_and_event_macro]: macro is a cache macro. Macro name: " - .. tostring(macro) .. ", value is: " .. tostring(clean_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string)) - + .. tostring(macro) .. ", value is: " .. tostring(clean_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string)) + --[[ to have the best json possible, we try to remove double quotes. 
"service_severity": "{cache.severity.service}" must become "service_severity": 1 and not "service_severity": "1" diff --git a/modules/centreon-stream-connectors-lib/sc_metrics.lua b/modules/centreon-stream-connectors-lib/sc_metrics.lua index 3fc65356..6a04f220 100644 --- a/modules/centreon-stream-connectors-lib/sc_metrics.lua +++ b/modules/centreon-stream-connectors-lib/sc_metrics.lua @@ -1,6 +1,6 @@ #!/usr/bin/lua ---- +--- -- Module that handles event metrics for stream connectors -- @module sc_metrics -- @alias sc_metrics @@ -25,10 +25,10 @@ function sc_metrics.new(event, params, common, broker, logger) -- create a default logger if it is not provided self.sc_logger = logger - if not self.sc_logger then + if not self.sc_logger then self.sc_logger = sc_logger.new() end - + self.sc_common = common self.params = params self.sc_broker = broker @@ -39,21 +39,21 @@ function sc_metrics.new(event, params, common, broker, logger) -- store metric validation functions inside a table linked to category/element self.metric_validation = { [categories.neb.id] = { - [elements.host.id] = function () return self:is_valid_host_metric_event() end, + [elements.host.id] = function() return self:is_valid_host_metric_event() end, [elements.host_status.id] = function() return self:is_valid_host_metric_event() end, - [elements.service.id] = function () return self:is_valid_service_metric_event() end, - [elements.service_status.id] = function () return self:is_valid_service_metric_event() end + [elements.service.id] = function() return self:is_valid_service_metric_event() end, + [elements.service_status.id] = function() return self:is_valid_service_metric_event() end }, [categories.bam.id] = { - [elements.kpi_event.id] = function () return self:is_valid_kpi_metric_event() end + [elements.kpi_event.id] = function() return self:is_valid_kpi_metric_event() end } } --- open metric (prometheus) : metric name = [a-zA-Z0-9_:], labels [a-zA-Z0-9_] 
https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#protocol-negotiation --- datadog : metric_name = [a-zA-Z0-9_.] https://docs.datadoghq.com/fr/metrics/custom_metrics/#naming-custom-metrics --- dynatrace matric name [a-zA-Z0-9-_.] https://dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol#metric-key --- metric 2.0 (carbon/grafite/grafana) [a-zA-Z0-9-_./] http://metrics20.org/spec/ (see Data Model section) --- splunk [^a-zA-Z0-9_] + -- open metric (prometheus) : metric name = [a-zA-Z0-9_:], labels [a-zA-Z0-9_] https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#protocol-negotiation + -- datadog : metric_name = [a-zA-Z0-9_.] https://docs.datadoghq.com/fr/metrics/custom_metrics/#naming-custom-metrics + -- dynatrace matric name [a-zA-Z0-9-_.] https://dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol#metric-key + -- metric 2.0 (carbon/grafite/grafana) [a-zA-Z0-9-_./] http://metrics20.org/spec/ (see Data Model section) + -- splunk [^a-zA-Z0-9_] if self.params.metrics_name_custom_regex and self.params.metrics_name_custom_regex ~= "" then self.metrics_name_operations.custom.regex = self.params.metrics_custom_regex @@ -84,7 +84,7 @@ function ScMetrics:is_valid_bbdo_element() -- drop event if event category is not accepted if not self.sc_event:find_in_mapping(self.params.category_mapping, self.params.accepted_categories, event_category) then - self.sc_logger:debug("[sc_metrics:is_valid_bbdo_element] event with category: " .. tostring(event_category) .. " is not an accepted category") + self.sc_logger:debug("[sc_metrics:is_valid_bbdo_element] event with category: " .. tostring(event_category) .. 
" is not an accepted category") return false else -- drop event if accepted category is not supposed to be used for a metric stream connector @@ -95,7 +95,7 @@ function ScMetrics:is_valid_bbdo_element() else -- drop event if element is not accepted if not self.sc_event:find_in_mapping(self.params.element_mapping[event_category], self.params.accepted_elements, event_element) then - self.sc_logger:debug("[sc_metrics:is_valid_bbdo_element] event with element: " .. tostring(event_element) .. " is not an accepted element") + self.sc_logger:debug("[sc_metrics:is_valid_bbdo_element] event with element: " .. tostring(event_element) .. " is not an accepted element") return false else -- drop event if element is not an element that carries perfdata @@ -119,7 +119,7 @@ end function ScMetrics:is_valid_metric_event() category = self.sc_event.event.category element = self.sc_event.event.element - + self.sc_logger:debug("[sc_metrics:is_valid_metric_event]: starting validation for event with category: " .. tostring(category) .. ". And element: " .. tostring(element)) return self.metric_validation[category][element]() @@ -134,24 +134,32 @@ function ScMetrics:is_valid_host_metric_event() return false end + self.sc_logger:log_trace("valid_host", self.sc_event.event.host_id) + -- return false if host is not monitored from an accepted poller if not self.sc_event:is_valid_poller() then self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. " is not monitored from an accepted poller") return false end + self.sc_logger:log_trace("valid_poller", self.sc_event.event.host_id) + -- return false if host has not an accepted severity if not self.sc_event:is_valid_host_severity() then self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. 
" has not an accepted severity") return false end + self.sc_logger:log_trace("valid_host_severity", self.sc_event.event.host_id) + -- return false if host is not in an accepted hostgroup if not self.sc_event:is_valid_hostgroup() then self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. " is not in an accepted hostgroup") return false end + self.sc_logger:log_trace("valid_hostgroup", self.sc_event.event.host_id) + -- return false if there is no perfdata or it can't be parsed if not self:is_valid_perfdata(self.sc_event.event.perfdata) then self.sc_logger:warning("[sc_metrics:is_vaild_host_metric_event]: host_id: " @@ -159,6 +167,8 @@ function ScMetrics:is_valid_host_metric_event() return false end + self.sc_logger:log_trace("valid_perfdata", self.sc_event.event.host_id) + return true end @@ -171,46 +181,58 @@ function ScMetrics:is_valid_service_metric_event() return false end + self.sc_logger:log_trace("valid_host", self.sc_event.event.host_id) + -- return false if we can't get service description of service id is nil if not self.sc_event:is_valid_service() then self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service with id: " .. tostring(self.sc_event.event.service_id) .. " hasn't been validated") return false end + self.sc_logger:log_trace("valid_service", self.sc_event.event.host_id) + -- return false if host is not monitored from an accepted poller if not self.sc_event:is_valid_poller() then - self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service id: " .. tostring(self.sc_event.event.service_id) + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service id: " .. tostring(self.sc_event.event.service_id) .. ". host_id: " .. tostring(self.sc_event.event.host_id) .. 
" is not monitored from an accepted poller") return false end -- return false if host has not an accepted severity if not self.sc_event:is_valid_host_severity() then - self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service id: " .. tostring(self.sc_event.event.service_id) + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service id: " .. tostring(self.sc_event.event.service_id) .. ". host_id: " .. tostring(self.sc_event.event.host_id) .. ". Host has not an accepted severity") return false end + self.sc_logger:log_trace("valid_host_severity", self.sc_event.event.host_id) + -- return false if service has not an accepted severity if not self.sc_event:is_valid_service_severity() then - self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service id: " .. tostring(self.sc_event.event.service_id) + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service id: " .. tostring(self.sc_event.event.service_id) .. ". host_id: " .. tostring(self.sc_event.event.host_id) .. ". Service has not an accepted severity") return false end + self.sc_logger:log_trace("valid_service_severity", self.sc_event.event.host_id) + -- return false if host is not in an accepted hostgroup if not self.sc_event:is_valid_hostgroup() then - self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service_id: " .. tostring(self.sc_event.event.service_id) + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service_id: " .. tostring(self.sc_event.event.service_id) .. " is not in an accepted hostgroup. Host ID is: " .. tostring(self.sc_event.event.host_id)) return false end + self.sc_logger:log_trace("valid_hostgroup", self.sc_event.event.host_id) + -- return false if service is not in an accepted servicegroup if not self.sc_event:is_valid_servicegroup() then self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service_id: " .. tostring(self.sc_event.event.service_id) .. 
" is not in an accepted servicegroup") return false end + self.sc_logger:log_trace("valid_servicegroup", self.sc_event.event.host_id) + -- return false if there is no perfdata or they it can't be parsed if not self:is_valid_perfdata(self.sc_event.event.perfdata) then self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service_id: " @@ -218,6 +240,8 @@ function ScMetrics:is_valid_service_metric_event() return false end + self.sc_logger:log_trace("valid_perfdata", self.sc_event.event.host_id) + return true end @@ -270,6 +294,13 @@ function ScMetrics:build_metric(format_metric) local metrics_info = self.metrics_info for metric, metric_data in pairs(self.metrics_info) do + if metrics_info[metric].instance ~= "" then + if #metrics_info[metric].subinstance ~= 0 then + metrics_info[metric].metric_name = metrics_info[metric].instance .. '~' .. table.concat(metrics_info[metric].subinstance, '~') .. '#' .. metrics_info[metric].metric_name + else + metrics_info[metric].metric_name = metrics_info[metric].instance .. '#' .. 
metrics_info[metric].metric_name + end + end if string.match(metric_data.metric_name, self.params.accepted_metrics) then metrics_info[metric].metric_name = string.gsub(metric_data.metric_name, self.params.metric_name_regex, self.params.metric_replacement_character) -- use stream connector method to format the metric event @@ -280,4 +311,4 @@ function ScMetrics:build_metric(format_metric) end end -return sc_metrics \ No newline at end of file +return sc_metrics diff --git a/modules/centreon-stream-connectors-lib/sc_params.lua b/modules/centreon-stream-connectors-lib/sc_params.lua index 37ce7074..28043c29 100644 --- a/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/modules/centreon-stream-connectors-lib/sc_params.lua @@ -1,7 +1,5 @@ #!/usr/bin/lua -broker_api_version = 2 - --- -- Module to help initiate a stream connector with all paramaters -- @module sc_params @@ -1075,7 +1073,7 @@ function ScParams:get_kafka_params(kafka_config, params) end self.logger:notice("[sc_param:get_kafka_params]: " .. tostring(param_name) - .. " parameter with value " .. tostring(logged_param_value) .. " added to kafka_config") + .. " parameter with value " .. tostring(logged_param_value) .. " added to kafka_config") end end end @@ -1089,7 +1087,7 @@ function ScParams:is_mandatory_config_set(mandatory_params, params) for index, mandatory_param in ipairs(mandatory_params) do if not params[mandatory_param] or params[mandatory_param] == "" then self.logger:error("[sc_param:is_mandatory_config_set]: " .. tostring(mandatory_param) - .. " parameter is not set in the stream connector web configuration (or value is empty)") + .. " parameter is not set in the stream connector web configuration (or value is empty)") return false end @@ -1151,7 +1149,7 @@ function ScParams:load_custom_code_file(custom_code_file) -- return false if we can't open the file if not file then self.logger:error("[sc_params:load_custom_code_file]: couldn't open file " - .. tostring(custom_code_file) .. ". 
Make sure your file is there and that it is readable by centreon-broker") + .. tostring(custom_code_file) .. ". Make sure your file is there and that it is readable by centreon-broker") return false end @@ -1163,7 +1161,7 @@ function ScParams:load_custom_code_file(custom_code_file) for return_value in string.gmatch(file_content, "return (.-)\n") do if return_value ~= "self, true" and return_value ~= "self, false" then self.logger:error("[sc_params:load_custom_code_file]: your custom code file: " .. tostring(custom_code_file) - .. " is returning wrong values (" .. tostring(return_value) .. "). It must only return 'self, true' or 'self, false'") + .. " is returning wrong values (" .. tostring(return_value) .. "). It must only return 'self, true' or 'self, false'") return false end end @@ -1208,7 +1206,7 @@ end function ScParams:validate_pattern_param(param_name, param_value) if not self.common:validate_pattern(param_value) then self.logger:error("[sc_params:validate_pattern_param]: couldn't validate Lua pattern: " .. tostring(param_value) - .. " for parameter: " .. tostring(param_name) .. ". The filter will be reset to an empty value.") + .. " for parameter: " .. tostring(param_name) .. ". The filter will be reset to an empty value.") return "" end @@ -1242,12 +1240,12 @@ function ScParams:build_and_validate_filters_pattern(param_list) if self.common:is_valid_pattern(temp_pattern) then table.insert(self.params[param_name .. "_pattern_list"], temp_pattern) self.logger:notice("[sc_params:build_accepted_filters_pattern]: adding " .. tostring(temp_pattern) - .. " to the list of filtering patterns for parameter: " .. param_name) + .. " to the list of filtering patterns for parameter: " .. param_name) else -- if the sub pattern is not valid, just ignore it self.logger:error("[sc_params:build_accepted_filters_pattern]: ignoring pattern for param: " - .. param_name .. " because after splitting the string:" .. param_name - .. ", we end up with the following pattern: " .. 
tostring(temp_pattern) .. " which is not a valid Lua pattern") + .. param_name .. " because after splitting the string:" .. param_name + .. ", we end up with the following pattern: " .. tostring(temp_pattern) .. " which is not a valid Lua pattern") end end else diff --git a/modules/centreon-stream-connectors-lib/sc_storage.lua b/modules/centreon-stream-connectors-lib/sc_storage.lua index 056cb301..43b273df 100644 --- a/modules/centreon-stream-connectors-lib/sc_storage.lua +++ b/modules/centreon-stream-connectors-lib/sc_storage.lua @@ -33,8 +33,8 @@ function sc_storage.new(common, logger, params) self.storage_backend = storage_backend.new(self.sc_common, logger, params) else self.sc_logger:error("[sc_storage:new]: Couldn't load storage backend: " .. tostring(params.storage_backend) - .. ". Make sure that the file sc_storage_" .. tostring(params.storage_backend) .. ".lua exists on your server." - .. " The stream connector is going to use the broker storage backend.") + .. ". Make sure that the file sc_storage_" .. tostring(params.storage_backend) .. ".lua exists on your server." + .. " The stream connector is going to use the broker storage backend.") self.storage_backend = require("centreon-stream-connectors-lib.storage_backends.sc_storage_broker") end @@ -49,13 +49,13 @@ function ScStorage:is_valid_storage_object(object_id) for _, accepted_object_format in ipairs(self.storage_objects) do if string.match(object_id, accepted_object_format) then self.sc_logger:debug("[sc_storage:is_valid_storage_object]: object_id: " .. tostring(object_id) - .. " matched object format: " .. accepted_object_format) + .. " matched object format: " .. accepted_object_format) return true end end self.sc_logger:error("[sc_storage:is_valid_storage_object]: object id: " .. tostring(object_id) - .. " is not a valid object_id.") + .. 
" is not a valid object_id.") return false end @@ -86,7 +86,7 @@ function ScStorage:set_multiple(object_id, properties) if type(properties) ~= "table" then self.sc_logger:error("[sc_storage:set_multiple]: properties parameter is not a table" - .. ". Received properties: " .. self.sc_common:dumper(properties)) + .. ". Received properties: " .. self.sc_common:dumper(properties)) return false end @@ -108,7 +108,7 @@ function ScStorage:get(object_id, property) if not status then self.sc_logger:error("[sc_storage:get]: couldn't get property in storage. Object id: " .. tostring(object_id) - .. ", property name: " .. tostring(property)) + .. ", property name: " .. tostring(property)) end return status, value @@ -127,7 +127,7 @@ function ScStorage:get_multiple(object_id, properties) if type(properties) ~= "table" then self.sc_logger:error("[sc_storage:get_multiple]: properties parameter is not a table" - .. ". Received properties: " .. self.sc_common:dumper(properties)) + .. ". Received properties: " .. self.sc_common:dumper(properties)) return false end @@ -135,7 +135,7 @@ function ScStorage:get_multiple(object_id, properties) if not status then self.sc_logger:error("[sc_storage:get]: couldn't get property in storage. Object id: " .. tostring(object_id) - .. ", property name: " .. self.sc_common:dumper(properties)) + .. ", property name: " .. self.sc_common:dumper(properties)) end return status, value @@ -166,7 +166,7 @@ function ScStorage:delete_multiple(object_id, properties) if type(properties) ~= "table" then self.sc_logger:error("[sc_storage:delete_multiple]: properties parameter is not a table" - .. ". Received properties: " .. self.sc_common:dumper(properties)) + .. ". Received properties: " .. 
self.sc_common:dumper(properties)) return false end diff --git a/modules/centreon-stream-connectors-lib/sc_storage_sqlite.lua b/modules/centreon-stream-connectors-lib/storage_backends/sc_storage_sqlite.lua similarity index 94% rename from modules/centreon-stream-connectors-lib/sc_storage_sqlite.lua rename to modules/centreon-stream-connectors-lib/storage_backends/sc_storage_sqlite.lua index 4e7d251d..e170b325 100644 --- a/modules/centreon-stream-connectors-lib/sc_storage_sqlite.lua +++ b/modules/centreon-stream-connectors-lib/storage_backends/sc_storage_sqlite.lua @@ -26,7 +26,7 @@ function sc_storage_sqlite.new(common, logger, params) self.sc_logger:error("[sc_storage_sqlite:new]: couldn't open sqlite database: " .. tostring(params["sc_storage.sqlite.db_file"])) else self.sc_logger:notice("[sc_storage_sqlite:new]: successfully loaded sqlite storage database: " .. tostring(params["sc_storage.sqlite.db_file"]) - .. ". Status is: " .. tostring(self.sqlite:isopen())) + .. ". Status is: " .. tostring(self.sqlite:isopen())) end self.last_query_result = {} @@ -76,9 +76,9 @@ function ScStorageSqlite:get_query_result(convert_data, column_count, column_val -- only convert data when possible if convert_data - and self.convert_data_type[row.data_type] - and row[self.required_columns_for_data_type_conversion.value_column] - and row[self.required_columns_for_data_type_conversion.type_column] + and self.convert_data_type[row.data_type] + and row[self.required_columns_for_data_type_conversion.value_column] + and row[self.required_columns_for_data_type_conversion.type_column] then row.value = self.convert_data_type[row.data_type](row.value) end @@ -134,7 +134,7 @@ function ScStorageSqlite:run_query(query, get_result, convert_data) if self.sqlite:errcode() ~= 0 then self.sc_logger:error("[sc_storage_sqlite:run_query]: couldn't run query: " .. tostring(query) - .. ". [SQL ERROR CODE]: " .. self.sqlite:errcode() .. ". [SQL ERROR MESSAGE]: " .. tostring(self.sqlite:errmsg())) + .. ". 
[SQL ERROR CODE]: " .. self.sqlite:errcode() .. ". [SQL ERROR MESSAGE]: " .. tostring(self.sqlite:errmsg())) return false else self.sc_logger:debug("[sc_storage_sqlite:run_query]: successfully executed query: " .. tostring(query)) @@ -160,7 +160,7 @@ function ScStorageSqlite:set(object_id, property, value) if not self:run_query(query) then self.sc_logger:error("[sc_storage_sqlite:set]: couldn't insert property in storage. Object id: " .. tostring(object_id) - .. ", property name: " .. tostring(property) .. ", property value: " .. tostring(value)) + .. ", property name: " .. tostring(property) .. ", property value: " .. tostring(value)) return false end @@ -197,7 +197,7 @@ function ScStorageSqlite:set_multiple(object_id, properties) if not self:run_query(query) then self.sc_logger:error("[sc_storage_sqlite:set_multiple]: couldn't insert properties in storage. Object id: " .. tostring(object_id) - .. ", properties: " .. self.sc_common:dumper(properties)) + .. ", properties: " .. self.sc_common:dumper(properties)) return false end @@ -214,7 +214,7 @@ function ScStorageSqlite:get(object_id, property) if not self:run_query(query, true, true) then self.sc_logger:error("[sc_storage_sqlite:get]: couldn't get property in storage. Object id: " .. tostring(object_id) - .. ", property name: " .. tostring(property)) + .. ", property name: " .. tostring(property)) return false, "" end @@ -250,7 +250,7 @@ function ScStorageSqlite:get_multiple(object_id, properties) if not self:run_query(query, true, true) then self.sc_logger:error("[sc_storage_sqlite:get_multiple]: couldn't get properties in storage. Object id: " .. tostring(object_id) - .. ", properties: " .. self.sc_common:dumper(properties)) + .. ", properties: " .. self.sc_common:dumper(properties)) return false, {} end @@ -275,12 +275,12 @@ function ScStorageSqlite:delete(object_id, property) if not self:run_query(query) then self.sc_logger:error("[sc_storage_sqlite:delete]: couldn't delete property in storage. 
Object id: " .. tostring(object_id) - .. ", property name: " .. tostring(property)) + .. ", property name: " .. tostring(property)) return false end self.sc_logger:debug("[sc_storage_sqlite:delete]: successfully deleted property in storage for object id: " .. tostring(object_id) - .. ", property name: " .. tostring(property)) + .. ", property name: " .. tostring(property)) return true end @@ -305,12 +305,12 @@ function ScStorageSqlite:delete_multiple(object_id, properties) if not self:run_query(query) then self.sc_logger:error("[sc_storage_sqlite:delete_multiple]: couldn't delete property in storage. Object id: " .. tostring(object_id) - .. ", properties: " .. self.sc_common:dumper(properties)) + .. ", properties: " .. self.sc_common:dumper(properties)) return false end self.sc_logger:debug("[sc_storage_sqlite:delete_multiple]: successfully deleted property in storage for object id: " .. tostring(object_id) - .. ", properties: " .. self.sc_common:dumper(properties)) + .. ", properties: " .. self.sc_common:dumper(properties)) return true end @@ -327,7 +327,7 @@ function ScStorageSqlite:show(object_id) end self.sc_logger:notice("[sc_storage_sqlite:show]: stored properties for object id: " .. tostring(object_id) - .. ": " .. broker.json_encode(self.last_query_result)) + .. ": " .. 
broker.json_encode(self.last_query_result)) return true end From e8582795fc432113300d0e9031893edce5b0563d Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Tue, 13 May 2025 16:01:13 +0200 Subject: [PATCH 14/32] use broker v2 API --- .../centreon-stream-connectors-lib/sc_broker.lua | 2 -- modules/centreon-stream-connectors-lib/sc_event.lua | 13 ++++++++++--- .../centreon-stream-connectors-lib/sc_params.lua | 2 ++ 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/modules/centreon-stream-connectors-lib/sc_broker.lua b/modules/centreon-stream-connectors-lib/sc_broker.lua index 5d263949..c250f181 100644 --- a/modules/centreon-stream-connectors-lib/sc_broker.lua +++ b/modules/centreon-stream-connectors-lib/sc_broker.lua @@ -14,8 +14,6 @@ local ScBroker = {} function sc_broker.new(params, logger) local self = {} - broker_api_version = 2 - self.sc_logger = logger if not self.sc_logger then self.sc_logger = sc_logger.new() diff --git a/modules/centreon-stream-connectors-lib/sc_event.lua b/modules/centreon-stream-connectors-lib/sc_event.lua index 80ea3933..ff182dcb 100644 --- a/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/modules/centreon-stream-connectors-lib/sc_event.lua @@ -14,7 +14,7 @@ local sc_broker = require("centreon-stream-connectors-lib.sc_broker") local ScEvent = {} -function sc_event.new(event, params, common, logger, broker) +function sc_event.new(broker_event, params, common, logger, broker) local self = {} self.sc_logger = logger @@ -23,11 +23,18 @@ function sc_event.new(event, params, common, logger, broker) end self.sc_common = common self.params = params - self.event = event + self.broker_event = broker_event self.sc_broker = broker self.bbdo_version = self.sc_common:get_bbdo_version() - self.event.cache = {} + -- we create our event table + self.event = { + cache = {} + } + + -- create the meta table for the self.event table + local event_meta = { __index = function(tbl, key) return self.broker_event[key] end } + 
setmetatable(self.event, event_meta) setmetatable(self, { __index = ScEvent }) return self diff --git a/modules/centreon-stream-connectors-lib/sc_params.lua b/modules/centreon-stream-connectors-lib/sc_params.lua index 28043c29..7e10a2c1 100644 --- a/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/modules/centreon-stream-connectors-lib/sc_params.lua @@ -1,5 +1,7 @@ #!/usr/bin/lua +broker_api_version = 2 + --- -- Module to help initiate a stream connector with all paramaters -- @module sc_params From d0ab0cfaf2ae37b0161f870ed694a04852f9c214 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Thu, 15 May 2025 08:58:59 +0200 Subject: [PATCH 15/32] Removal of sending metrics without metric_id --- centreon-certified/influxdb/influxdb-metrics-apiv2.lua | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua index 549fa0c9..25c0b7cf 100644 --- a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua +++ b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua @@ -274,11 +274,8 @@ function EventQueue:send_data(payload, queue_metadata) for index, retry_event in ipairs(events_retry) do if not metrics[retry_event.metric_key] then retry_event.retry = retry_event.retry + 1 - if retry_event.retry > 3 then + if retry_event.retry > 5 then self.sc_logger:error("send_data: retry limit reached for metric_key: " .. retry_event.metric_key .. " ; metric name ='" .. retry_event.metric_name .. "' ; metric value='" .. retry_event.metric_value .. "'") - --self.sc_logger:error("Retry limit reached for key: " .. retry_event.metric_key) - data_binary = data_binary .. retry_event.metric_name .. " value=" .. retry_event.metric_value .. " " .. retry_event.last_check .. "\n" - data_binary = data_binary .. retry_event.status .. 
"\n" table.remove(events_retry, index) end else @@ -378,10 +375,6 @@ function write (event) local mname = event.name local metric_key = "" mname = string.gsub(mname, queue.sc_params.params.metric_name_regex, queue.sc_params.params.metric_replacement_character) - --if event.host_id == 7423 then - -- queue.sc_logger:notice("metric_key for host 7423: " .. tostring(metric_key) .. ", dumper write func: " .. queue.sc_common:dumper(event) ) - -- end - --local metric_key = tostring(event.host_id) .. ':' .. tostring(event.service_id) .. ':' .. tostring(event.name) if not event.service_id or event.service_id == 0 then metric_key = "metric_" .. mime.b64(tostring(event.host_id) .. ':0:' .. mname) else From 5dd6d13c12f3e9a50328ab2be1ec9337ec253b8c Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 15 May 2025 10:05:14 +0200 Subject: [PATCH 16/32] Remove debug code --- centreon-certified/influxdb/influxdb-metrics-apiv2.lua | 3 --- 1 file changed, 3 deletions(-) diff --git a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua index 25c0b7cf..37570ac5 100644 --- a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua +++ b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua @@ -259,9 +259,6 @@ function EventQueue:send_data(payload, queue_metadata) local data_binary = '' for index, payload_event in ipairs(payload) do if not metrics[payload_event.metric_key] then - if payload_event.host_id == 7423 and payload_event.service_id == 0 and payload_event.metric_name == "rtmin" then - self.sc_logger:notice("send_data: No metric_id found for: host_id:" .. tostring(payload_event.host_id) .. ", service_id: " .. tostring(payload_event.service_id) .. ", metric name: " .. 
tostring(payload_event.metric_name)) - end payload_event.retry = 1 table.insert(events_retry, payload_event) From 60a53d2e8bcb9d50db8f3c7439765c5d9c2f3e1f Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Thu, 22 May 2025 11:56:30 +0200 Subject: [PATCH 17/32] updates after review --- .../influxdb/influxdb-metrics-apiv2.lua | 195 +++++++++++------- .../sc_common.lua | 83 +++++++- .../sc_event.lua | 56 ++--- .../sc_flush.lua | 20 +- .../sc_logger.lua | 69 +------ .../sc_macros.lua | 34 +-- .../sc_metrics.lua | 41 +--- .../sc_params.lua | 25 +-- .../sc_storage.lua | 4 +- .../storage_backends/sc_storage_broker.lua | 65 ++++++ 10 files changed, 344 insertions(+), 248 deletions(-) create mode 100644 modules/centreon-stream-connectors-lib/storage_backends/sc_storage_broker.lua diff --git a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua index 37570ac5..d35ef42d 100644 --- a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua +++ b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua @@ -4,6 +4,7 @@ -------------------------------------------------------------------------------- local metrics = {} +local incomplete_metrics = {} -- Libraries local curl = require "cURL" @@ -86,12 +87,12 @@ function EventQueue.new(params) self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) self.sc_storage = sc_storage.new(self.sc_common, self.sc_logger, self.sc_params.params) - local rc, init_metrics = self.sc_storage:get_all_values_from_property("metric_id") - if type(init_metrics) == "boolean" or rc == false then - self.sc_logger:notice("no metric_id found in the sqlite db. 
That's probably because it is the first time the stream connector is executed") - else - metrics = init_metrics - end + --local rc, init_metrics = self.sc_storage:get_all_values_from_property("metric_id") + --if rc == false or type(init_metrics) == "boolean" then + -- self.sc_logger:notice("no metric_id found in the sqlite db. That's probably because it is the first time the stream connector is executed") + --else + -- metrics = init_metrics + --end local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements @@ -145,13 +146,38 @@ function EventQueue:format_accepted_event() self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") end +-------------------------------------------------------------------------------- +---- EventQueue:build_metric: use the stream connector format method to parse every metric in the event and remove unwanted metrics based on their name +-- @param format_metric (function) the format method from the stream connector +function EventQueue:build_metric(format_metric) + self.sc_logger:debug("[EventQueue:build_metric]: start build_metric") + local metrics_info = self.sc_metrics.metrics_info + for metric, metric_data in pairs(metrics_info) do + if metrics_info[metric].instance ~= "" then + if #metrics_info[metric].subinstance ~= 0 then + metrics_info[metric].metric_name = metrics_info[metric].instance .. '~' .. table.concat(metrics_info[metric].subinstance, '~') .. '#' .. metrics_info[metric].metric_name + else + metrics_info[metric].metric_name = metrics_info[metric].instance .. '#' .. 
metrics_info[metric].metric_name + end + end + if string.match(metric_data.metric_name, self.sc_params.params.accepted_metrics) then + metrics_info[metric].metric_name = string.gsub(metric_data.metric_name, self.sc_params.params.metric_name_regex, self.sc_params.params.metric_replacement_character) + -- use stream connector method to format the metric event + format_metric(metrics_info[metric]) + else + self.sc_logger:debug("[ScMetric:build_metric]: metric name is filtered out: " .. tostring(metric_data.metric_name) .. ". Metric name filter is: " .. tostring(self.sc_params.params.accepted_metrics)) + end + end + self.sc_logger:debug("[EventQueue:build_metric]: end build_metric") +end + -------------------------------------------------------------------------------- ---- EventQueue:format_event_host method -------------------------------------------------------------------------------- function EventQueue:format_event_host() local event = self.sc_event.event self.sc_logger:debug("[EventQueue:format_event_host]: call build_metric ") - self.sc_metrics:build_metric(self.format_metric[event.category][event.element]) + self:build_metric(self.format_metric[event.category][event.element]) end -------------------------------------------------------------------------------- @@ -160,7 +186,7 @@ end function EventQueue:format_event_service() self.sc_logger:debug("[EventQueue:format_event_service]: call build_metric ") local event = self.sc_event.event - self.sc_metrics:build_metric(self.format_metric[event.category][event.element]) + self:build_metric(self.format_metric[event.category][event.element]) end -------------------------------------------------------------------------------- @@ -169,19 +195,26 @@ end -------------------------------------------------------------------------------- function EventQueue:format_metric_host(metric) self.sc_logger:debug("[EventQueue:format_metric_host]: start format_metric host") - local event = self.sc_event.event - local metric_key = 
"metric_" .. mime.b64(tostring(event.host_id) .. ':0:' .. tostring(metric.metric_name)) - event.formated_event = { - metric_name = metric.metric_name, - metric_value = metric.value, - metric_key = metric_key, - last_check = event.last_check, - host_id = event.host_id, - service_id = 0, - status = "status value=" .. tostring(event.state) .. ",host_id=" .. tostring(event.host_id) .. " " .. tostring(event.last_check) - } + -- status + self.sc_event.event.formated_event = "status value=" .. tostring(event.state) .. ",host_id=" .. tostring(event.host_id) .. " " .. tostring(event.last_check) self:add() + -- metrics + local metric_key = "metric_" .. mime.b64(tostring(event.host_id) .. ':0:' .. tostring(metric.metric_name)) + if not metrics[metric_key] then + local category = self.sc_event.event.category + local element = self.sc_event.event.element + table.insert(incomplete_metrics, { + entry_creation_date = os.time(), + metric_name = metric.metric_name, + metric_value = metric.value, + metric_key = metric_key, + last_check = event.last_check + }) + else + self.sc_event.event.formated_event = metric.metric_name .. ",metric_id=" .. metrics[metric_key] .. " value=" .. metric.value .. " " .. event.last_check + self:add() + end self.sc_logger:debug("[EventQueue:format_metric_service]: end format_metric host") end @@ -192,17 +225,23 @@ end function EventQueue:format_metric_service(metric) self.sc_logger:debug("[EventQueue:format_metric_service]: start format_metric service") local event = self.sc_event.event - local metric_key = "metric_" .. mime.b64(tostring(event.host_id) .. ':' .. tostring(event.cache.service.service_id) .. ':' .. tostring(metric.metric_name)) - event.formated_event = { - metric_name = metric.metric_name, - metric_value = metric.value, - metric_key = metric_key, - last_check = event.last_check, - host_id = event.host_id, - service_id = event.service_id, - status = "status value=" .. tostring(event.state) .. ",host_id=" .. tostring(event.host_id) .. 
",service_id=" .. tostring(event.cache.service.service_id) .. " " .. tostring(event.last_check) - } + -- status + self.sc_event.event.formated_event = "status value=" .. tostring(event.state) .. ",host_id=" .. tostring(event.host_id) .. ",service_id=" .. tostring(event.cache.service.service_id) .. " " .. tostring(event.last_check) self:add() + -- metrics + local metric_key = "metric_" .. mime.b64(tostring(event.host_id) .. ':' .. tostring(event.cache.service.service_id) .. ':' .. tostring(metric.metric_name)) + if not metrics[metric_key] then + table.insert(incomplete_metrics, { + entry_creation_date = os.time(), + metric_name = metric.metric_name, + metric_value = metric.value, + metric_key = metric_key, + last_check = event.last_check + }) + else + self.sc_event.event.formated_event = metric.metric_name .. ",metric_id=" .. metrics[metric_key] .. " value=" .. metric.value .. " " .. event.last_check + self:add() + end self.sc_logger:debug("[EventQueue:format_metric_service]: end format_metric service") end @@ -218,7 +257,6 @@ function EventQueue:add() .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) - self.sc_common:dumper(self.sc_event.event.formated_event) self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) @@ -233,56 +271,28 @@ end -------------------------------------------------------------------------------- function EventQueue:build_payload(payload, event) if not payload then - payload = { event } + payload = event else - table.insert(payload, event) + payload = payload .. "\n" .. 
event end return payload end -local events_retry = {} - function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") local params = self.sc_params.params local url = params.http_server_protocol .. "://" .. params.http_server_address .. ":" .. tostring(params.http_server_port) - .. "/write?u=" .. tostring(params.influxdb_username) - .. "&p=" .. tostring(params.influxdb_password) - .. "&db=" .. tostring(params.influxdb_database) + .. "/write?u=" .. broker.url_encode(params.influxdb_username) + .. "&p=" .. broker.url_encode(params.influxdb_password) + .. "&db=" .. broker.url_encode(params.influxdb_database) .. "&precision=s" queue_metadata.headers = { "content-type: text/plain; charset=utf-8" } - local data_binary = '' - for index, payload_event in ipairs(payload) do - if not metrics[payload_event.metric_key] then - payload_event.retry = 1 - - table.insert(events_retry, payload_event) - else - data_binary = data_binary .. payload_event.metric_name .. ",metric_id=" .. metrics[payload_event.metric_key] .. " value=" .. payload_event.metric_value .. " " .. payload_event.last_check .. "\n" - data_binary = data_binary .. payload_event.status .. "\n" - end - end - - for index, retry_event in ipairs(events_retry) do - if not metrics[retry_event.metric_key] then - retry_event.retry = retry_event.retry + 1 - if retry_event.retry > 5 then - self.sc_logger:error("send_data: retry limit reached for metric_key: " .. retry_event.metric_key .. " ; metric name ='" .. retry_event.metric_name .. "' ; metric value='" .. retry_event.metric_value .. "'") - table.remove(events_retry, index) - end - else - data_binary = data_binary .. retry_event.metric_name .. ",metric_id=" .. metrics[retry_event.metric_key] .. " value=" .. retry_event.metric_value .. " " .. retry_event.last_check .. "\n" - data_binary = data_binary .. retry_event.status .. 
"\n" - table.remove(events_retry, index) - end - end - - self.sc_logger:log_curl_command(url, queue_metadata, params, data_binary) + self.sc_logger:log_curl_command(url, queue_metadata, params, payload) -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then @@ -290,7 +300,7 @@ function EventQueue:send_data(payload, queue_metadata) return true end - self.sc_logger:info("[EventQueue:send_data]: Going to send the following data " .. tostring(data_binary)) + self.sc_logger:info("[EventQueue:send_data]: Going to send the following data " .. tostring(payload)) self.sc_logger:info("[EventQueue:send_data]: Influxdb address is: " .. tostring(url)) local http_response_body = "" @@ -324,7 +334,7 @@ function EventQueue:send_data(payload, queue_metadata) end -- adding the HTTP POST data - http_request:setopt_postfields(data_binary) + http_request:setopt_postfields(payload) -- performing the HTTP request http_request:perform() @@ -351,6 +361,48 @@ function EventQueue:send_data(payload, queue_metadata) return retval end +function EventQueue:check_incomplete_metrics() + broker_log:info(0, "[EventQueue:check_incomplete_metrics]: start check_incomplete_metrics") + self.sc_logger:debug("[EventQueue:check_incomplete_metrics]: start check_incomplete_metrics") + local incomplete_metrics_queue_size = 0 + local incomplete_metrics_payload = "" + local queue_metadata = { + headers = { + "content-type: text/plain; charset=utf-8" + } + } + for metric_index = #incomplete_metrics, 1, -1 do + local metric_data = incomplete_metrics[metric_index] + broker_log:info(0, "[EventQueue:check_incomplete_metrics]: metric_data: " .. broker.json_encode(metric_data)) + if metrics[metric_data.metric_key] then + broker_log:info(0, "[EventQueue:check_incomplete_metrics]: metric_key found") + incomplete_metrics_payload = incomplete_metrics_payload .. metric_data.metric_name .. ",metric_id=" .. metrics[metric_data.metric_key] .. " value=" .. metric_data.metric_value .. 
" " .. metric_data.last_check .. "\n" + incomplete_metrics_queue_size = incomplete_metrics_queue_size + 1 + table.remove(incomplete_metrics, metric_index) + elseif os.time() - metric_data.entry_creation_date > 60 then + broker_log:info(0, "[EventQueue:check_incomplete_metrics]: metric_key " .. tostring(metric_data.metric_key) .. " is too old, removing it") + self.sc_logger:debug("[EventQueue:check_incomplete_metrics]: metric_key " .. tostring(metric_data.metric_key) .. " is too old, removing it") + table.remove(incomplete_metrics, metric_index) + else + broker_log:info(0, "[EventQueue:check_incomplete_metrics]: keeping metric_key " .. tostring(metric_data.metric_key) .. " in the incomplete metrics list") + self.sc_logger:debug("[EventQueue:check_incomplete_metrics]: keeping metric_key " .. tostring(metric_data.metric_key) .. " in the incomplete metrics list") + end + if incomplete_metrics_queue_size > self.sc_params.params.max_buffer_size then + broker_log:info(0, "[EventQueue:check_incomplete_metrics]: sending incomplete metrics payload") + self.sc_logger:debug("[EventQueue:check_incomplete_metrics]: sending incomplete metrics payload") + self:send_data(incomplete_metrics_payload, queue_metadata) + incomplete_metrics_payload = "" + incomplete_metrics_queue_size = 0 + end + end + if incomplete_metrics_payload ~= "" then + broker_log:info(0, "[EventQueue:check_incomplete_metrics]: sending incomplete metrics payload") + self.sc_logger:debug("[EventQueue:check_incomplete_metrics]: sending incomplete metrics payload") + self:send_data(incomplete_metrics_payload, queue_metadata) + end + self.sc_logger:debug("[EventQueue:check_incomplete_metrics]: end check_incomplete_metrics") +end + -------------------------------------------------------------------------------- -- Required functions for Broker StreamConnector -------------------------------------------------------------------------------- @@ -368,7 +420,7 @@ end -- @return {boolean} 
-------------------------------------------------------------------------------- function write (event) - if event._type == 196617 or event._type == 196609 then + if queue.sc_params.params.bbdo.categories["storage"].id == event.category and queue.sc_params.params.bbdo.elements["metric"].id == event.element then local mname = event.name local metric_key = "" mname = string.gsub(mname, queue.sc_params.params.metric_name_regex, queue.sc_params.params.metric_replacement_character) @@ -377,7 +429,6 @@ function write (event) else metric_key = "metric_" .. mime.b64(tostring(event.host_id) .. ':' .. event.service_id .. ':' .. mname) end - -- check if the metric is already in the metrics table if not metrics[metric_key] then queue.sc_logger:notice("write: no metric_id found for 'metric_key': " .. tostring(metric_key) .. ", info: " .. tostring(event.host_id) .. ':' .. tostring(event.service_id) .. ':' .. mname .. ", going to save metric_id : " .. tostring(event.metric_id) .. " in sqlite db and memory") @@ -430,7 +481,9 @@ function flush() if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then return false end - + if #incomplete_metrics > 0 then + queue:check_incomplete_metrics() + end return true end @@ -439,7 +492,9 @@ function flush() if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then return false end - + if #incomplete_metrics > 0 then + queue:check_incomplete_metrics() + end return true end diff --git a/modules/centreon-stream-connectors-lib/sc_common.lua b/modules/centreon-stream-connectors-lib/sc_common.lua index 29e76878..eaac3777 100644 --- a/modules/centreon-stream-connectors-lib/sc_common.lua +++ b/modules/centreon-stream-connectors-lib/sc_common.lua @@ -118,8 +118,8 @@ function ScCommon:split(text, separator) end --- compare_numbers: compare two numbers, if comparison is valid, then return true --- @param firstNumber {number} --- @param secondNumber {number} +-- 
@param firstNumber {number} +-- @param secondNumber {number} -- @param operator {string} the mathematical operator that is used for the comparison -- @return {boolean} function ScCommon:compare_numbers(firstNumber, secondNumber, operator) @@ -366,4 +366,81 @@ function ScCommon:is_valid_pattern(pattern) return status end -return sc_common +--- sleep: wait a given number of seconds +-- @param seconds (number) the number of seconds you need to wait +function ScCommon:sleep(seconds) + local default_value = 1 + + if type(seconds) == "number" then + os.execute("sleep " .. seconds) + else + self.sc_logger:error("[sc_common:sleep]: given parameter is not a valid second value. Parameter value: " .. tostrin(seconds) + .. ". This will default to: " .. tostring(default_value)) + os.execute("sleep " .. default_value) + end +end + +--- create_sleep_counter: create a table to handle sleep counters. Useful when you want to log something less often after some repetitions +-- @param sleep_table (table) an empty table that will be returned with all the desired data structure +-- @param min (number) the minimum value of the counter +-- @param max (number) the maximum value of the counter +-- @param step (number) the value by whitch the counter will be incremented +-- @param init_value (number) [optional] the value of the counter when you create the table. When not provided, it will use the min +-- @return sleep_table (table) a table with all values set and some functions in order to interact with the table more easily +function ScCommon:create_sleep_counter_table(sleep_table, min, max, step, init_value) + local default_min = 0 + local default_max = 300 + local default_step = 10 + + if type(min) ~= "number" + or type(max) ~= "number" + or type(step) ~= "number" + then + self.sc_logger:error("[sc_common:create_sleep_counter_table]: min, max or step are not numbers: " .. tostring(min) + .. ", " .. tostring(max) .. ", " .. tostring(step) .. ". 
We will use default values instead") + min = default_min + max = default_max + step = default_step + end + + if max < min then + self.sc_logger:error("[sc_common:create_sleep_counter_table]: max is below min." .. tostring(max) .. " < " .. tostring(min) + .. ". We will use default values instead") + min = default_min + max = default_max + end + + if not init_value or type(init_value) ~= "number" then + init_value = min + end + + sleep_table.min = min + sleep_table.max = max + sleep_table.value = init_value + sleep_table.step = step + sleep_table.reset = function() sleep_table.value = sleep_table.min end + sleep_table.increment = function() + if sleep_table.value < sleep_table.max then + sleep_table.value = sleep_table.value + sleep_table.step + end + end + sleep_table.is_max_reached = function() + if sleep_table.value < sleep_table.max then + return false + else + return true + end + end + sleep_table.sleep = function() + if not sleep_table:is_max_reached() then + self:sleep(sleep_table.value) + sleep_table:increment() + else + self:sleep(sleep_table.value) + end + end + + return sleep_table +end + +return sc_common \ No newline at end of file diff --git a/modules/centreon-stream-connectors-lib/sc_event.lua b/modules/centreon-stream-connectors-lib/sc_event.lua index ff182dcb..076e32f6 100644 --- a/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/modules/centreon-stream-connectors-lib/sc_event.lua @@ -70,7 +70,7 @@ function ScEvent:find_in_mapping(mapping, reference, item) end --- is_valid_event: check if the event is accepted depending on configured conditions --- @return true|false (boolean) +-- @return true|false (boolean) function ScEvent:is_valid_event() local is_valid_event = false @@ -131,7 +131,7 @@ function ScEvent:is_valid_host_status_event() return false end - -- return false if event status is a duplicate and dedup is enabled + -- return false if event status is a duplicate and dedup is enabled if self:is_host_status_event_duplicated() then 
self.sc_logger:warning("[sc_event:is_host_status_event_duplicated]: host_id: " .. tostring(self.event.host_id) .. " is sending a duplicated event. Dedup option (enable_host_status_dedup) is set to: " .. tostring(self.params.enable_host_status_dedup)) @@ -198,7 +198,7 @@ function ScEvent:is_valid_service_status_event() return false end - -- return false if event status is a duplicate and dedup is enabled + -- return false if event status is a duplicate and dedup is enabled if self:is_service_status_event_duplicated() then self.sc_logger:warning("[sc_event:is_service_status_event_duplicated]: host_id: " .. tostring(self.event.host_id) .. " service_id: " .. tostring(self.event.service_id) .. " is sending a duplicated event. Dedup option (enable_service_status_dedup) is set to: " .. tostring(self.params.enable_service_status_dedup)) @@ -239,7 +239,7 @@ function ScEvent:is_valid_service_status_event() return false end - -- return false if service is not in an accepted servicegroup + -- return false if service is not in an accepted servicegroup if not self:is_valid_servicegroup() then self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service_id: " .. tostring(self.event.service_id) .. " is not in an accepted servicegroup") return false @@ -274,8 +274,6 @@ function ScEvent:is_valid_host() if (not self.event.cache.host and self.params.skip_anon_events == 1) then self.sc_logger:warning("[sc_event:is_valid_host]: No name for host with id: " .. tostring(self.event.host_id) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) - --self.sc_logger:notice("[sc_event:is_valid_host]: No name for host with id: " .. tostring(self.event.host_id) - --.. " and skip anon events is: " .. 
tostring(self.params.skip_anon_events)) return false elseif (not self.event.cache.host and self.params.skip_anon_events == 0) then self.event.cache.host = { @@ -289,7 +287,7 @@ function ScEvent:is_valid_host() end -- return false if event is coming from fake bam host - if string.find(self.event.cache.host.name, "^_Module_BAM_*") then + if string.find(self.event.cache.host.name, "^_Module_BAM_*") and self.params.enable_bam_host == 0 then self.sc_logger:debug("[sc_event:is_valid_host]: Host is a BAM fake host: " .. tostring(self.event.cache.host.name)) return false end @@ -369,6 +367,18 @@ function ScEvent:is_valid_service() return false end + -- if we want to send BA status using the service status mecanism, we need to use the ba_description instead of host name + if string.find(self.event.cache.host.name, "^_Module_BAM_*") and self.params.enable_bam_host == 1 then + self.sc_logger:debug("[sc_event:is_valid_service]: Host is a fake BAM host. Therefore, host name: " + .. tostring(self.event.cache.host.name) .. " must be replaced by the name of the BA.") + self.event.ba_id = string.gsub(self.event.cache.service.description, "ba_", "") + self.event.ba_id = tonumber(self.event.ba_id) + self:is_valid_ba() + self.sc_logger:debug("[sc_event:is_valid_service]: replacing host name: " + .. tostring(self.event.cache.host.name) .. " by BA name: " .. 
tostring(self.event.cache.ba.ba_name)) + self.event.cache.host.name = self.event.cache.ba.ba_name + end + return true end @@ -474,7 +484,7 @@ end --- is_valid_event_downtime_state: check if the event is in an accepted downtime state -- @return true|false (boolean) function ScEvent:is_valid_event_downtime_state() - -- patch compat bbdo 3 => bbdo 2 + -- patch compat bbdo 3 => bbdo 2 if (not self.event.scheduled_downtime_depth and self.event.downtime_depth) then self.event.scheduled_downtime_depth = self.event.downtime_depth end @@ -640,7 +650,7 @@ end --- is_valid_bam_event: check if the event is an accepted bam type event -- @return true|false (boolean) function ScEvent:is_valid_bam_event() - -- return false if ba name is invalid or ba_id is nil + -- return false if ba name is invalid or ba_id is nil if not self:is_valid_ba() then self.sc_logger:warning("[sc_event:is_valid_bam_event]: ba_id: " .. tostring(self.event.ba_id) .. " hasn't been validated") return false @@ -887,7 +897,7 @@ function ScEvent:is_valid_host_severity() end - -- return false if host severity doesn't match + -- return false if host severity doesn't match if not self.sc_common:compare_numbers(self.params.host_severity_threshold, self.event.cache.severity.host, self.params.host_severity_operator) then self.sc_logger:debug("[sc_event:is_valid_host_severity]: dropping event because host with id: " .. tostring(self.event.host_id) .. " has an invalid severity. Severity is: " .. tostring(self.event.cache.severity.host) .. ". host_severity_threshold (" .. tostring(self.params.host_severity_threshold) .. ") is " .. 
self.params.host_severity_operator @@ -916,7 +926,7 @@ function ScEvent:is_valid_service_severity() - -- return false if service severity doesn't match + -- return false if service severity doesn't match if not self.sc_common:compare_numbers(self.params.service_severity_threshold, self.event.cache.severity.service, self.params.service_severity_operator) then self.sc_logger:debug("[sc_event:is_valid_service_severity]: dropping event because service with id: " .. tostring(self.event.service_id) .. " has an invalid severity. Severity is: " .. tostring(self.event.cache.severity.service) .. ". service_severity_threshold (" .. tostring(self.params.service_severity_threshold) .. ") is " .. self.params.service_severity_operator @@ -927,7 +937,7 @@ function ScEvent:is_valid_service_severity() return true end ----is_valid_acknowledgement_event: checks if the event is a valid acknowledge event +---is_valid_acknowledgement_event: checks if the event is a valid acknowledge event -- @return true|false (boolean) function ScEvent:is_valid_acknowledgement_event() -- return false if we can't get hostname or host id is nil @@ -936,7 +946,7 @@ function ScEvent:is_valid_acknowledgement_event() return false end - -- check if ack author is valid + -- check if ack author is valid if not self:is_valid_author() then self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: acknowledgement on host: " .. tostring(self.event.host_id) .. "and service: " .. tostring(self.event.service_id) .. "(0 means ack is on host) is not made by a valid author. 
Author is: " @@ -960,7 +970,7 @@ function ScEvent:is_valid_acknowledgement_event() local event_status = "" -- service_id = 0 means ack is on a host if self.event.type == 0 then - -- use dedicated ack host status configuration or host_status configuration + -- use dedicated ack host status configuration or host_status configuration event_status = self.sc_common:ifnil_or_empty(self.params.ack_host_status, self.params.host_status) -- return false if event status is not accepted @@ -977,7 +987,7 @@ function ScEvent:is_valid_acknowledgement_event() return false end - -- use dedicated ack host status configuration or host_status configuration + -- use dedicated ack host status configuration or host_status configuration event_status = self.sc_common:ifnil_or_empty(self.params.ack_service_status, self.params.service_status) -- return false if event status is not accepted @@ -994,7 +1004,7 @@ function ScEvent:is_valid_acknowledgement_event() return false end - -- return false if service is not in an accepted servicegroup + -- return false if service is not in an accepted servicegroup if not self:is_valid_servicegroup() then self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service_id: " .. tostring(self.event.service_id) .. " is not in an accepted servicegroup") return false @@ -1026,7 +1036,7 @@ function ScEvent:is_valid_downtime_event() return false end - -- check if downtime author is valid + -- check if downtime author is valid if not self:is_valid_author() then self.sc_logger:warning("[sc_event:is_valid_downtime_event]: downtime with internal ID: " .. tostring(self.event.internal_id) .. " is not made by a valid author. Author is: " .. tostring(self.event.author) .. " Accepted authors are: " .. 
self.params.accepted_authors) @@ -1076,7 +1086,7 @@ function ScEvent:is_valid_downtime_event() return false end - -- return false if service is not in an accepted servicegroup + -- return false if service is not in an accepted servicegroup if not self:is_valid_servicegroup() then self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service_id: " .. tostring(self.event.service_id) .. " is not in an accepted servicegroup") return false @@ -1176,7 +1186,7 @@ function ScEvent:get_downtime_service_status() return self:get_most_recent_status_code(timestamp) end ---- get_most_recent_status_code: retrieve the last status code from a list of status and timestamp +--- get_most_recent_status_code: retrieve the last status code from a list of status and timestamp -- @param timestamp (table) a table with the association of the last known timestamp of a status and its corresponding status code -- @return status (number) the most recent status code of the object function ScEvent:get_most_recent_status_code(timestamp) @@ -1233,8 +1243,8 @@ function ScEvent:is_service_status_event_duplicated() end -- at the end, it only remains two cases, the first one is a duplicated event. The second one is when we have: - -- OK(H) --> NOT-OK(S) --> OK(H) - ]]-- + -- OK(H) --> NOT-OK(S) --> OK(H) + ]]-- end --- is_host_status_event_duplicated: check if the host event is the same than the last one (will not work for UP(H) -> DOWN(S) -> UP(H)) @@ -1270,7 +1280,7 @@ function ScEvent:is_host_status_event_duplicated() end -- at the end, it only remains two cases, the first one is a duplicated event. 
The second one is when we have: - -- UP(H) --> NOT-UP(S) --> UP(H) + -- UP(H) --> NOT-UP(S) --> UP(H) ]]-- end @@ -1384,4 +1394,4 @@ function ScEvent:is_valid_storage_event() return true end -return sc_event +return sc_event \ No newline at end of file diff --git a/modules/centreon-stream-connectors-lib/sc_flush.lua b/modules/centreon-stream-connectors-lib/sc_flush.lua index e7db0cdf..39ebb1a0 100644 --- a/modules/centreon-stream-connectors-lib/sc_flush.lua +++ b/modules/centreon-stream-connectors-lib/sc_flush.lua @@ -13,7 +13,7 @@ local ScFlush = {} --- sc_flush.new: sc_flush constructor -- @param params (table) the params table of the stream connector --- @param [opt] sc_logger (object) a sc_logger object +-- @param [opt] sc_logger (object) a sc_logger object function sc_flush.new(params, logger) local self = {} @@ -210,13 +210,21 @@ end -- @param metadata (table) all metadata for the payload -- @return boolean (boolean) true or false depending on the success of the operation function ScFlush:flush_payload(send_method, payload, metadata) - if payload then - if not send_method(payload, metadata) then - return false - end + -- when the payload doesn't exist or is empty, we just tell broker that everything is fine on the stream connector side + if not payload or payload == "" then + return true end - return true + local pcall_status, result = pcall(send_method, payload, metadata) + + self.sc_logger:debug("[sc_flush:flush_payload]: tried to send payload protected by pcall. Status: " .. tostring(pcall_status) .. ", Message: " .. tostring(result)) + + if not pcall_status then + self.sc_logger:error("[sc_flush:flush_payload]: could not send payload because of an internal error. pcall status: " .. tostring(pcall_status) .. ", error message: " .. 
tostring(result)) + return false + end + + return result end return sc_flush \ No newline at end of file diff --git a/modules/centreon-stream-connectors-lib/sc_logger.lua b/modules/centreon-stream-connectors-lib/sc_logger.lua index 2622cf4f..16ccee02 100644 --- a/modules/centreon-stream-connectors-lib/sc_logger.lua +++ b/modules/centreon-stream-connectors-lib/sc_logger.lua @@ -53,23 +53,11 @@ function sc_logger.new(logfile, severity) self.logfile = logfile or "/var/log/centreon-broker/stream-connector.log" broker_log:set_parameters(self.severity, self.logfile) - self.trace_action = "" - self.params = params - setmetatable(self, { __index = ScLogger }) return self end -function ScLogger:set_params(params) - self.params = params - self:notice(self.params.enable_broker_cache_counter_check) -end - -function ScLogger:set_common_object(sc_common) - self.sc_common = sc_common -end - --- error: write an error message -- @param message (string) the message that will be written function ScLogger:error(message) @@ -168,59 +156,4 @@ function ScLogger:log_curl_command(url, metadata, params, data, basic_auth) end end -function ScLogger:log_trace(step, host_id, flush) - if not self.params then - return - end - - if self.params.enable_trace == 0 then - return - end - - if not host_id and not flush then - return - end - - if not self.params.trace_host_id_list[host_id] and not flush then - return - end - - if type(self.trace_action) ~= "table" then - self.trace_action = {} - end - - -- self:notice(self.sc_common:dumper(self.trace_action)) - - if not self.trace_action[host_id] and host_id then - self.trace_action[host_id] = {} - end - - -- self:notice(self.sc_common:dumper(self.trace_action)) - - if host_id then - self.trace_action[host_id][step] = true - end - -- local log_string = "[TRACE][" .. tostring(func_name) .. "][" .. tostring(step) .. "]: " .. tostring(self.params.trace_host_id_list[host_id]) .. " " .. 
tostring(action) - -- self:notice(log_string) - if flush and type(self.trace_action) == "table" then - local msg = "| host id | action | data |\n| -- | -- | -- |\n" - local h_name - -- self:notice(self.sc_common:dumper(self.trace_action)) - for host_id, trace_info in pairs(self.trace_action) do - h_name = broker_cache:get_hostname(host_id) - if not h_name then - h_name = host_id - end - - for step_name, value in pairs(trace_info) do - msg = msg .. "| " .. tostring(h_name) .. " | " .. tostring(step_name) .. " | " .. tostring(value) .. " |\n" - end - end - - msg = msg .. "\n\n| sent payload | result |\n| -- | -- |\n| " .. tostring(flush.payload) .. " | " .. tostring(flush.result) .. " |\n" - self.trace_action = "" - self:notice(msg) - end -end - -return sc_logger +return sc_logger \ No newline at end of file diff --git a/modules/centreon-stream-connectors-lib/sc_macros.lua b/modules/centreon-stream-connectors-lib/sc_macros.lua index 44d2938b..e4517921 100644 --- a/modules/centreon-stream-connectors-lib/sc_macros.lua +++ b/modules/centreon-stream-connectors-lib/sc_macros.lua @@ -68,13 +68,13 @@ function sc_macros.new(params, logger, common) HOSTSTATETYPE = "{cache.host.state_type}", HOSTATTEMPTS = "{cache.host.check_attempt}", MAXHOSTATTEMPTS = "{cache.host.max_check_attempts}", - -- HOSTEVENTID doesn't exist + -- HOSTEVENTID doesn't exist -- LASTHOSTEVENTID doesn't exist -- HOSTPROBLEMID doesn't exist -- LASTHOSTPROBLEMID doesn't exist HOSTLATENCY = "{cache.host.latency}", HOSTEXECUTIONTIME = "{cache.host.execution_time}", - -- HOSTDURATION doesn't exist + -- HOSTDURATION doesn't exist -- HOSTDURATIONSEC doesn't exist HOSTDOWNTIME = "{cache.host.scheduled_downtime_depth}", HOSTPERCENTCHANGE = "{percent_state_change}", -- will be replaced by the service percent_state_change if event is about a service @@ -193,7 +193,7 @@ end --- replace_sc_macro: replace any stream connector macro with it's value -- @param string (string) the string in which there might be some stream 
connector macros to replace -- @param event (table) the current event table --- @param json_string (boolean) +-- @param json_string (boolean) -- @return converted_string (string) the input string but with the macro replaced with their json escaped values function ScMacros:replace_sc_macro(string, event, json_string) local cache_macro_value = false @@ -202,8 +202,8 @@ function ScMacros:replace_sc_macro(string, event, json_string) local format = false local converted_string = string - -- find all macros for exemple the string: - -- {cache.host.name} is the name of host with id: {host_id} + -- find all macros for exemple the string: + -- {cache.host.name} is the name of host with id: {host_id} -- will generate two macros {cache.host.name} and {host_id}) for macro in string.gmatch(string, "{[%w_.%(%),%%%+%-%*%?%[%]%^%$]+}") do self.sc_logger:debug("[sc_macros:replace_sc_macro]: found a macro, name is: " .. tostring(macro)) @@ -265,7 +265,7 @@ function ScMacros:replace_sc_macro(string, event, json_string) return converted_string end ---- get_cache_macro: check if the macro is a macro which value must be found in the cache +--- get_cache_macro: check if the macro is a macro which value must be found in the cache -- @param macro (string) the macro we want to check (for example: {cache.host.name}) -- @param event (table) the event table (obivously, cache must be in the event table if we want to find something in it) -- @return false (boolean) if the macro is not a cache macro ({host_id} instead of {cache.xxxx.yyy} for example) or we can't find the cache type or the macro in the cache @@ -286,7 +286,7 @@ function ScMacros:get_cache_macro(raw_macro, event) -- check if it is asked to transform the macro and if so, separate the real macro from the transformation flag local macro_value, flag = self:get_transform_flag(macro) - -- check if the macro is in the cache + -- check if the macro is in the cache if event.cache[cache_type][macro_value] then if flag then 
self.sc_logger:info("[sc_macros:get_cache_macro]: macro has a flag associated. Flag is: " .. tostring(flag) @@ -303,7 +303,7 @@ function ScMacros:get_cache_macro(raw_macro, event) return false end ---- get_event_macro: check if the macro is a macro which value must be found in the event table (meaning not in the cache) +--- get_event_macro: check if the macro is a macro which value must be found in the event table (meaning not in the cache) -- @param macro (string) the macro we want to check (for example: {host_id}) -- @param event (table) the event table -- @return false (boolean) if the macro is not found in the event @@ -331,13 +331,13 @@ function ScMacros:get_event_macro(macro, event) return false end ---- get_group_macro: check if the macro is a macro which value must be found in a group table (meaning it is a special kind of data in the event) +--- get_group_macro: check if the macro is a macro which value must be found in a group table (meaning it is a special kind of data in the event) -- @param macro (string) the macro we want to check (for example: {group(hg,table)}) -- @param event (table) the event table -- @return false (boolean) if the macro is not found -- @return macro_value (string|boolean|number) the value of the macro function ScMacros:get_group_macro(macro, event) - -- try to cut the macro + -- try to cut the macro local group_type, format, regex = string.match(macro, "^{groups%((%w+),(%w+),(.*)%)}") if not group_type or not format or not regex or not self.group_macro_conversion[group_type] then @@ -438,7 +438,7 @@ function ScMacros:convert_centreon_macro(string, event) local sc_macro_value = false local converted_string = string - -- get all standard macros + -- get all standard macros for macro in string.gmatch(string, "$%w$") do self.sc_logger:debug("[sc_macros:convert_centreon_macro]: found a macro, name is: " .. 
tostring(macro)) -- try to find the macro in the mapping table table self.centreon_macro @@ -472,7 +472,7 @@ end --- get_transform_flag: check if there is a tranformation flag linked to the macro and separate them -- @param macro (string) the macro that needs to be checked --- @return macro_value (string) the macro name ONLY if there is a flag +-- @return macro_value (string) the macro name ONLY if there is a flag -- @return flag (string) the flag name if there is one -- @return macro (string) the original macro if no flag were found function ScMacros:get_transform_flag(macro) @@ -499,7 +499,7 @@ end -- @param macro_value (string) the string that needs to be shortened -- @return string (string) the input string with only the first lne function ScMacros:transform_short(macro_value) - return string.match(macro_value, "^(.*)\n") + return string.match(macro_value, "^(.*)\n") or macro_value end --- transform_type: convert a 0, 1 value into SOFT or HARD @@ -518,7 +518,7 @@ end -- @param event (table) the event table -- @return string (string) the status of the event in a human readable format (e.g: OK, WARNING) function ScMacros:transform_state(macro_value, event) - -- acknowledgement events are special, the state can be for a host or a service. + -- acknowledgement events are special, the state can be for a host or a service. -- We force the element to be host_status or service_status in order to properly convert the state if event.element == 1 and event.service_id == 0 then return self.params.status_mapping[event.category][event.element].host_status[macro_value] @@ -558,8 +558,8 @@ function ScMacros:build_converted_string_for_cache_and_event_macro(macro_value, .. tostring(macro) .. ", value is: " .. tostring(clean_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string)) --[[ - to have the best json possible, we try to remove double quotes. 
- "service_severity": "{cache.severity.service}" must become "service_severity": 1 and not "service_severity": "1" + to have the best json possible, we try to remove double quotes. + "service_severity": "{cache.severity.service}" must become "service_severity": 1 and not "service_severity": "1" "service_severity": "my service severity is: {cache.severity.service}" must become "service_severity": "my service severity is: 1" ]]-- if string.match(converted_string, '"' .. macro .. '"') then @@ -579,4 +579,4 @@ function ScMacros:build_converted_string_for_cache_and_event_macro(macro_value, return converted_string end -return sc_macros +return sc_macros \ No newline at end of file diff --git a/modules/centreon-stream-connectors-lib/sc_metrics.lua b/modules/centreon-stream-connectors-lib/sc_metrics.lua index 6a04f220..7f7d546a 100644 --- a/modules/centreon-stream-connectors-lib/sc_metrics.lua +++ b/modules/centreon-stream-connectors-lib/sc_metrics.lua @@ -19,7 +19,7 @@ local ScMetrics = {} -- @param params (table) the params table of the stream connector -- @param common (object) a sc_common instance -- @param broker (object) a sc_broker instance --- @param [opt] sc_logger (object) a sc_logger instance +-- @param [opt] sc_logger (object) a sc_logger instance function sc_metrics.new(event, params, common, broker, logger) self = {} @@ -63,7 +63,7 @@ function sc_metrics.new(event, params, common, broker, logger) self.metrics_name_operations.custom.replacement_character = self.params.metrics_name_custom_replacement_character end - -- initiate metrics table + -- initiate metrics table self.metrics = {} -- initiate sc_event object self.sc_event = sc_event.new(event, self.params, self.sc_common, self.sc_logger, self.sc_broker) @@ -134,32 +134,24 @@ function ScMetrics:is_valid_host_metric_event() return false end - self.sc_logger:log_trace("valid_host", self.sc_event.event.host_id) - -- return false if host is not monitored from an accepted poller if not 
self.sc_event:is_valid_poller() then self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. " is not monitored from an accepted poller") return false end - self.sc_logger:log_trace("valid_poller", self.sc_event.event.host_id) - -- return false if host has not an accepted severity if not self.sc_event:is_valid_host_severity() then self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. " has not an accepted severity") return false end - self.sc_logger:log_trace("valid_host_severity", self.sc_event.event.host_id) - -- return false if host is not in an accepted hostgroup if not self.sc_event:is_valid_hostgroup() then self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. " is not in an accepted hostgroup") return false end - self.sc_logger:log_trace("valid_hostgroup", self.sc_event.event.host_id) - -- return false if there is no perfdata or it can't be parsed if not self:is_valid_perfdata(self.sc_event.event.perfdata) then self.sc_logger:warning("[sc_metrics:is_vaild_host_metric_event]: host_id: " @@ -167,8 +159,6 @@ function ScMetrics:is_valid_host_metric_event() return false end - self.sc_logger:log_trace("valid_perfdata", self.sc_event.event.host_id) - return true end @@ -181,16 +171,12 @@ function ScMetrics:is_valid_service_metric_event() return false end - self.sc_logger:log_trace("valid_host", self.sc_event.event.host_id) - -- return false if we can't get service description of service id is nil if not self.sc_event:is_valid_service() then self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service with id: " .. tostring(self.sc_event.event.service_id) .. 
" hasn't been validated") return false end - self.sc_logger:log_trace("valid_service", self.sc_event.event.host_id) - -- return false if host is not monitored from an accepted poller if not self.sc_event:is_valid_poller() then self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service id: " .. tostring(self.sc_event.event.service_id) @@ -205,8 +191,6 @@ function ScMetrics:is_valid_service_metric_event() return false end - self.sc_logger:log_trace("valid_host_severity", self.sc_event.event.host_id) - -- return false if service has not an accepted severity if not self.sc_event:is_valid_service_severity() then self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service id: " .. tostring(self.sc_event.event.service_id) @@ -214,8 +198,6 @@ function ScMetrics:is_valid_service_metric_event() return false end - self.sc_logger:log_trace("valid_service_severity", self.sc_event.event.host_id) - -- return false if host is not in an accepted hostgroup if not self.sc_event:is_valid_hostgroup() then self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service_id: " .. tostring(self.sc_event.event.service_id) @@ -223,16 +205,12 @@ function ScMetrics:is_valid_service_metric_event() return false end - self.sc_logger:log_trace("valid_hostgroup", self.sc_event.event.host_id) - - -- return false if service is not in an accepted servicegroup + -- return false if service is not in an accepted servicegroup if not self.sc_event:is_valid_servicegroup() then self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service_id: " .. tostring(self.sc_event.event.service_id) .. 
" is not in an accepted servicegroup") return false end - self.sc_logger:log_trace("valid_servicegroup", self.sc_event.event.host_id) - -- return false if there is no perfdata or they it can't be parsed if not self:is_valid_perfdata(self.sc_event.event.perfdata) then self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service_id: " @@ -240,8 +218,6 @@ function ScMetrics:is_valid_service_metric_event() return false end - self.sc_logger:log_trace("valid_perfdata", self.sc_event.event.host_id) - return true end @@ -282,7 +258,7 @@ function ScMetrics:is_valid_perfdata(perfdata) return true end --- to name a few : +-- to name a few : -- open metric (prometheus) : metric name = [a-zA-Z0-9_:], labels [a-zA-Z0-9_] https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#protocol-negotiation -- datadog : metric_name = [a-zA-Z0-9_.] https://docs.datadoghq.com/fr/metrics/custom_metrics/#naming-custom-metrics -- dynatrace matric name [a-zA-Z0-9-_.] https://dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol#metric-key @@ -294,13 +270,6 @@ function ScMetrics:build_metric(format_metric) local metrics_info = self.metrics_info for metric, metric_data in pairs(self.metrics_info) do - if metrics_info[metric].instance ~= "" then - if #metrics_info[metric].subinstance ~= 0 then - metrics_info[metric].metric_name = metrics_info[metric].instance .. '~' .. table.concat(metrics_info[metric].subinstance, '~') .. '#' .. metrics_info[metric].metric_name - else - metrics_info[metric].metric_name = metrics_info[metric].instance .. '#' .. 
metrics_info[metric].metric_name - end - end if string.match(metric_data.metric_name, self.params.accepted_metrics) then metrics_info[metric].metric_name = string.gsub(metric_data.metric_name, self.params.metric_name_regex, self.params.metric_replacement_character) -- use stream connector method to format the metric event @@ -311,4 +280,4 @@ function ScMetrics:build_metric(format_metric) end end -return sc_metrics +return sc_metrics \ No newline at end of file diff --git a/modules/centreon-stream-connectors-lib/sc_params.lua b/modules/centreon-stream-connectors-lib/sc_params.lua index 7e10a2c1..ef0d21db 100644 --- a/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/modules/centreon-stream-connectors-lib/sc_params.lua @@ -85,7 +85,7 @@ function sc_params.new(common, logger) -- communication parameters max_buffer_size = 1, max_buffer_age = 5, --deprecated - max_all_queues_age = 60, + max_all_queues_age = 5, send_mixed_events = 1, -- connection parameters @@ -138,8 +138,6 @@ function sc_params.new(common, logger) logfile = "", log_level = "", log_curl_commands = 0, - enable_trace = 0, - trace_host_id_list = "", -- metric metric_name_regex = "no_forbidden_character_to_replace", @@ -1052,7 +1050,6 @@ function ScParams:check_params() -- handle some dedicated parameters that can use lua pattern (such as accepted_hosts and accepted_services) self:build_and_validate_filters_pattern({ "accepted_hosts", "accepted_services" }) - self:build_trace_host_list(self.params.trace_host_id_list) end --- get_kafka_params: retrieve the kafka parameters and store them the self.params.kafka table @@ -1256,24 +1253,4 @@ function ScParams:build_and_validate_filters_pattern(param_list) end end -function ScParams:build_trace_host_list(param_value) - if self.params.enable_trace == 1 then - if param_value == "" then - self.logger:notice("[sc_params:build_trace_host_list]: enable_trace param is set to 1 but no trace_host_id_list provided. 
Trace is going to be disabled") - self.params.enable_trace = 0 - return - end - local tmp_trace_list = self.common:split(param_value) - local trace_list = {} - local host_info - - for index, host_id in ipairs(tmp_trace_list) do - trace_list[tonumber(host_id)] = tonumber(host_id) - end - self.params.trace_host_id_list = trace_list - elseif self.params.trace_host_id_list ~= "" and self.params.enable_trace == 0 then - self.logger:notice("[sc_params:build_trace_host_list]: trace_host_id_list is not empty but enable_trace param is set to 0. trace_host_id_list param is going to be ignored") - end -end - return sc_params diff --git a/modules/centreon-stream-connectors-lib/sc_storage.lua b/modules/centreon-stream-connectors-lib/sc_storage.lua index 43b273df..320eb24a 100644 --- a/modules/centreon-stream-connectors-lib/sc_storage.lua +++ b/modules/centreon-stream-connectors-lib/sc_storage.lua @@ -28,10 +28,12 @@ function sc_storage.new(common, logger, params) } -- make sure we are able to load the desired storage backend. If not, fall back to the one provided by broker - if pcall(require, "centreon-stream-connectors-lib.storage_backends.sc_storage_" .. params.storage_backend) then + local pcall_success, pcall_message = pcall(require, "centreon-stream-connectors-lib.storage_backends.sc_storage_" .. params.storage_backend) + if pcall_success then local storage_backend = require("centreon-stream-connectors-lib.storage_backends.sc_storage_" .. params.storage_backend) self.storage_backend = storage_backend.new(self.sc_common, logger, params) else + self.sc_logger:error("[sc_storage:new]: " .. tostring(pcall_message)) self.sc_logger:error("[sc_storage:new]: Couldn't load storage backend: " .. tostring(params.storage_backend) .. ". Make sure that the file sc_storage_" .. tostring(params.storage_backend) .. ".lua exists on your server." .. 
" The stream connector is going to use the broker storage backend.") diff --git a/modules/centreon-stream-connectors-lib/storage_backends/sc_storage_broker.lua b/modules/centreon-stream-connectors-lib/storage_backends/sc_storage_broker.lua new file mode 100644 index 00000000..fb65c8b9 --- /dev/null +++ b/modules/centreon-stream-connectors-lib/storage_backends/sc_storage_broker.lua @@ -0,0 +1,65 @@ +--- +-- a storage module that is using centreon broker +-- @module sc_storage_broker +-- @module sc_storage_broker + +--[[ + + THIS IS A STORAGE MODULE SKELETON/PLACEHOLDER + IT WILL LATER ON BE A REAL STORAGE MECANISM. + IT IS JUST HERE TO HAVE A FALLBACK FAKE STORAGE SYSTEM WHILE THIS FEATURE IS DEPLOYED + +]]-- + +local sc_storage_broker = {} +local ScStorageBroker = {} + +function sc_storage_broker.new(common, logger, params) + local self = {} + + self.sc_common = common + self.sc_logger = logger + self.params = params + + setmetatable(self, { __index = ScStorageBroker}) + return self +end + + +function ScStorageBroker:set(object_id, property, value) + return true +end + +function ScStorageBroker:set_multiple(object_id, properties) + return true +end + +function ScStorageBroker:get(object_id, property) + return true, "" +end + +function ScStorageBroker:get_multiple(object_id, properties) + return true, {} +end + +function ScStorageBroker:delete(object_id, property) + return true +end + +function ScStorageBroker:delete_multiple(object_id, properties) + return true +end + +function ScStorageBroker:show(object_id) + return true +end + +function ScStorageBroker:clear() + return true +end + +function ScStorageBroker:get_all_values_from_property(property) + return true, {} +end + +return sc_storage_broker \ No newline at end of file From e6115f7dfc4d176f559812cd7fc5253fbe5dc5b7 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Thu, 22 May 2025 12:00:33 +0200 Subject: [PATCH 18/32] update error message --- modules/centreon-stream-connectors-lib/sc_storage.lua | 5 +---- 1 
file changed, 1 insertion(+), 4 deletions(-) diff --git a/modules/centreon-stream-connectors-lib/sc_storage.lua b/modules/centreon-stream-connectors-lib/sc_storage.lua index 320eb24a..dc18533a 100644 --- a/modules/centreon-stream-connectors-lib/sc_storage.lua +++ b/modules/centreon-stream-connectors-lib/sc_storage.lua @@ -33,10 +33,7 @@ function sc_storage.new(common, logger, params) local storage_backend = require("centreon-stream-connectors-lib.storage_backends.sc_storage_" .. params.storage_backend) self.storage_backend = storage_backend.new(self.sc_common, logger, params) else - self.sc_logger:error("[sc_storage:new]: " .. tostring(pcall_message)) - self.sc_logger:error("[sc_storage:new]: Couldn't load storage backend: " .. tostring(params.storage_backend) - .. ". Make sure that the file sc_storage_" .. tostring(params.storage_backend) .. ".lua exists on your server." - .. " The stream connector is going to use the broker storage backend.") + self.sc_logger:error("[sc_storage:new]: Couldn't load storage backend: " .. tostring(params.storage_backend) .. ".\nThe stream connector is going to use the broker storage backend.\nError: " .. 
tostring(pcall_message)) self.storage_backend = require("centreon-stream-connectors-lib.storage_backends.sc_storage_broker") end From 9ddd860618705e9e68e859549bacb2edde0a94e6 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Thu, 22 May 2025 14:39:40 +0200 Subject: [PATCH 19/32] update retry for incomplete metrics (every 10 seconds) + clean logs + removing comments from the code to load the stored metrics --- .../influxdb/influxdb-metrics-apiv2.lua | 33 ++++++++----------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua index d35ef42d..8920d662 100644 --- a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua +++ b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua @@ -87,12 +87,12 @@ function EventQueue.new(params) self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) self.sc_broker = sc_broker.new(self.sc_params.params, self.sc_logger) self.sc_storage = sc_storage.new(self.sc_common, self.sc_logger, self.sc_params.params) - --local rc, init_metrics = self.sc_storage:get_all_values_from_property("metric_id") - --if rc == false or type(init_metrics) == "boolean" then - -- self.sc_logger:notice("no metric_id found in the sqlite db. That's probably because it is the first time the stream connector is executed") - --else - -- metrics = init_metrics - --end + local rc, init_metrics = self.sc_storage:get_all_values_from_property("metric_id") + if rc == false or type(init_metrics) == "boolean" then + self.sc_logger:notice("no metric_id found in the sqlite db. 
That's probably because it is the first time the stream connector is executed") + else + metrics = init_metrics + end local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements @@ -362,7 +362,6 @@ function EventQueue:send_data(payload, queue_metadata) end function EventQueue:check_incomplete_metrics() - broker_log:info(0, "[EventQueue:check_incomplete_metrics]: start check_incomplete_metrics") self.sc_logger:debug("[EventQueue:check_incomplete_metrics]: start check_incomplete_metrics") local incomplete_metrics_queue_size = 0 local incomplete_metrics_payload = "" @@ -373,22 +372,18 @@ function EventQueue:check_incomplete_metrics() } for metric_index = #incomplete_metrics, 1, -1 do local metric_data = incomplete_metrics[metric_index] - broker_log:info(0, "[EventQueue:check_incomplete_metrics]: metric_data: " .. broker.json_encode(metric_data)) if metrics[metric_data.metric_key] then - broker_log:info(0, "[EventQueue:check_incomplete_metrics]: metric_key found") + self.sc_logger:debug("[EventQueue:check_incomplete_metrics]: metric_key " .. tostring(metric_data.metric_key) .. " found: sending metric") incomplete_metrics_payload = incomplete_metrics_payload .. metric_data.metric_name .. ",metric_id=" .. metrics[metric_data.metric_key] .. " value=" .. metric_data.metric_value .. " " .. metric_data.last_check .. "\n" incomplete_metrics_queue_size = incomplete_metrics_queue_size + 1 table.remove(incomplete_metrics, metric_index) elseif os.time() - metric_data.entry_creation_date > 60 then - broker_log:info(0, "[EventQueue:check_incomplete_metrics]: metric_key " .. tostring(metric_data.metric_key) .. " is too old, removing it") self.sc_logger:debug("[EventQueue:check_incomplete_metrics]: metric_key " .. tostring(metric_data.metric_key) .. " is too old, removing it") table.remove(incomplete_metrics, metric_index) else - broker_log:info(0, "[EventQueue:check_incomplete_metrics]: keeping metric_key " .. 
tostring(metric_data.metric_key) .. " in the incomplete metrics list") self.sc_logger:debug("[EventQueue:check_incomplete_metrics]: keeping metric_key " .. tostring(metric_data.metric_key) .. " in the incomplete metrics list") end if incomplete_metrics_queue_size > self.sc_params.params.max_buffer_size then - broker_log:info(0, "[EventQueue:check_incomplete_metrics]: sending incomplete metrics payload") self.sc_logger:debug("[EventQueue:check_incomplete_metrics]: sending incomplete metrics payload") self:send_data(incomplete_metrics_payload, queue_metadata) incomplete_metrics_payload = "" @@ -396,7 +391,6 @@ function EventQueue:check_incomplete_metrics() end end if incomplete_metrics_payload ~= "" then - broker_log:info(0, "[EventQueue:check_incomplete_metrics]: sending incomplete metrics payload") self.sc_logger:debug("[EventQueue:check_incomplete_metrics]: sending incomplete metrics payload") self:send_data(incomplete_metrics_payload, queue_metadata) end @@ -466,11 +460,18 @@ function write (event) return flush() end +local last_check_incomplete_metrics = 0 -- flush method is called by broker every now and then (more often when broker has nothing else to do) function flush() local queues_size = queue.sc_flush:get_queues_size() + -- retry to send the incomplete metrics table every 10 seconds, if there are some + if #incomplete_metrics > 0 and os.time() - last_check_incomplete_metrics > 10 then + last_check_incomplete_metrics = os.time() + queue:check_incomplete_metrics() + end + -- nothing to flush if queues_size == 0 then return true @@ -481,9 +482,6 @@ function flush() if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then return false end - if #incomplete_metrics > 0 then - queue:check_incomplete_metrics() - end return true end @@ -492,9 +490,6 @@ function flush() if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then return false end - if #incomplete_metrics > 0 then 
- queue:check_incomplete_metrics() - end return true end From 6e64f43764b9410d93a8cf4dd90a88f8c6b4b046 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Thu, 22 May 2025 20:54:36 +0200 Subject: [PATCH 20/32] reduce times --- centreon-certified/influxdb/influxdb-metrics-apiv2.lua | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua index 8920d662..8f19c7c4 100644 --- a/centreon-certified/influxdb/influxdb-metrics-apiv2.lua +++ b/centreon-certified/influxdb/influxdb-metrics-apiv2.lua @@ -377,7 +377,7 @@ function EventQueue:check_incomplete_metrics() incomplete_metrics_payload = incomplete_metrics_payload .. metric_data.metric_name .. ",metric_id=" .. metrics[metric_data.metric_key] .. " value=" .. metric_data.metric_value .. " " .. metric_data.last_check .. "\n" incomplete_metrics_queue_size = incomplete_metrics_queue_size + 1 table.remove(incomplete_metrics, metric_index) - elseif os.time() - metric_data.entry_creation_date > 60 then + elseif os.time() - metric_data.entry_creation_date > 30 then self.sc_logger:debug("[EventQueue:check_incomplete_metrics]: metric_key " .. tostring(metric_data.metric_key) .. 
" is too old, removing it") table.remove(incomplete_metrics, metric_index) else @@ -467,7 +467,7 @@ function flush() local queues_size = queue.sc_flush:get_queues_size() -- retry to send the incomplete metrics table every 10 seconds, if there are some - if #incomplete_metrics > 0 and os.time() - last_check_incomplete_metrics > 10 then + if #incomplete_metrics > 0 and os.time() - last_check_incomplete_metrics > 1 then last_check_incomplete_metrics = os.time() queue:check_incomplete_metrics() end From 73add7bfcae6a2d5d5671a1dab8d915f207f8646 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Mon, 26 May 2025 10:40:36 +0200 Subject: [PATCH 21/32] Add installation/removing test for stream connectors library package + add noble --- .../actions/test-packaged-library/action.yml | 91 +++++++++++++++++++ ...ile.packaging-stream-connectors-nfpm-noble | 17 ++++ .github/workflows/stream-connectors-lib.yml | 73 ++++++++++++++- 3 files changed, 176 insertions(+), 5 deletions(-) create mode 100644 .github/actions/test-packaged-library/action.yml create mode 100644 .github/docker/Dockerfile.packaging-stream-connectors-nfpm-noble diff --git a/.github/actions/test-packaged-library/action.yml b/.github/actions/test-packaged-library/action.yml new file mode 100644 index 00000000..5f7b8f5b --- /dev/null +++ b/.github/actions/test-packaged-library/action.yml @@ -0,0 +1,91 @@ +name: "test-cpan-libs" +description: "Test packaged CPAN libraries" +inputs: + package_extension: + description: "The package extension (deb or rpm)" + required: true + distrib: + description: "The distribution name" + required: true + arch: + description: "The architecture (amd64 or arm64)" + required: true + +runs: + using: "composite" + steps: + + - if: ${{ inputs.package_extension == 'rpm' }} + name: Install zstd and Centreon repositories + run: | + dnf install -y zstd epel-release 'dnf-command(config-manager)' + dnf config-manager --set-enabled powertools || true # alma 8 + dnf config-manager --set-enabled 
crb || true # alma 9 + # Import Centreon GPG key + GPG_KEY_URL="https://yum-gpg.centreon.com/RPM-GPG-KEY-CES" + curl -sSL $GPG_KEY_URL -o RPM-GPG-KEY-CES + rpm --import RPM-GPG-KEY-CES + shell: bash + + - if: ${{ inputs.package_extension == 'deb' }} + name: Install zstd, perl and Centreon repositories + run: | + export DEBIAN_FRONTEND=noninteractive + apt-get update + apt-get install -y zstd wget gpg apt-utils procps build-essential + wget -O- https://apt-key.centreon.com | gpg --dearmor | tee /etc/apt/trusted.gpg.d/centreon.gpg > /dev/null 2>&1 + apt-get update + shell: bash + + - name: Restore package from cache + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: ./*.${{ inputs.package_extension }} + key: ${{ github.sha }}-${{ github.run_id }}-${{ inputs.package_extension }}-${{ inputs.distrib }} + fail-on-cache-miss: true + + - if: ${{ inputs.package_extension == 'rpm' }} + name: Check package installation / uninstallation + run: | + error_log="install_error_${{ inputs.distrib }}_${{ inputs.arch }}.log" + for package in ./*.rpm; do + echo "Installing package: $package" + # Install package, then uninstall it with all his dependencies + echo "Package installation..." + error_output=$(dnf install -y $package 2>&1) || { echo "$error_output" >> $error_log; echo "Error during installation of the package $package" >> $error_log; true; } + echo "Package installation done." + echo "Package uninstallation..." + error_output=$(dnf autoremove --setopt=keepcache=True -y $(echo $package | sed 's/_[0-9].*\.rpm//' | sed 's/.\///') 2>&1) || { echo "$error_output" >> $error_log; echo "Error during autoremove of the package $package" >> $error_log; true; } + echo "Package uninstallation done." 
+ done + # If the file error_log exists and is not empty, the workflow is in error + if [[ -s $error_log ]]; then + cat $error_log + exit 1 + fi + shell: bash + + - if: ${{ inputs.package_extension == 'deb' }} + name: Check packages installation / uninstallation + run: | + error_log="install_error_${{ inputs.distrib }}_${{ inputs.arch }}.log" + for package in ./*.deb; do + # If the debian package name ends with amd64 or arm64, we only install it if the tested architecture is the same, otherwise we skip it + if [[ $package == *amd64.deb && ${{ inputs.arch }} != "amd64" || $package == *arm64.deb && ${{ inputs.arch }} != "arm64" ]]; then + continue + fi + echo "Installing package: $package" + # Install package, then uninstall it with all his dependencies + echo "Package installation..." + error_output=$(apt-get install -y $package 2>&1) || { echo "$error_output" >> $error_log; echo "Error during installation of the package $package" >> $error_log; true; } + echo "Package installation done." + echo "Package uninstallation..." + error_output=$(apt-get autoremove -y --purge $(echo $package | sed 's/_[0-9].*\.deb//' | sed 's/.\///') 2>&1) || { echo "$error_output" >> $error_log; echo "Error during autoremove of the package $package" >> $error_log; true; } + echo "Package uninstallation done." 
+ done + # If the file error_log exists and is not empty, the workflow is in error + if [[ -s $error_log ]]; then + cat $error_log + exit 1 + fi + shell: bash diff --git a/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-noble b/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-noble new file mode 100644 index 00000000..da9ad891 --- /dev/null +++ b/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-noble @@ -0,0 +1,17 @@ +ARG REGISTRY_URL + +FROM ${REGISTRY_URL}/ubuntu:noble + +RUN bash -e < Date: Mon, 26 May 2025 10:48:01 +0200 Subject: [PATCH 22/32] fix add noble --- .github/workflows/docker-packaging.yml | 2 +- .github/workflows/stream-connectors.yml | 15 +++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/workflows/docker-packaging.yml b/.github/workflows/docker-packaging.yml index f7f8988f..113802d9 100644 --- a/.github/workflows/docker-packaging.yml +++ b/.github/workflows/docker-packaging.yml @@ -23,7 +23,7 @@ jobs: strategy: matrix: - distrib: [alma8, alma9, bullseye, bookworm, jammy] + distrib: [alma8, alma9, bullseye, bookworm, jammy, noble] steps: - name: Checkout sources uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 diff --git a/.github/workflows/stream-connectors.yml b/.github/workflows/stream-connectors.yml index e5dc46c5..3edfd089 100644 --- a/.github/workflows/stream-connectors.yml +++ b/.github/workflows/stream-connectors.yml @@ -21,7 +21,7 @@ jobs: uses: ./.github/workflows/get-environment.yml detect-changes: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: connectors: ${{ steps.list-connectors.outputs.connectors }} steps: @@ -53,10 +53,10 @@ jobs: package: if: ${{ needs.detect-changes.outputs.connectors != '[]' }} needs: [get-environment, detect-changes] - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 strategy: matrix: - distrib: [el8, el9, bullseye, bookworm, jammy] + distrib: [el8, el9, bullseye, bookworm, jammy, noble] connector_path: ${{ 
fromJson(needs.detect-changes.outputs.connectors) }} include: - distrib: el8 @@ -74,6 +74,9 @@ jobs: - distrib: jammy image: packaging-stream-connectors-nfpm-jammy package_extension: deb + - distrib: noble + image: packaging-stream-connectors-nfpm-noble + package_extension: deb name: package ${{ matrix.distrib }} ${{ matrix.connector_path }} container: @@ -138,7 +141,7 @@ jobs: deliver-rpm: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} needs: [get-environment, detect-changes, package] - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 strategy: matrix: distrib: [el8, el9] @@ -161,10 +164,10 @@ jobs: deliver-deb: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} needs: [get-environment, detect-changes, package] - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 strategy: matrix: - distrib: [bullseye, bookworm, jammy] + distrib: [bullseye, bookworm, jammy, noble] connector_path: ${{ fromJson(needs.detect-changes.outputs.connectors) }} name: deliver ${{ matrix.distrib }} ${{ matrix.connector_path }} From f1e4c3ac7025c3f65530c33d66e2d85a7e75a501 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Mon, 26 May 2025 10:57:03 +0200 Subject: [PATCH 23/32] fix yaml-lint --- .github/workflows/stream-connectors-lib.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stream-connectors-lib.yml b/.github/workflows/stream-connectors-lib.yml index 0f9f978f..413e6a3c 100644 --- a/.github/workflows/stream-connectors-lib.yml +++ b/.github/workflows/stream-connectors-lib.yml @@ -80,7 +80,7 @@ jobs: stability: ${{ needs.get-environment.outputs.stability }} test-packages: - needs: [ get-environment, package ] + needs: [get-environment, package] strategy: fail-fast: false matrix: From 36b64fd2b2edb880f336346ce4ad702a7fcdb103 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Mon, 26 May 2025 11:20:22 +0200 Subject: [PATCH 24/32] update: 
tests before delivery --- .github/workflows/stream-connectors-lib.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stream-connectors-lib.yml b/.github/workflows/stream-connectors-lib.yml index 413e6a3c..eecbb39a 100644 --- a/.github/workflows/stream-connectors-lib.yml +++ b/.github/workflows/stream-connectors-lib.yml @@ -141,7 +141,7 @@ jobs: deliver-rpm: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, package] + needs: [get-environment, package, test-packages] runs-on: ubuntu-24.04 strategy: matrix: @@ -163,7 +163,7 @@ jobs: deliver-deb: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, package] + needs: [get-environment, package, test-packages] runs-on: ubuntu-24.04 strategy: matrix: From ba0bbb84d1da8f2018ef81a5954526225e10b9d6 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Mon, 26 May 2025 14:14:16 +0200 Subject: [PATCH 25/32] update --- .github/actions/test-packaged-library/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/test-packaged-library/action.yml b/.github/actions/test-packaged-library/action.yml index 5f7b8f5b..c02c3e9b 100644 --- a/.github/actions/test-packaged-library/action.yml +++ b/.github/actions/test-packaged-library/action.yml @@ -28,7 +28,7 @@ runs: shell: bash - if: ${{ inputs.package_extension == 'deb' }} - name: Install zstd, perl and Centreon repositories + name: Install zstd and Centreon repositories run: | export DEBIAN_FRONTEND=noninteractive apt-get update From 8e2fe19f4e9280309a114112219266d10d3ae7f8 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Mon, 26 May 2025 14:18:20 +0200 Subject: [PATCH 26/32] fix --- .github/workflows/stream-connectors-lib.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stream-connectors-lib.yml 
b/.github/workflows/stream-connectors-lib.yml index eecbb39a..46bd2256 100644 --- a/.github/workflows/stream-connectors-lib.yml +++ b/.github/workflows/stream-connectors-lib.yml @@ -125,8 +125,8 @@ jobs: - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - name: Test packaged libs - uses: ./.github/actions/test-packaged-lib + - name: Test packaged library + uses: ./.github/actions/test-packaged-library with: package_extension: ${{ matrix.package_extension }} distrib: ${{ matrix.distrib }} From 91fa93cd67e142b0ccac05cdb9b75002e3ba2f5c Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Mon, 26 May 2025 16:06:24 +0200 Subject: [PATCH 27/32] add centreon repo for tests --- .github/actions/test-packaged-library/action.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/.github/actions/test-packaged-library/action.yml b/.github/actions/test-packaged-library/action.yml index c02c3e9b..30f72c92 100644 --- a/.github/actions/test-packaged-library/action.yml +++ b/.github/actions/test-packaged-library/action.yml @@ -14,6 +14,9 @@ inputs: runs: using: "composite" steps: + - name: Set environment variable + run: echo "CENTREON_VERSION=24.10" >> $GITHUB_ENV + shell: bash - if: ${{ inputs.package_extension == 'rpm' }} name: Install zstd and Centreon repositories @@ -25,6 +28,10 @@ runs: GPG_KEY_URL="https://yum-gpg.centreon.com/RPM-GPG-KEY-CES" curl -sSL $GPG_KEY_URL -o RPM-GPG-KEY-CES rpm --import RPM-GPG-KEY-CES + # Add Centreon repository + dnf -y config-manager --add-repo https://packages.centreon.com/rpm-standard/${{ env.CENTREON_VERSION }}/${{ inputs.distrib }}/centreon-${{ env.CENTREON_VERSION }}.repo + dnf -y clean all --enablerepo=* + dnf -y update shell: bash - if: ${{ inputs.package_extension == 'deb' }} @@ -34,6 +41,15 @@ runs: apt-get update apt-get install -y zstd wget gpg apt-utils procps build-essential wget -O- https://apt-key.centreon.com | gpg --dearmor | tee /etc/apt/trusted.gpg.d/centreon.gpg > /dev/null 2>&1 + 
# si distrib = jammy ou noble, le repository est différent + if [[ "${{ inputs.distrib }}" == "jammy" || "${{ inputs.distrib }}" == "noble" ]]; then + repo="ubuntu-standard" + else + repo="apt-standard" + fi + echo "deb https://packages.centreon.com/${{ repo }}-${{ env.CENTREON_VERSION }}-stable/ ${{ inputs.distrib }} main" | tee /etc/apt/sources.list.d/centreon.list + echo "deb https://packages.centreon.com/${{ repo }}-${{ env.CENTREON_VERSION }}-testing/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list + echo "deb https://packages.centreon.com/${{ repo }}-${{ env.CENTREON_VERSION }}-unstable/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list apt-get update shell: bash From b597e49a0b50f295cb3e7ab8e47eb3af8f8ab8f9 Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Mon, 26 May 2025 16:17:41 +0200 Subject: [PATCH 28/32] Add Centreon plugins repositories --- .../actions/test-packaged-library/action.yml | 57 ++++++++++++++++--- 1 file changed, 50 insertions(+), 7 deletions(-) diff --git a/.github/actions/test-packaged-library/action.yml b/.github/actions/test-packaged-library/action.yml index 30f72c92..65aa1ee2 100644 --- a/.github/actions/test-packaged-library/action.yml +++ b/.github/actions/test-packaged-library/action.yml @@ -31,6 +31,45 @@ runs: # Add Centreon repository dnf -y config-manager --add-repo https://packages.centreon.com/rpm-standard/${{ env.CENTREON_VERSION }}/${{ inputs.distrib }}/centreon-${{ env.CENTREON_VERSION }}.repo dnf -y clean all --enablerepo=* + # Add Centreon plugins repository + cat <> /etc/yum.repos.d/centreon-plugins.repo + [centreon-plugins-stable] + name=centreon plugins stable x86_64 + baseurl=https://packages.centreon.com/rpm-plugins/${{ inputs.distrib }}/stable/x86_64 + enabled=1 + gpgcheck=1 + gpgkey=https://yum-gpg.centreon.com/RPM-GPG-KEY-CES + [centreon-plugins-stable-noarch] + name=centreon plugins stable noarch + 
baseurl=https://packages.centreon.com/rpm-plugins/${{ inputs.distrib }}/stable/noarch + enabled=1 + gpgcheck=1 + gpgkey=https://yum-gpg.centreon.com/RPM-GPG-KEY-CES + [centreon-plugins-testing] + name=centreon plugins testing x86_64 + baseurl=https://packages.centreon.com/rpm-plugins/${{ inputs.distrib }}/testing/x86_64 + enabled=1 + gpgcheck=1 + gpgkey=https://yum-gpg.centreon.com/RPM-GPG-KEY-CES + [centreon-plugins-testing-noarch] + name=centreon plugins testing noarch + baseurl=https://packages.centreon.com/rpm-plugins/${{ inputs.distrib }}/testing/noarch + enabled=1 + gpgcheck=1 + gpgkey=https://yum-gpg.centreon.com/RPM-GPG-KEY-CES + [centreon-plugins-unstable] + name=centreon plugins unstable x86_64 + baseurl=https://packages.centreon.com/rpm-plugins/${{ inputs.distrib }}/unstable/x86_64 + enabled=1 + gpgcheck=1 + gpgkey=https://yum-gpg.centreon.com/RPM-GPG-KEY-CES + [centreon-plugins-unstable-noarch] + name=centreon plugins unstable noarch + baseurl=https://packages.centreon.com/rpm-plugins/${{ inputs.distrib }}/unstable/noarch + enabled=1 + gpgcheck=1 + gpgkey=https://yum-gpg.centreon.com/RPM-GPG-KEY-CES + EOF dnf -y update shell: bash @@ -41,15 +80,19 @@ runs: apt-get update apt-get install -y zstd wget gpg apt-utils procps build-essential wget -O- https://apt-key.centreon.com | gpg --dearmor | tee /etc/apt/trusted.gpg.d/centreon.gpg > /dev/null 2>&1 - # si distrib = jammy ou noble, le repository est différent + # Get centreon repo + repo="apt" if [[ "${{ inputs.distrib }}" == "jammy" || "${{ inputs.distrib }}" == "noble" ]]; then - repo="ubuntu-standard" - else - repo="apt-standard" + repo="ubuntu" fi - echo "deb https://packages.centreon.com/${{ repo }}-${{ env.CENTREON_VERSION }}-stable/ ${{ inputs.distrib }} main" | tee /etc/apt/sources.list.d/centreon.list - echo "deb https://packages.centreon.com/${{ repo }}-${{ env.CENTREON_VERSION }}-testing/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list - echo "deb 
https://packages.centreon.com/${{ repo }}-${{ env.CENTREON_VERSION }}-unstable/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list + # Add Centreon repositories + echo "deb https://packages.centreon.com/${repo}-standard-${{ env.CENTREON_VERSION }}-stable/ ${{ inputs.distrib }} main" | tee /etc/apt/sources.list.d/centreon.list + echo "deb https://packages.centreon.com/${repo}-standard-${{ env.CENTREON_VERSION }}-testing/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list + echo "deb https://packages.centreon.com/${repo}-standard-${{ env.CENTREON_VERSION }}-unstable/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list + # Add Centreon plugins repositories + echo "deb https://packages.centreon.com/${repo}-${{ env.CENTREON_VERSION }}-stable/ ${{ inputs.distrib }} main" | tee /etc/apt/sources.list.d/centreon-plugins.list + echo "deb https://packages.centreon.com/${repo}-${{ env.CENTREON_VERSION }}-testing/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list + echo "deb https://packages.centreon.com/${repo}-${{ env.CENTREON_VERSION }}-unstable/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list apt-get update shell: bash From 6ad2b36ce056d5188c9bdbfaad05cab972a6a01f Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Mon, 26 May 2025 16:31:05 +0200 Subject: [PATCH 29/32] fix plugins repo --- .github/actions/test-packaged-library/action.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/actions/test-packaged-library/action.yml b/.github/actions/test-packaged-library/action.yml index 65aa1ee2..d4c00ac3 100644 --- a/.github/actions/test-packaged-library/action.yml +++ b/.github/actions/test-packaged-library/action.yml @@ -90,9 +90,9 @@ runs: echo "deb https://packages.centreon.com/${repo}-standard-${{ env.CENTREON_VERSION }}-testing/ ${{ inputs.distrib }} main" | tee -a 
/etc/apt/sources.list.d/centreon-plugins.list echo "deb https://packages.centreon.com/${repo}-standard-${{ env.CENTREON_VERSION }}-unstable/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list # Add Centreon plugins repositories - echo "deb https://packages.centreon.com/${repo}-${{ env.CENTREON_VERSION }}-stable/ ${{ inputs.distrib }} main" | tee /etc/apt/sources.list.d/centreon-plugins.list - echo "deb https://packages.centreon.com/${repo}-${{ env.CENTREON_VERSION }}-testing/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list - echo "deb https://packages.centreon.com/${repo}-${{ env.CENTREON_VERSION }}-unstable/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list + echo "deb https://packages.centreon.com/${repo}-plugins-stable/ ${{ inputs.distrib }} main" | tee /etc/apt/sources.list.d/centreon-plugins.list + echo "deb https://packages.centreon.com/${repo}-plugins-testing/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list + echo "deb https://packages.centreon.com/${repo}-plugins-unstable/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list apt-get update shell: bash From 65e566d34f7605c448857afb8172e071817c92ad Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Mon, 26 May 2025 17:26:45 +0200 Subject: [PATCH 30/32] updates for noble + dependencies and repo versions --- .github/actions/deb-delivery/action.yml | 8 ++++---- .github/actions/package-nfpm/action.yml | 6 ++++++ .github/actions/test-packaged-library/action.yml | 10 ++++++++-- .github/workflows/lua-cffi.yml | 13 ++++++++----- .github/workflows/lua-tz.yml | 13 ++++++++----- .../centreon-stream-connectors-lib.yaml | 6 +++--- 6 files changed, 37 insertions(+), 19 deletions(-) diff --git a/.github/actions/deb-delivery/action.yml b/.github/actions/deb-delivery/action.yml index 3e25f433..0e1eeff5 100644 --- a/.github/actions/deb-delivery/action.yml +++ 
b/.github/actions/deb-delivery/action.yml @@ -20,7 +20,7 @@ inputs: runs: using: "composite" steps: - - if: ${{ ! (inputs.distrib == 'jammy' && inputs.stability == 'stable') }} + - if: ${{ ! ((inputs.distrib == 'jammy' || inputs.distrib == 'noble') && inputs.stability == 'stable') }} name: Use cache DEB files uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: @@ -28,18 +28,18 @@ runs: key: ${{ inputs.cache_key }} fail-on-cache-miss: true - - if: ${{ ! (inputs.distrib == 'jammy' && inputs.stability == 'stable') }} + - if: ${{ ! ((inputs.distrib == 'jammy' || inputs.distrib == 'noble') && inputs.stability == 'stable') }} uses: jfrog/setup-jfrog-cli@901bb9632db90821c2d3f076012bdeaf66598555 # v3.4.1 env: JF_URL: https://centreon.jfrog.io JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} - - if: ${{ ! (inputs.distrib == 'jammy' && inputs.stability == 'stable') }} + - if: ${{ ! ((inputs.distrib == 'jammy' || inputs.distrib == 'noble') && inputs.stability == 'stable') }} name: Publish DEBs run: | FILES="*.deb" - if [[ "${{ inputs.distrib }}" == "jammy" ]]; then + if [[ "${{ inputs.distrib }}" == "jammy" || "${{ inputs.distrib }}" == "noble" ]]; then REPO_PREFIX="ubuntu" else REPO_PREFIX="apt" diff --git a/.github/actions/package-nfpm/action.yml b/.github/actions/package-nfpm/action.yml index 295bf030..b63bad96 100644 --- a/.github/actions/package-nfpm/action.yml +++ b/.github/actions/package-nfpm/action.yml @@ -82,6 +82,12 @@ runs: export RPM_SIGNING_KEY_FILE="$(pwd)/key.gpg" export RPM_SIGNING_KEY_ID="$RPM_GPG_SIGNING_KEY_ID" export NFPM_RPM_PASSPHRASE="$RPM_GPG_SIGNING_PASSPHRASE" + + luacurl_version="0.3.13-10" + if [[ ${{ inputs.distrib }} == "jammy" ]] || [[ ${{ inputs.distrib }} == "noble" ]]; then + luacurl_version="0.3.0-9" + fi + export LUA_CURL_VERSION="$luacurl_version" for FILE in ${{ inputs.nfpm_file_pattern }}; do DIRNAME=$(dirname $FILE) diff --git a/.github/actions/test-packaged-library/action.yml 
b/.github/actions/test-packaged-library/action.yml index d4c00ac3..dfe4c497 100644 --- a/.github/actions/test-packaged-library/action.yml +++ b/.github/actions/test-packaged-library/action.yml @@ -15,7 +15,13 @@ runs: using: "composite" steps: - name: Set environment variable - run: echo "CENTREON_VERSION=24.10" >> $GITHUB_ENV + run: | + # Set Centreon version as an environment variable + version="24.10" + if [[ "${{ inputs.distrib }}" == "bookworm" ]] then + version="24.04" + fi + echo "CENTREON_VERSION=${version}" >> $GITHUB_ENV shell: bash - if: ${{ inputs.package_extension == 'rpm' }} @@ -80,7 +86,7 @@ runs: apt-get update apt-get install -y zstd wget gpg apt-utils procps build-essential wget -O- https://apt-key.centreon.com | gpg --dearmor | tee /etc/apt/trusted.gpg.d/centreon.gpg > /dev/null 2>&1 - # Get centreon repo + # Set Centreon repository prefix based on distribution repo="apt" if [[ "${{ inputs.distrib }}" == "jammy" || "${{ inputs.distrib }}" == "noble" ]]; then repo="ubuntu" diff --git a/.github/workflows/lua-cffi.yml b/.github/workflows/lua-cffi.yml index 37709aac..0e26fd9c 100644 --- a/.github/workflows/lua-cffi.yml +++ b/.github/workflows/lua-cffi.yml @@ -26,7 +26,7 @@ jobs: strategy: fail-fast: false matrix: - distrib: [el8, el9, bullseye, bookworm, jammy] + distrib: [el8, el9, bullseye, bookworm, jammy, noble] include: - package_extension: rpm image: packaging-stream-connectors-nfpm-alma8 @@ -43,8 +43,11 @@ jobs: - package_extension: deb image: packaging-stream-connectors-nfpm-jammy distrib: jammy + - package_extension: deb + image: packaging-stream-connectors-nfpm-noble + distrib: noble - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 container: image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:latest @@ -115,7 +118,7 @@ jobs: deliver-rpm: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} needs: [get-environment, package] - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 
strategy: matrix: distrib: [el8, el9] @@ -137,10 +140,10 @@ jobs: deliver-deb: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} needs: [get-environment, package] - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 strategy: matrix: - distrib: [bullseye, bookworm, jammy] + distrib: [bullseye, bookworm, jammy, noble] name: deliver ${{ matrix.distrib }} steps: diff --git a/.github/workflows/lua-tz.yml b/.github/workflows/lua-tz.yml index 27c34963..a62eed4f 100644 --- a/.github/workflows/lua-tz.yml +++ b/.github/workflows/lua-tz.yml @@ -26,7 +26,7 @@ jobs: strategy: fail-fast: false matrix: - distrib: [el8, el9, bullseye, bookworm, jammy] + distrib: [el8, el9, bullseye, bookworm, jammy, noble] include: - package_extension: rpm image: packaging-stream-connectors-nfpm-alma8 @@ -43,8 +43,11 @@ jobs: - package_extension: deb image: packaging-stream-connectors-nfpm-jammy distrib: jammy + - package_extension: deb + image: packaging-stream-connectors-nfpm-noble + distrib: noble - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 container: image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:latest @@ -88,7 +91,7 @@ jobs: deliver-rpm: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} needs: [get-environment, package] - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 strategy: matrix: distrib: [el8, el9] @@ -110,10 +113,10 @@ jobs: deliver-deb: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} needs: [get-environment, package] - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 strategy: matrix: - distrib: [bullseye, bookworm, jammy] + distrib: [bullseye, bookworm, jammy, noble] name: deliver ${{ matrix.distrib }} steps: diff --git a/packaging/connectors-lib/centreon-stream-connectors-lib.yaml b/packaging/connectors-lib/centreon-stream-connectors-lib.yaml index 356f5f1e..05abe294 100644 --- 
a/packaging/connectors-lib/centreon-stream-connectors-lib.yaml +++ b/packaging/connectors-lib/centreon-stream-connectors-lib.yaml @@ -30,15 +30,15 @@ overrides: rpm: depends: - lua-socket >= 3.0 - - centreon-broker-core >= 22.04.0 + - centreon-broker-core >= 23.10.0 - lua-curl >= 0.3.13-10 - lua-sql-mysql - lua deb: depends: - - "centreon-broker-core (>= 22.04.0)" + - "centreon-broker-core (>= 23.10.0)" - "lua-socket (>= 3.0~)" - - "lua-curl (>= 0.3.13-10)" + - "lua-curl (>= ${LUA_CURL_VERSION})" - "lua-sql-mysql" - "lua5.3" From 6fefbd5870f17e5294b2328891d5f6df318ef42f Mon Sep 17 00:00:00 2001 From: Sophie Depassio Date: Mon, 26 May 2025 17:30:31 +0200 Subject: [PATCH 31/32] fix yaml lint + test --- .github/actions/package-nfpm/action.yml | 2 +- .github/actions/test-packaged-library/action.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/package-nfpm/action.yml b/.github/actions/package-nfpm/action.yml index b63bad96..cbcb00e0 100644 --- a/.github/actions/package-nfpm/action.yml +++ b/.github/actions/package-nfpm/action.yml @@ -82,7 +82,7 @@ runs: export RPM_SIGNING_KEY_FILE="$(pwd)/key.gpg" export RPM_SIGNING_KEY_ID="$RPM_GPG_SIGNING_KEY_ID" export NFPM_RPM_PASSPHRASE="$RPM_GPG_SIGNING_PASSPHRASE" - + luacurl_version="0.3.13-10" if [[ ${{ inputs.distrib }} == "jammy" ]] || [[ ${{ inputs.distrib }} == "noble" ]]; then luacurl_version="0.3.0-9" diff --git a/.github/actions/test-packaged-library/action.yml b/.github/actions/test-packaged-library/action.yml index dfe4c497..09471496 100644 --- a/.github/actions/test-packaged-library/action.yml +++ b/.github/actions/test-packaged-library/action.yml @@ -18,7 +18,7 @@ runs: run: | # Set Centreon version as an environment variable version="24.10" - if [[ "${{ inputs.distrib }}" == "bookworm" ]] then + if [[ "${{ inputs.distrib }}" == "bullseye" ]]; then version="24.04" fi echo "CENTREON_VERSION=${version}" >> $GITHUB_ENV From bfe31462f1b15f02950af6285bc34aee093c97ac Mon Sep 17 
00:00:00 2001 From: Sophie Depassio Date: Mon, 26 May 2025 18:27:25 +0200 Subject: [PATCH 32/32] fix deb repos --- .github/actions/test-packaged-library/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/test-packaged-library/action.yml b/.github/actions/test-packaged-library/action.yml index 09471496..5dc8ee14 100644 --- a/.github/actions/test-packaged-library/action.yml +++ b/.github/actions/test-packaged-library/action.yml @@ -93,8 +93,8 @@ runs: fi # Add Centreon repositories echo "deb https://packages.centreon.com/${repo}-standard-${{ env.CENTREON_VERSION }}-stable/ ${{ inputs.distrib }} main" | tee /etc/apt/sources.list.d/centreon.list - echo "deb https://packages.centreon.com/${repo}-standard-${{ env.CENTREON_VERSION }}-testing/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list - echo "deb https://packages.centreon.com/${repo}-standard-${{ env.CENTREON_VERSION }}-unstable/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list + echo "deb https://packages.centreon.com/${repo}-standard-${{ env.CENTREON_VERSION }}-testing/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon.list + echo "deb https://packages.centreon.com/${repo}-standard-${{ env.CENTREON_VERSION }}-unstable/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon.list # Add Centreon plugins repositories echo "deb https://packages.centreon.com/${repo}-plugins-stable/ ${{ inputs.distrib }} main" | tee /etc/apt/sources.list.d/centreon-plugins.list echo "deb https://packages.centreon.com/${repo}-plugins-testing/ ${{ inputs.distrib }} main" | tee -a /etc/apt/sources.list.d/centreon-plugins.list