Changes for making the extension work with the Lambda managed instance environment #30
base: main
@@ -0,0 +1,164 @@

package workers

import (
    "context"

    cfg "github.com/SumoLogic/sumologic-lambda-extensions/lambda-extensions/config"
    sumocli "github.com/SumoLogic/sumologic-lambda-extensions/lambda-extensions/sumoclient"

    "github.com/sirupsen/logrus"
)

// ElevatorTaskConsumer exposes methods for consuming tasks in elevator mode
type ElevatorTaskConsumer interface {
    Start(context.Context)
    FlushDataQueue(context.Context)
    DrainQueue(context.Context) int
}

// elevatorSumoConsumer drains logs from the dataQueue in elevator mode
type elevatorSumoConsumer struct {
    dataQueue   chan []byte
    flushSignal chan string
    logger      *logrus.Entry
    config      *cfg.LambdaExtensionConfig
    sumoclient  sumocli.LogSender
}

// NewElevatorTaskConsumer returns a new elevator consumer.
// The flushSignal channel is used to receive signals from the producer that trigger flushing.
func NewElevatorTaskConsumer(consumerQueue chan []byte, flushSignal chan string, config *cfg.LambdaExtensionConfig, logger *logrus.Entry) ElevatorTaskConsumer {
    return &elevatorSumoConsumer{
        dataQueue:   consumerQueue,
        flushSignal: flushSignal,
        logger:      logger,
        sumoclient:  sumocli.NewLogSenderClient(logger, config),
        config:      config,
    }
}

// Start launches the elevator consumer in a goroutine so it can listen for flush signals independently
func (esc *elevatorSumoConsumer) Start(ctx context.Context) {
    esc.logger.Info("Starting Elevator Consumer")
    go esc.processFlushSignals(ctx)
}

// processFlushSignals continuously listens for flush signals and triggers queue draining.
// It runs independently, without needing callbacks from the main thread.
func (esc *elevatorSumoConsumer) processFlushSignals(ctx context.Context) {
    esc.logger.Info("Elevator Consumer: Started listening for flush signals")

    for {
        select {
        case <-ctx.Done():
            esc.logger.Info("Elevator Consumer: Context cancelled, flushing remaining data")
            esc.FlushDataQueue(ctx)
            return

        case signal := <-esc.flushSignal:
            esc.logger.Infof("Elevator Consumer: Received flush signal: %s", signal)

            switch signal {
            case "queue_threshold":
                esc.logger.Info("Elevator Consumer: Draining queue due to 80% threshold")
                esc.DrainQueue(ctx)

            case "platform.report":
                esc.logger.Info("Elevator Consumer: Draining queue due to platform.report event")
                esc.DrainQueue(ctx)

            default:
                esc.logger.Warnf("Elevator Consumer: Unknown flush signal received: %s", signal)
            }
        }
    }
}

// FlushDataQueue drains the dataQueue completely (called during shutdown)
func (esc *elevatorSumoConsumer) FlushDataQueue(ctx context.Context) {
    esc.logger.Info("Elevator Consumer: Flushing DataQueue")

    if esc.config.EnableFailover {
        var rawMsgArr [][]byte
    Loop:
        for {
            select {
            case rawmsg := <-esc.dataQueue:
                rawMsgArr = append(rawMsgArr, rawmsg)
            default:
                if len(rawMsgArr) > 0 {
                    err := esc.sumoclient.FlushAll(rawMsgArr)
                    if err != nil {
                        esc.logger.Errorln("Elevator Consumer: Unable to flush DataQueue", err.Error())
                        // put all the messages back on the queue in case of failure
                        for _, msg := range rawMsgArr {
                            select {
                            case esc.dataQueue <- msg:
                            default:
                                esc.logger.Warnf("Elevator Consumer: Failed to requeue message, queue full")
                            }
                        }
                    } else {
                        esc.logger.Infof("Elevator Consumer: Successfully flushed %d messages", len(rawMsgArr))
                    }
                }
                close(esc.dataQueue)
                esc.logger.Debugf("Elevator Consumer: DataQueue completely drained and closed")
                break Loop
            }
        }
    } else {
        // call DrainQueue repeatedly (during shutdown) if failover is not enabled
        maxCallsNeededForCompleteDraining := (len(esc.dataQueue) / esc.config.MaxConcurrentRequests) + 1
        for i := 0; i < maxCallsNeededForCompleteDraining; i++ {
            esc.DrainQueue(ctx)
        }
        esc.logger.Info("Elevator Consumer: DataQueue drained without failover")
    }
}

// DrainQueue drains the current contents of the queue
func (esc *elevatorSumoConsumer) DrainQueue(ctx context.Context) int {
    esc.logger.Debug("Elevator Consumer: Draining data from dataQueue")

    var rawMsgArr [][]byte
    var logsStr string
    var runtime_done = 0

    // Collect all available messages from the queue
Loop:
    for {
        select {
        case rawmsg := <-esc.dataQueue:
            rawMsgArr = append(rawMsgArr, rawmsg)
            logsStr = string(rawmsg)
            esc.logger.Debugf("Elevator Consumer: DrainQueue: logsStr length: %d", len(logsStr))

        default:
            // No more messages in the queue, send what we have
            if len(rawMsgArr) > 0 {
                esc.logger.Infof("Elevator Consumer: Sending %d messages to Sumo Logic", len(rawMsgArr))
                err := esc.sumoclient.SendAllLogs(ctx, rawMsgArr)
                if err != nil {
                    esc.logger.Errorln("Elevator Consumer: Unable to send logs to Sumo Logic", err.Error())
                    // put all the messages back on the queue in case of failure
                    for _, msg := range rawMsgArr {
                        select {
                        case esc.dataQueue <- msg:
                        default:
                            esc.logger.Warn("Elevator Consumer: Failed to requeue message, queue full")
                        }
                    }
                } else {
                    esc.logger.Infof("Elevator Consumer: Successfully sent %d messages", len(rawMsgArr))
                }
            } else {
                esc.logger.Debug("Elevator Consumer: No messages to drain")
            }
            break Loop
        }
    }

    esc.logger.Debugf("Elevator Consumer: DrainQueue complete. Runtime done: %d", runtime_done)
    return runtime_done
}
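For readers skimming the diff, here is a minimal, hypothetical sketch of how a producer might wire this consumer up. The channel capacities, the config literal, the `workers` import path (inferred from the sibling imports above), and the timing are illustration-only assumptions, not part of this PR; a real extension populates `LambdaExtensionConfig` from its environment variables and emits the `queue_threshold` / `platform.report` signals from its Logs API handling.

```go
package main

import (
	"context"
	"time"

	cfg "github.com/SumoLogic/sumologic-lambda-extensions/lambda-extensions/config"
	"github.com/SumoLogic/sumologic-lambda-extensions/lambda-extensions/workers"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New().WithField("component", "elevator-demo")

	// Channels shared between the log producer and the elevator consumer.
	dataQueue := make(chan []byte, 100)
	flushSignal := make(chan string, 1)

	// Hypothetical config literal; the real extension builds this from its environment.
	config := &cfg.LambdaExtensionConfig{
		EnableFailover:        false,
		MaxConcurrentRequests: 3,
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	consumer := workers.NewElevatorTaskConsumer(dataQueue, flushSignal, config, logger)
	consumer.Start(ctx)

	// Producer side: enqueue a payload, then ask the consumer to drain it.
	dataQueue <- []byte(`{"message":"hello from the runtime"}`)
	flushSignal <- "platform.report"

	time.Sleep(time.Second) // crude wait so the goroutine can drain before the demo exits
}
```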
Comment on lines +162 to +163:
Variable name 'runtime_done' uses snake_case, which is inconsistent with Go naming conventions. It should be renamed to 'runtimeDone' to follow camelCase style.
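Applied to the lines referenced above (plus the declaration they depend on), the reviewer's suggestion would look roughly like this; only the identifier changes, the behavior stays the same:

```go
// Renamed from runtime_done to runtimeDone to follow Go's mixedCaps convention.
var runtimeDone = 0
// ...
esc.logger.Debugf("Elevator Consumer: DrainQueue complete. Runtime done: %d", runtimeDone)
return runtimeDone
```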