package functional

import (
	"context"
	"fmt"
	"log/slog"
	"sync"

	"github.com/deckhouse/deckhouse/pkg/log"

	"github.com/flant/addon-operator/pkg/app"
	"github.com/flant/addon-operator/pkg/task"
	"github.com/flant/addon-operator/pkg/task/queue"
	sh_task "github.com/flant/shell-operator/pkg/task"
)

const (
	// channelsBuffer is the buffer size of the done and process channels
	channelsBuffer = 24

	// Root is the empty trigger name: a Done(Root) signal schedules modules
	// whose dependencies are already satisfied without marking anything done
	Root = ""
)

type Scheduler struct {
	queueService *queue.Service
	logger       *log.Logger

	// batch control
	cancel context.CancelFunc

	// for safe shutdown on replacement
	wg *sync.WaitGroup

	// mtx guards requests, done and scheduled
	mtx       sync.Mutex
	requests  []*Request
	done      map[string]struct{}
	scheduled map[string]struct{}

	doneCh    chan string
	processCh chan *Request
}

type Request struct {
	Name         string
	Description  string
	Dependencies []string
	IsReloadAll  bool
	DoStartup    bool
	Labels       map[string]string
}

func NewScheduler(qService *queue.Service, logger *log.Logger) *Scheduler {
	return &Scheduler{
		queueService: qService,
		logger:       logger,
		wg:           new(sync.WaitGroup),
	}
}

// Start schedules a new batch, canceling the previous one if active.
func (s *Scheduler) Start(ctx context.Context, modules []*Request) {
	// cancel the previous batch
	if s.cancel != nil {
		s.cancel()
		// wait for batch goroutines to finish
		s.wg.Wait()
	}

	s.logger.Debug("following functional modules will be scheduled", slog.Any("modules", modules))

	// initialize new batch state
	batchCtx, cancel := context.WithCancel(ctx)
	s.cancel = cancel

	s.mtx.Lock()
	s.done = make(map[string]struct{}, len(modules))
	s.scheduled = make(map[string]struct{}, len(modules))
	s.requests = modules
	s.mtx.Unlock()

	s.doneCh = make(chan string, channelsBuffer)
	s.processCh = make(chan *Request, channelsBuffer)

	s.wg.Add(2)
	go func() {
		defer s.wg.Done()
		s.runScheduleLoop(batchCtx)
	}()

	go func() {
		defer s.wg.Done()
		s.runProcessLoop(batchCtx)
	}()
}

// runScheduleLoop handles done signals for the batch and reschedules modules that became ready
func (s *Scheduler) runScheduleLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case name, ok := <-s.doneCh:
			if !ok {
				return
			}
			s.reschedule(name)
		}
	}
}

// runProcessLoop consumes scheduled requests and turns them into ModuleRun tasks
func (s *Scheduler) runProcessLoop(ctx context.Context) {
	var idx int
	for {
		select {
		case <-ctx.Done():
			return
		case req := <-s.processCh:
			s.handleRequest(idx, req)
			idx++
		}
	}
}

// reschedule marks the named module as done and schedules every pending module whose dependencies are now satisfied.
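//
// Illustrative example (the request names are assumptions, not from the source):
// given requests A (no dependencies) and B (Dependencies: ["A"]),
// reschedule(Root) schedules A without marking anything done, and a later
// reschedule("A") marks A as done and schedules B.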
func (s *Scheduler) reschedule(name string) {
	// done, scheduled and requests are read under mtx in Finished, so guard them here as well
	s.mtx.Lock()
	defer s.mtx.Unlock()

	if name != Root {
		// skip modules that are not present in the batch
		if _, ok := s.scheduled[name]; !ok {
			return
		}

		// mark the module as done
		s.done[name] = struct{}{}
	}

	for _, req := range s.requests {
		// skip already processed
		if _, ok := s.done[req.Name]; ok {
			continue
		}

		// skip already scheduled
		if _, ok := s.scheduled[req.Name]; ok {
			continue
		}

		// check whether all dependencies are done
		ready := true
		for _, dep := range req.Dependencies {
			if _, ok := s.done[dep]; !ok {
				ready = false
				break
			}
		}

		// schedule the module if it is ready
		if ready {
			s.logger.Debug("trigger module scheduling", slog.String("module", req.Name), slog.String("trigger", name))
			s.scheduled[req.Name] = struct{}{}
			s.processCh <- req
		}
	}
}

// handleRequest creates a ModuleRun task for the request in a parallel queue
func (s *Scheduler) handleRequest(idx int, req *Request) {
	// spread ModuleRun tasks across the parallel queues round-robin
	queueName := fmt.Sprintf(app.ParallelQueueNamePattern, idx%(app.NumberOfParallelQueues-1))

	moduleTask := sh_task.NewTask(task.ModuleRun).
		WithLogLabels(req.Labels).
		WithQueueName(queueName).
		WithMetadata(task.HookMetadata{
			EventDescription: req.Description,
			ModuleName:       req.Name,
			DoModuleStartup:  req.DoStartup,
			IsReloadAll:      req.IsReloadAll,
		})

	if err := s.queueService.AddLastTaskToQueue(queueName, moduleTask); err != nil {
		s.logger.Warn("failed to add ModuleRun task to the parallel queue",
			slog.String("module", req.Name),
			slog.String("queue", queueName),
			slog.String("error", err.Error()))
	}
}

// Done signals that processing of the named module is done
func (s *Scheduler) Done(name string) {
	if s.doneCh != nil {
		s.doneCh <- name
	}
}

// Finished reports whether every request of the batch has been processed
func (s *Scheduler) Finished() bool {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	return len(s.done) == len(s.requests)
}

// Stop gracefully shuts down the scheduler
func (s *Scheduler) Stop() {
	if s.cancel == nil {
		return
	}

	// cancel the batch and wait for its goroutines before taking the lock,
	// since the schedule loop acquires mtx inside reschedule
	s.cancel()
	s.wg.Wait()

	s.mtx.Lock()
	defer s.mtx.Unlock()

	close(s.doneCh)
	close(s.processCh)
}
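
Below is a minimal sketch of how a caller might drive this scheduler; it is illustrative only. The runBatch helper and the module names are hypothetical, and the assumption that the first pass is kicked off with Done(Root) is inferred from the code rather than from upstream wiring.

// runBatch is a hypothetical caller-side helper, shown only for illustration.
func runBatch(ctx context.Context, s *Scheduler) {
	requests := []*Request{
		{Name: "module-a", Description: "converge", DoStartup: true},
		{Name: "module-b", Description: "converge", Dependencies: []string{"module-a"}},
	}

	// launch the scheduling and processing loops for the batch
	s.Start(ctx, requests)

	// kick off the first pass: modules without pending dependencies get scheduled
	s.Done(Root)

	// as each ModuleRun task completes, its handler reports it, e.g. s.Done("module-a"),
	// which makes dependent modules ready; once Finished() returns true, call s.Stop()
}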