
Commit d683eff

fix: Make scheduler stateful to support sync option (#1046)
1 parent b1ad878 commit d683eff

8 files changed: +83 −81 lines changed

go.mod

Lines changed: 3 additions & 3 deletions
@@ -5,7 +5,7 @@ go 1.19
 require (
 	github.com/apache/arrow/go/v13 v13.0.0-20230630125530-5a06b2ec2a8e
 	github.com/bradleyjkemp/cupaloy/v2 v2.8.0
-	github.com/cloudquery/plugin-pb-go v1.5.0
+	github.com/cloudquery/plugin-pb-go v1.5.1
 	github.com/cloudquery/plugin-sdk/v2 v2.7.0
 	github.com/getsentry/sentry-go v0.20.0
 	github.com/goccy/go-json v0.10.2
@@ -33,9 +33,9 @@ require (
 	github.com/google/flatbuffers v23.1.21+incompatible // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/klauspost/compress v1.16.6 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.3 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.18 // indirect
+	github.com/mattn/go-isatty v0.0.19 // indirect
 	github.com/pierrec/lz4/v4 v4.1.17 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect

go.sum

Lines changed: 7 additions & 7 deletions
@@ -42,8 +42,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cloudquery/arrow/go/v13 v13.0.0-20230703001435-df3b664a289d h1:Rm71jctmvnnPd3HtFh3Rzo5uSm8cCFKPce538YD2Sy8=
 github.com/cloudquery/arrow/go/v13 v13.0.0-20230703001435-df3b664a289d/go.mod h1:W69eByFNO0ZR30q1/7Sr9d83zcVZmF2MiP3fFYAWJOc=
-github.com/cloudquery/plugin-pb-go v1.5.0 h1:A/RE1U1l34W5T+JlXJzrHz0IMzfpdUK4VSg+J1Hw0gw=
-github.com/cloudquery/plugin-pb-go v1.5.0/go.mod h1:NbWAtT2BzJQ9+XUWwh3IKBg3MOeV9ZEpHoHNAQ/YDV8=
+github.com/cloudquery/plugin-pb-go v1.5.1 h1:Ojx4KlHDjWIqZwEnPCMFwlQOGUxTWwk99jbTCvQHLpw=
+github.com/cloudquery/plugin-pb-go v1.5.1/go.mod h1:R0Wse6NbJDZIHcRQjJ1sZGYDo3mrIDm4k3El1YUrvGA=
 github.com/cloudquery/plugin-sdk/v2 v2.7.0 h1:hRXsdEiaOxJtsn/wZMFQC9/jPfU1MeMK3KF+gPGqm7U=
 github.com/cloudquery/plugin-sdk/v2 v2.7.0/go.mod h1:pAX6ojIW99b/Vg4CkhnsGkRIzNaVEceYMR+Bdit73ug=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
@@ -145,8 +145,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk=
 github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
-github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
-github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
+github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -157,8 +157,8 @@ github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxec
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
-github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
 github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
@@ -314,8 +314,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
 golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

internal/servers/plugin/v3/plugin.go

Lines changed: 6 additions & 0 deletions
@@ -112,6 +112,12 @@ func (s *Server) Sync(req *pb.Sync_Request, stream pb.Plugin_SyncServer) error {
 		SkipDependentTables: req.SkipDependentTables,
 		DeterministicCQID:   req.DeterministicCqId,
 	}
+	if req.Backend != nil {
+		syncOptions.BackendOptions = &plugin.BackendOptions{
+			TableName:  req.Backend.TableName,
+			Connection: req.Backend.Connection,
+		}
+	}

 	go func() {
 		defer close(msgs)

plugin/plugin_source.go

Lines changed: 6 additions & 0 deletions
@@ -11,11 +11,17 @@ import (
 	"github.com/rs/zerolog"
 )

+type BackendOptions struct {
+	TableName  string
+	Connection string
+}
+
 type SyncOptions struct {
 	Tables              []string
 	SkipTables          []string
 	SkipDependentTables bool
 	DeterministicCQID   bool
+	BackendOptions      *BackendOptions
 }

 type SourceClient interface {
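
The new BackendOptions field is how the sync-time backend configuration reaches a source plugin, and the server only populates it when the request actually carries a backend block, so plugins must treat it as optional. A minimal sketch of a plugin consuming it follows; it assumes a SourceClient-style Sync method that receives plugin.SyncOptions, and everything beyond SyncOptions/BackendOptions (the Client type and its logger) is an illustrative stand-in rather than SDK API.

package example

import (
	"context"

	"github.com/cloudquery/plugin-sdk/v4/message"
	"github.com/cloudquery/plugin-sdk/v4/plugin"
	"github.com/rs/zerolog"
)

// Client stands in for a source plugin's client; only plugin.SyncOptions and
// plugin.BackendOptions come from this change, the rest is illustrative.
type Client struct {
	logger zerolog.Logger
}

func (c *Client) Sync(ctx context.Context, options plugin.SyncOptions, res chan<- message.SyncMessage) error {
	if options.BackendOptions == nil {
		// The server only fills BackendOptions when the sync request carries a
		// backend block, so it has to be treated as optional.
		c.logger.Info().Msg("no state backend configured; running a full sync")
	} else {
		// A real plugin would dial Connection (for example a gRPC target) and
		// use TableName as the table that stores per-table incremental cursors.
		c.logger.Info().
			Str("connection", options.BackendOptions.Connection).
			Str("table_name", options.BackendOptions.TableName).
			Msg("state backend configured; incremental tables can persist cursors")
	}
	// ... resolve tables and send message.SyncMessage values on res ...
	return nil
}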

scheduler/scheduler.go

Lines changed: 42 additions & 26 deletions
@@ -136,15 +136,11 @@ func WithStrategy(strategy Strategy) Option {
 	}
 }

-type SyncOptions struct {
-	DeterministicCQID bool
-}
-
-type SyncOption func(*SyncOptions)
+type SyncOption func(*syncClient)

 func WithSyncDeterministicCQID(deterministicCQID bool) SyncOption {
-	return func(s *SyncOptions) {
-		s.DeterministicCQID = deterministicCQID
+	return func(s *syncClient) {
+		s.deterministicCQID = deterministicCQID
 	}
 }

@@ -153,12 +149,8 @@ type Client interface {
 }

 type Scheduler struct {
-	tables   schema.Tables
-	client   schema.ClientMeta
 	caser    *caser.Caser
 	strategy Strategy
-	// status sync metrics
-	metrics  *Metrics
 	maxDepth uint64
 	// resourceSem is a semaphore that limits the number of concurrent resources being fetched
 	resourceSem *semaphore.Weighted
@@ -169,28 +161,47 @@ type Scheduler struct {
 	concurrency uint64
 }

-func NewScheduler(client schema.ClientMeta, opts ...Option) *Scheduler {
+type syncClient struct {
+	tables            schema.Tables
+	client            schema.ClientMeta
+	scheduler         *Scheduler
+	deterministicCQID bool
+	// status sync metrics
+	metrics *Metrics
+	logger  zerolog.Logger
+}
+
+func NewScheduler(opts ...Option) *Scheduler {
 	s := Scheduler{
-		client:      client,
-		metrics:     &Metrics{TableClient: make(map[string]map[string]*TableClientMetrics)},
 		caser:       caser.New(),
 		concurrency: DefaultConcurrency,
 		maxDepth:    DefaultMaxDepth,
 	}
 	for _, opt := range opts {
 		opt(&s)
 	}
+	// This is very similar to the concurrent web crawler problem with some minor changes.
+	// We are using DFS/Round-Robin to make sure memory usage is capped at O(h) where h is the height of the tree.
+	tableConcurrency := max(s.concurrency/minResourceConcurrency, minTableConcurrency)
+	resourceConcurrency := tableConcurrency * minResourceConcurrency
+	s.tableSems = make([]*semaphore.Weighted, s.maxDepth)
+	for i := uint64(0); i < s.maxDepth; i++ {
+		s.tableSems[i] = semaphore.NewWeighted(int64(tableConcurrency))
+		// reduce table concurrency logarithmically for every depth level
+		tableConcurrency = max(tableConcurrency/2, minTableConcurrency)
+	}
+	s.resourceSem = semaphore.NewWeighted(int64(resourceConcurrency))
 	return &s
 }

 // SyncAll is mostly used for testing as it will sync all tables and can run out of memory
 // in the real world. Should use Sync for production.
-func (s *Scheduler) SyncAll(ctx context.Context, tables schema.Tables) (message.SyncMessages, error) {
+func (s *Scheduler) SyncAll(ctx context.Context, client schema.ClientMeta, tables schema.Tables) (message.SyncMessages, error) {
 	res := make(chan message.SyncMessage)
 	var err error
 	go func() {
 		defer close(res)
-		err = s.Sync(ctx, tables, res)
+		err = s.Sync(ctx, client, tables, res)
 	}()
 	// nolint:prealloc
 	var messages message.SyncMessages
@@ -200,20 +211,25 @@ func (s *Scheduler) SyncAll(ctx context.Context, tables schema.Tables) (message.
 	return messages, err
 }

-func (s *Scheduler) Sync(ctx context.Context, tables schema.Tables, res chan<- message.SyncMessage, opts ...SyncOption) error {
+func (s *Scheduler) Sync(ctx context.Context, client schema.ClientMeta, tables schema.Tables, res chan<- message.SyncMessage, opts ...SyncOption) error {
 	if len(tables) == 0 {
 		return nil
 	}

-	syncOpts := &SyncOptions{}
+	syncClient := &syncClient{
+		metrics:   &Metrics{TableClient: make(map[string]map[string]*TableClientMetrics)},
+		tables:    tables,
+		client:    client,
+		scheduler: s,
+		logger:    s.logger,
+	}
 	for _, opt := range opts {
-		opt(syncOpts)
+		opt(syncClient)
 	}

 	if maxDepth(tables) > s.maxDepth {
 		return fmt.Errorf("max depth exceeded, max depth is %d", s.maxDepth)
 	}
-	s.tables = tables

 	// send migrate messages first
 	for _, table := range tables.FlattenTables() {
@@ -227,9 +243,9 @@ func (s *Scheduler) Sync(ctx context.Context, tables schema.Tables, res chan<- m
 	defer close(resources)
 	switch s.strategy {
 	case StrategyDFS:
-		s.syncDfs(ctx, resources, syncOpts)
+		syncClient.syncDfs(ctx, resources)
 	case StrategyRoundRobin:
-		s.syncRoundRobin(ctx, resources, syncOpts)
+		syncClient.syncRoundRobin(ctx, resources)
 	default:
 		panic(fmt.Errorf("unknown scheduler %s", s.strategy.String()))
 	}
@@ -244,7 +260,7 @@ func (s *Scheduler) Sync(ctx context.Context, tables schema.Tables, res chan<- m
 	return nil
 }

-func (s *Scheduler) logTablesMetrics(tables schema.Tables, client Client) {
+func (s *syncClient) logTablesMetrics(tables schema.Tables, client Client) {
 	clientName := client.ID()
 	for _, table := range tables {
 		metrics := s.metrics.TableClient[table.Name][clientName]
@@ -253,7 +269,7 @@ func (s *Scheduler) logTablesMetrics(tables schema.Tables, client Client) {
 	}
 }

-func (s *Scheduler) resolveResource(ctx context.Context, table *schema.Table, client schema.ClientMeta, parent *schema.Resource, item any) *schema.Resource {
+func (s *syncClient) resolveResource(ctx context.Context, table *schema.Table, client schema.ClientMeta, parent *schema.Resource, item any) *schema.Resource {
 	var validationErr *schema.ValidationError
 	ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
 	defer cancel()
@@ -307,7 +323,7 @@ func (s *Scheduler) resolveResource(ctx context.Context, table *schema.Table, cl
 	return resource
 }

-func (s *Scheduler) resolveColumn(ctx context.Context, logger zerolog.Logger, tableMetrics *TableClientMetrics, client schema.ClientMeta, resource *schema.Resource, c schema.Column) {
+func (s *syncClient) resolveColumn(ctx context.Context, logger zerolog.Logger, tableMetrics *TableClientMetrics, client schema.ClientMeta, resource *schema.Resource, c schema.Column) {
 	var validationErr *schema.ValidationError
 	columnStartTime := time.Now()
 	defer func() {
@@ -337,7 +353,7 @@ func (s *Scheduler) resolveColumn(ctx context.Context, logger zerolog.Logger, ta
 		}
 	} else {
 		// base use case: try to get column with CamelCase name
-		v := funk.Get(resource.GetItem(), s.caser.ToPascal(c.Name), funk.WithAllowZero())
+		v := funk.Get(resource.GetItem(), s.scheduler.caser.ToPascal(c.Name), funk.WithAllowZero())
 		if v != nil {
 			err := resource.Set(c.Name, v)
 			if err != nil {
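
Moving the per-sync state (tables, client, metrics, deterministic-CQID flag) into syncClient changes the public call pattern: the ClientMeta and tables now accompany each Sync/SyncAll call instead of being fixed at construction. A minimal sketch of the updated usage, mirroring what SyncAll does internally; only the identifiers that appear in this diff or its context (NewScheduler, WithStrategy, StrategyDFS, WithSyncDeterministicCQID, Sync) are taken as real, the surrounding function is illustrative.

package example

import (
	"context"

	"github.com/cloudquery/plugin-sdk/v4/message"
	"github.com/cloudquery/plugin-sdk/v4/scheduler"
	"github.com/cloudquery/plugin-sdk/v4/schema"
)

// runSync shows the updated calling convention after this change: the client
// and tables travel with each Sync call, and per-sync behaviour such as
// deterministic CQIDs is applied through SyncOption values.
func runSync(ctx context.Context, client schema.ClientMeta, tables schema.Tables) (message.SyncMessages, error) {
	s := scheduler.NewScheduler(scheduler.WithStrategy(scheduler.StrategyDFS))

	res := make(chan message.SyncMessage)
	var syncErr error
	go func() {
		defer close(res)
		syncErr = s.Sync(ctx, client, tables, res, scheduler.WithSyncDeterministicCQID(true))
	}()

	// Drain the channel; the goroutine closes it when the sync finishes.
	var messages message.SyncMessages
	for msg := range res {
		messages = append(messages, msg)
	}
	return messages, syncErr
}

Because the Scheduler itself no longer holds tables, client, or metrics, the same Scheduler value can be constructed once and reused for multiple syncs with different clients and table sets.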

scheduler/scheduler_dfs.go

Lines changed: 13 additions & 27 deletions
@@ -11,23 +11,9 @@ import (
 	"github.com/cloudquery/plugin-sdk/v4/helpers"
 	"github.com/cloudquery/plugin-sdk/v4/schema"
 	"github.com/getsentry/sentry-go"
-	"golang.org/x/sync/semaphore"
 )

-func (s *Scheduler) syncDfs(ctx context.Context, resolvedResources chan<- *schema.Resource, syncOpts *SyncOptions) {
-	// This is very similar to the concurrent web crawler problem with some minor changes.
-	// We are using DFS to make sure memory usage is capped at O(h) where h is the height of the tree.
-	tableConcurrency := max(s.concurrency/minResourceConcurrency, minTableConcurrency)
-	resourceConcurrency := tableConcurrency * minResourceConcurrency
-
-	s.tableSems = make([]*semaphore.Weighted, s.maxDepth)
-	for i := uint64(0); i < s.maxDepth; i++ {
-		s.tableSems[i] = semaphore.NewWeighted(int64(tableConcurrency))
-		// reduce table concurrency logarithmically for every depth level
-		tableConcurrency = max(tableConcurrency/2, minTableConcurrency)
-	}
-	s.resourceSem = semaphore.NewWeighted(int64(resourceConcurrency))
-
+func (s *syncClient) syncDfs(ctx context.Context, resolvedResources chan<- *schema.Resource) {
 	// we have this because plugins can return sometimes clients in a random way which will cause
 	// differences between this run and the next one.
 	preInitialisedClients := make([][]schema.ClientMeta, len(s.tables))
@@ -61,18 +47,18 @@
 		clients := preInitialisedClients[i]
 		for _, client := range clients {
 			client := client
-			if err := s.tableSems[0].Acquire(ctx, 1); err != nil {
+			if err := s.scheduler.tableSems[0].Acquire(ctx, 1); err != nil {
 				// This means context was cancelled
 				wg.Wait()
 				return
 			}
 			wg.Add(1)
 			go func() {
 				defer wg.Done()
-				defer s.tableSems[0].Release(1)
+				defer s.scheduler.tableSems[0].Release(1)
 				// not checking for error here as nothing much todo.
 				// the error is logged and this happens when context is cancelled
-				s.resolveTableDfs(ctx, table, client, nil, resolvedResources, 1, syncOpts)
+				s.resolveTableDfs(ctx, table, client, nil, resolvedResources, 1)
 			}()
 		}
 	}
@@ -81,7 +67,7 @@
 	wg.Wait()
 }

-func (s *Scheduler) resolveTableDfs(ctx context.Context, table *schema.Table, client schema.ClientMeta, parent *schema.Resource, resolvedResources chan<- *schema.Resource, depth int, syncOpts *SyncOptions) {
+func (s *syncClient) resolveTableDfs(ctx context.Context, table *schema.Table, client schema.ClientMeta, parent *schema.Resource, resolvedResources chan<- *schema.Resource, depth int) {
 	var validationErr *schema.ValidationError
 	clientName := client.ID()
 	logger := s.logger.With().Str("table", table.Name).Str("client", clientName).Logger()
@@ -119,7 +105,7 @@ func (s *Scheduler) resolveTableDfs(ctx context.Context, table *schema.Table, cl
 	}()

 	for r := range res {
-		s.resolveResourcesDfs(ctx, table, client, parent, r, resolvedResources, depth, syncOpts)
+		s.resolveResourcesDfs(ctx, table, client, parent, r, resolvedResources, depth)
 	}

 	// we don't need any waitgroups here because we are waiting for the channel to close
@@ -129,7 +115,7 @@ func (s *Scheduler) resolveTableDfs(ctx context.Context, table *schema.Table, cl
 	}
 }

-func (s *Scheduler) resolveResourcesDfs(ctx context.Context, table *schema.Table, client schema.ClientMeta, parent *schema.Resource, resources any, resolvedResources chan<- *schema.Resource, depth int, syncOpts *SyncOptions) {
+func (s *syncClient) resolveResourcesDfs(ctx context.Context, table *schema.Table, client schema.ClientMeta, parent *schema.Resource, resources any, resolvedResources chan<- *schema.Resource, depth int) {
 	resourcesSlice := helpers.InterfaceSlice(resources)
 	if len(resourcesSlice) == 0 {
 		return
@@ -141,23 +127,23 @@ func (s *Scheduler) resolveResourcesDfs(ctx context.Context, table *schema.Table
 	sentValidationErrors := sync.Map{}
 	for i := range resourcesSlice {
 		i := i
-		if err := s.resourceSem.Acquire(ctx, 1); err != nil {
+		if err := s.scheduler.resourceSem.Acquire(ctx, 1); err != nil {
 			s.logger.Warn().Err(err).Msg("failed to acquire semaphore. context cancelled")
 			wg.Wait()
 			// we have to continue emptying the channel to exit gracefully
 			return
 		}
 		wg.Add(1)
 		go func() {
-			defer s.resourceSem.Release(1)
+			defer s.scheduler.resourceSem.Release(1)
 			defer wg.Done()
 			//nolint:all
 			resolvedResource := s.resolveResource(ctx, table, client, parent, resourcesSlice[i])
 			if resolvedResource == nil {
 				return
 			}

-			if err := resolvedResource.CalculateCQID(syncOpts.DeterministicCQID); err != nil {
+			if err := resolvedResource.CalculateCQID(s.deterministicCQID); err != nil {
 				tableMetrics := s.metrics.TableClient[table.Name][client.ID()]
 				s.logger.Error().Err(err).Str("table", table.Name).Str("client", client.ID()).Msg("resource resolver finished with primary key calculation error")
 				if _, found := sentValidationErrors.LoadOrStore(table.Name, struct{}{}); !found {
@@ -197,16 +183,16 @@ func (s *Scheduler) resolveResourcesDfs(ctx context.Context, table *schema.Table
 			resolvedResources <- resource
 			for _, relation := range resource.Table.Relations {
 				relation := relation
-				if err := s.tableSems[depth].Acquire(ctx, 1); err != nil {
+				if err := s.scheduler.tableSems[depth].Acquire(ctx, 1); err != nil {
 					// This means context was cancelled
 					wg.Wait()
 					return
 				}
 				wg.Add(1)
 				go func() {
 					defer wg.Done()
-					defer s.tableSems[depth].Release(1)
-					s.resolveTableDfs(ctx, relation, client, resource, resolvedResources, depth+1, syncOpts)
+					defer s.scheduler.tableSems[depth].Release(1)
+					s.resolveTableDfs(ctx, relation, client, resource, resolvedResources, depth+1)
 				}()
 			}
 		}
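
With the semaphore setup moved out of syncDfs and into NewScheduler, the DFS (and round-robin) walkers reach the per-depth table semaphores and the shared resource semaphore through s.scheduler, so every sync run on the same Scheduler draws from one concurrency budget. A rough, self-contained sketch of how those semaphore weights come out; the constant values below are assumptions for illustration, not taken from this diff.

package main

import "fmt"

// Mirrors how NewScheduler sizes the per-depth table semaphores and the
// resource semaphore. Concrete defaults are assumed, not quoted from the SDK.
func main() {
	const (
		concurrency            = 50000 // assumed default overall concurrency
		minResourceConcurrency = 100   // assumed floor constant
		minTableConcurrency    = 1     // assumed floor constant
		maxDepth               = 4     // assumed default max depth
	)
	tableConcurrency := maxInt(concurrency/minResourceConcurrency, minTableConcurrency)
	fmt.Println("resource semaphore weight:", tableConcurrency*minResourceConcurrency)
	for depth := 0; depth < maxDepth; depth++ {
		fmt.Printf("table semaphore weight at depth %d: %d\n", depth, tableConcurrency)
		// reduce table concurrency logarithmically for every depth level
		tableConcurrency = maxInt(tableConcurrency/2, minTableConcurrency)
	}
}

// maxInt stands in for the scheduler package's internal max helper
// (the module targets Go 1.19, which has no built-in generic max).
func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}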
