
Commit 78221e5

style: Lint

Parent: 0df3478

Some file diffs in this large commit are hidden by default and not shown below.

43 files changed, +157 -157 lines
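The diffs below are a mechanical rename from the arrow.Record naming to arrow.RecordBatch, including the matching constructors. A condensed sketch of the pattern, using only Arrow calls that appear in this commit; the arrow-go v18 import path, the function names, and the surrounding program are illustrative assumptions, not part of the change:

package main

import (
    "github.com/apache/arrow-go/v18/arrow" // assumed module path
    "github.com/apache/arrow-go/v18/arrow/array"
    "github.com/apache/arrow-go/v18/arrow/memory"
)

// Signatures move from arrow.Record to arrow.RecordBatch; behavior is unchanged.
func makeBatch() arrow.RecordBatch { // was: func makeBatch() arrow.Record
    sc := arrow.NewSchema([]arrow.Field{{Name: "col1", Type: arrow.BinaryTypes.String}}, nil)

    // Builder-based construction: only the method name changes.
    bldr := array.NewRecordBuilder(memory.DefaultAllocator, sc)
    defer bldr.Release()
    bldr.Field(0).(*array.StringBuilder).Append("test")
    return bldr.NewRecordBatch() // was: bldr.NewRecord()
}

// Direct construction follows the same rename.
func makeBatchDirect() arrow.RecordBatch {
    sc := arrow.NewSchema([]arrow.Field{{Name: "col1", Type: arrow.BinaryTypes.String}}, nil)
    str := array.NewStringBuilder(memory.DefaultAllocator)
    defer str.Release()
    str.AppendString("test")
    arr := str.NewStringArray()
    return array.NewRecordBatch(sc, []arrow.Array{arr}, 1) // was: array.NewRecord(...)
}

func main() {
    _ = makeBatch()
    _ = makeBatchDirect()
}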

examples/simple_plugin/plugin/client.go

Lines changed: 2 additions & 2 deletions

@@ -59,12 +59,12 @@ func (*Client) Write(context.Context, <-chan message.WriteMessage) error {
     return nil
 }
 
-func (*Client) Read(context.Context, *schema.Table, chan<- arrow.Record) error {
+func (*Client) Read(context.Context, *schema.Table, chan<- arrow.RecordBatch) error {
     // Not implemented, just used for testing destination packaging
     return nil
 }
 
-func (*Client) Transform(_ context.Context, _ <-chan arrow.Record, _ chan<- arrow.Record) error {
+func (*Client) Transform(_ context.Context, _ <-chan arrow.RecordBatch, _ chan<- arrow.RecordBatch) error {
     // Not implemented, just used for testing destination packaging
     return nil
 }

internal/batch/slice.go

Lines changed: 18 additions & 18 deletions

@@ -7,13 +7,13 @@ import (
 
 type (
     SlicedRecord struct {
-        arrow.Record
+        arrow.RecordBatch
         Bytes       int64 // we need this as the util.TotalRecordSize will report the full size even for the sliced record
         bytesPerRow int64
     }
 )
 
-func (s *SlicedRecord) split(limit *Cap) (add *SlicedRecord, toFlush []arrow.Record, rest *SlicedRecord) {
+func (s *SlicedRecord) split(limit *Cap) (add *SlicedRecord, toFlush []arrow.RecordBatch, rest *SlicedRecord) {
     if s == nil {
         return nil, nil, nil
     }
@@ -23,13 +23,13 @@ func (s *SlicedRecord) split(limit *Cap) (add *SlicedRecord, toFlush []arrow.Rec
         limit.add(add.Bytes, add.NumRows())
     }
 
-    if s.Record == nil {
+    if s.RecordBatch == nil {
         // all processed
         return add, nil, nil
     }
 
     toFlush = s.getToFlush(limit)
-    if s.Record == nil {
+    if s.RecordBatch == nil {
         // all processed
         return add, toFlush, nil
     }
@@ -56,21 +56,21 @@ func (s *SlicedRecord) getAdd(limit *Cap) *SlicedRecord {
         // grab the whole record (either no limits or not overflowing)
         res := *s
         s.Bytes = 0
-        s.Record = nil
+        s.RecordBatch = nil
         return &res
     }
 
     res := SlicedRecord{
-        Record:      s.NewSlice(0, rows),
+        RecordBatch: s.NewSlice(0, rows),
         Bytes:       rows * s.bytesPerRow,
         bytesPerRow: s.bytesPerRow,
     }
-    s.Record = s.NewSlice(rows, s.NumRows())
+    s.RecordBatch = s.NewSlice(rows, s.NumRows())
     s.Bytes -= res.Bytes
     return &res
 }
 
-func (s *SlicedRecord) getToFlush(limit *Cap) []arrow.Record {
+func (s *SlicedRecord) getToFlush(limit *Cap) []arrow.RecordBatch {
     rowsByBytes := limit.bytes.capPerN(s.bytesPerRow)
     rows := limit.rows.cap()
     switch {
@@ -93,41 +93,41 @@ func (s *SlicedRecord) getToFlush(limit *Cap) []arrow.Rec
         return nil
     }
 
-    flush := make([]arrow.Record, 0, s.NumRows()/rows)
+    flush := make([]arrow.RecordBatch, 0, s.NumRows()/rows)
     offset := int64(0)
     for offset+rows <= s.NumRows() {
         flush = append(flush, s.NewSlice(offset, offset+rows))
         offset += rows
     }
     if offset == s.NumRows() {
         // we processed everything for flush
-        s.Record = nil
+        s.RecordBatch = nil
         s.Bytes = 0
         return flush
     }
 
     // set record to the remainder
-    s.Record = s.NewSlice(offset, s.NumRows())
+    s.RecordBatch = s.NewSlice(offset, s.NumRows())
     s.Bytes = s.NumRows() * s.bytesPerRow
 
     return flush
 }
 
-func (s *SlicedRecord) slice() []arrow.Record {
-    res := make([]arrow.Record, s.NumRows())
+func (s *SlicedRecord) slice() []arrow.RecordBatch {
+    res := make([]arrow.RecordBatch, s.NumRows())
     for i := int64(0); i < s.NumRows(); i++ {
         res[i] = s.NewSlice(i, i+1)
     }
     return res
 }
 
-func newSlicedRecord(r arrow.Record) *SlicedRecord {
+func newSlicedRecord(r arrow.RecordBatch) *SlicedRecord {
     if r.NumRows() == 0 {
         return nil
     }
     res := SlicedRecord{
-        Record: r,
-        Bytes:  util.TotalRecordSize(r),
+        RecordBatch: r,
+        Bytes:       util.TotalRecordSize(r),
     }
     res.bytesPerRow = res.Bytes / r.NumRows()
     return &res
@@ -136,10 +136,10 @@ func newSlicedRecord(r arrow.Record) *SlicedRecord {
 // SliceRecord will return the SlicedRecord you can add to the batch given the restrictions provided (if any).
 // The meaning of the returned values:
 // - `add` is good to be added to the current batch that the caller is assembling
-// - `flush` represents sliced arrow.Record that needs own batch to be flushed
+// - `flush` represents sliced arrow.RecordBatch that needs own batch to be flushed
 // - `remaining` represents the overflow of the batch after `add` & `flush` are processed
 // Note that the `limit` provided will not be updated.
-func SliceRecord(r arrow.Record, limit *Cap) (add *SlicedRecord, flush []arrow.Record, remaining *SlicedRecord) {
+func SliceRecord(r arrow.RecordBatch, limit *Cap) (add *SlicedRecord, flush []arrow.RecordBatch, remaining *SlicedRecord) {
     l := *limit // copy value
     return newSlicedRecord(r).split(&l)
 }
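The SlicedRecord helpers above carve an oversized batch into cap-sized pieces via Arrow's zero-copy NewSlice. A minimal sketch of that slicing step in isolation, with the Cap bookkeeping omitted; splitRows, maxRows, and the package name are illustrative, not SDK API:

package batchsketch

import "github.com/apache/arrow-go/v18/arrow" // assumed module path

// splitRows cuts rec into chunks of at most maxRows rows (maxRows must be
// positive), mirroring the getToFlush loop above. Each chunk is a zero-copy
// view over the parent batch, not a copy of its data.
func splitRows(rec arrow.RecordBatch, maxRows int64) []arrow.RecordBatch {
    out := make([]arrow.RecordBatch, 0, rec.NumRows()/maxRows+1)
    for offset := int64(0); offset < rec.NumRows(); offset += maxRows {
        end := offset + maxRows
        if end > rec.NumRows() {
            end = rec.NumRows()
        }
        out = append(out, rec.NewSlice(offset, end))
    }
    return out
}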

internal/clients/state/v3/state.go

Lines changed: 1 addition & 1 deletion

@@ -163,7 +163,7 @@ func (c *Client) Flush(ctx context.Context) error {
             version.Append(val.version)
         }
     }
-    rec := bldr.NewRecord()
+    rec := bldr.NewRecordBatch()
     recordBytes, err := pb.RecordToBytes(rec)
     if err != nil {
         return err

internal/memdb/memdb.go

Lines changed: 11 additions & 11 deletions

@@ -16,7 +16,7 @@ import (
 
 // client is mostly used for testing the destination plugin.
 type client struct {
-    memoryDB     map[string][]arrow.Record
+    memoryDB     map[string][]arrow.RecordBatch
     tables       map[string]*schema.Table
     memoryDBLock sync.RWMutex
     errOnWrite   bool
@@ -42,7 +42,7 @@ func WithBlockingWrite() Option {
 
 func GetNewClient(options ...Option) plugin.NewClientFunc {
     c := &client{
-        memoryDB:     make(map[string][]arrow.Record),
+        memoryDB:     make(map[string][]arrow.RecordBatch),
         memoryDBLock: sync.RWMutex{},
         tables: map[string]*schema.Table{
             "table1": {
@@ -112,13 +112,13 @@ func NewMemDBClientErrOnNew(context.Context, zerolog.Logger, []byte, plugin.NewC
     return nil, errors.New("newTestDestinationMemDBClientErrOnNew")
 }
 
-func (c *client) overwrite(table *schema.Table, record arrow.Record) {
+func (c *client) overwrite(table *schema.Table, record arrow.RecordBatch) {
     for i := int64(0); i < record.NumRows(); i++ {
         c.overwriteRow(table, record.NewSlice(i, i+1))
     }
 }
 
-func (c *client) overwriteRow(table *schema.Table, data arrow.Record) {
+func (c *client) overwriteRow(table *schema.Table, data arrow.RecordBatch) {
     tableName := table.Name
     pksIndex := table.PrimaryKeysIndexes()
     if len(pksIndex) == 0 {
@@ -152,7 +152,7 @@ func (*client) GetSpec() any {
     return &Spec{}
 }
 
-func (c *client) Read(_ context.Context, table *schema.Table, res chan<- arrow.Record) error {
+func (c *client) Read(_ context.Context, table *schema.Table, res chan<- arrow.RecordBatch) error {
     c.memoryDBLock.RLock()
     defer c.memoryDBLock.RUnlock()
 
@@ -196,7 +196,7 @@ func (c *client) migrate(_ context.Context, table *schema.Table) {
     tableName := table.Name
     memTable := c.memoryDB[tableName]
     if memTable == nil {
-        c.memoryDB[tableName] = make([]arrow.Record, 0)
+        c.memoryDB[tableName] = make([]arrow.RecordBatch, 0)
         c.tables[tableName] = table
         return
     }
@@ -206,7 +206,7 @@ func (c *client) migrate(_ context.Context, table *schema.Table) {
     if changes == nil {
         return
     }
-    c.memoryDB[tableName] = make([]arrow.Record, 0)
+    c.memoryDB[tableName] = make([]arrow.RecordBatch, 0)
     c.tables[tableName] = table
 }
 
@@ -253,7 +253,7 @@ func (c *client) Close(context.Context) error {
 }
 
 func (c *client) deleteStale(_ context.Context, msg *message.WriteDeleteStale) {
-    var filteredTable []arrow.Record
+    var filteredTable []arrow.RecordBatch
     tableName := msg.TableName
     for i, row := range c.memoryDB[tableName] {
         sc := row.Schema()
@@ -280,7 +280,7 @@ func (c *client) deleteStale(_ context.Context, msg *message.WriteDeleteStale) {
 }
 
 func (c *client) deleteRecord(_ context.Context, msg *message.WriteDeleteRecord) {
-    var filteredTable []arrow.Record
+    var filteredTable []arrow.RecordBatch
     tableName := msg.TableName
     for i, row := range c.memoryDB[tableName] {
         isMatch := true
@@ -308,15 +308,15 @@ func (c *client) deleteRecord(_ context.Context, msg *message.WriteDeleteRecord)
     c.memoryDB[tableName] = filteredTable
 }
 
-func (*client) Transform(_ context.Context, _ <-chan arrow.Record, _ chan<- arrow.Record) error {
+func (*client) Transform(_ context.Context, _ <-chan arrow.RecordBatch, _ chan<- arrow.RecordBatch) error {
     return nil
 }
 
 func (*client) TransformSchema(_ context.Context, _ *arrow.Schema) (*arrow.Schema, error) {
     return nil, nil
 }
 
-func evaluatePredicate(pred message.Predicate, record arrow.Record) bool {
+func evaluatePredicate(pred message.Predicate, record arrow.RecordBatch) bool {
     sc := record.Schema()
     indices := sc.FieldIndices(pred.Column)
     if len(indices) == 0 {
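For context on the memdb changes, the client keeps each table as a slice of record batches guarded by a read-write mutex. A minimal sketch of that storage shape; store, append, read, and the package name are illustrative, not the SDK's API:

package memsketch

import (
    "sync"

    "github.com/apache/arrow-go/v18/arrow" // assumed module path
)

// store mirrors the shape of memdb's client: one batch slice per table name.
type store struct {
    mu   sync.RWMutex
    data map[string][]arrow.RecordBatch
}

func newStore() *store {
    return &store{data: make(map[string][]arrow.RecordBatch)}
}

// append adds a batch to a table under the write lock.
func (s *store) append(table string, rec arrow.RecordBatch) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.data[table] = append(s.data[table], rec)
}

// read streams a table's batches to a channel under the read lock,
// the same pattern the client's Read method uses above.
func (s *store) read(table string, out chan<- arrow.RecordBatch) {
    s.mu.RLock()
    defer s.mu.RUnlock()
    for _, rec := range s.data[table] {
        out <- rec
    }
}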

internal/memdb/memdb_test.go

Lines changed: 1 addition & 1 deletion

@@ -62,7 +62,7 @@ func TestOnWriteError(t *testing.T) {
     // sourceSpec := pbPlugin.Spec{
     //     Name: sourceName,
     // }
-    // ch := make(chan arrow.Record, 1)
+    // ch := make(chan arrow.RecordBatch, 1)
     // ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
     // opts := schema.GenTestDataOptions{
     //     SourceName: "test",

internal/pk/pk.go

Lines changed: 1 addition & 1 deletion

@@ -7,7 +7,7 @@ import (
     "github.com/cloudquery/plugin-sdk/v4/schema"
 )
 
-func String(resource arrow.Record) string {
+func String(resource arrow.RecordBatch) string {
     sc := resource.Schema()
     table, err := schema.NewTableFromArrowSchema(sc)
     if err != nil {

internal/reversertransformer/reversertransformer.go

Lines changed: 2 additions & 2 deletions

@@ -40,7 +40,7 @@ func (*client) Close(context.Context) error {
     return nil
 }
 
-func (c *client) Transform(ctx context.Context, recvRecords <-chan arrow.Record, sendRecords chan<- arrow.Record) error {
+func (c *client) Transform(ctx context.Context, recvRecords <-chan arrow.RecordBatch, sendRecords chan<- arrow.RecordBatch) error {
     for {
         select {
         case record, ok := <-recvRecords:
@@ -62,7 +62,7 @@ func (*client) TransformSchema(_ context.Context, old *arrow.Schema) (*arrow.Sch
     return old, nil
 }
 
-func (*client) reverseStrings(record arrow.Record) (arrow.Record, error) {
+func (*client) reverseStrings(record arrow.RecordBatch) (arrow.RecordBatch, error) {
     for i, column := range record.Columns() {
         if column.DataType().ID() != arrow.STRING {
             continue
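The Transform signature above follows the SDK's channel-based transformer pattern: receive batches until the input channel closes, transform each one, and forward the result. A minimal pass-through sketch of that loop; the real client applies reverseStrings to each record, and passthrough and the package name are illustrative only:

package transformsketch

import (
    "context"

    "github.com/apache/arrow-go/v18/arrow" // assumed module path
)

// passthrough forwards every batch from in to out unchanged, stopping when
// the input channel closes or the context is cancelled.
func passthrough(ctx context.Context, in <-chan arrow.RecordBatch, out chan<- arrow.RecordBatch) error {
    for {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case rec, ok := <-in:
            if !ok {
                return nil // input closed: all batches processed
            }
            out <- rec
        }
    }
}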

internal/reversertransformer/reversertransformer_test.go

Lines changed: 2 additions & 2 deletions

@@ -55,13 +55,13 @@ func makeRequestFromString(s string) *pb.Transform_Request {
     return &pb.Transform_Request{Record: bs}
 }
 
-func makeRecordFromString(s string) arrow.Record {
+func makeRecordFromString(s string) arrow.RecordBatch {
     str := array.NewStringBuilder(memory.DefaultAllocator)
     str.AppendString(s)
     arr := str.NewStringArray()
     schema := arrow.NewSchema([]arrow.Field{{Name: "col1", Type: arrow.BinaryTypes.String}}, nil)
 
-    return array.NewRecord(schema, []arrow.Array{arr}, 1)
+    return array.NewRecordBatch(schema, []arrow.Array{arr}, 1)
 }
 
 type mockTransformServer struct {

internal/servers/destination/v0/schemav2tov3.go

Lines changed: 3 additions & 3 deletions

@@ -96,11 +96,11 @@ func TypeV2ToV3(dataType schemav2.ValueType) arrow.DataType {
     }
 }
 
-func CQTypesOneToRecord(mem memory.Allocator, c schemav2.CQTypes, arrowSchema *arrow.Schema) arrow.Record {
+func CQTypesOneToRecord(mem memory.Allocator, c schemav2.CQTypes, arrowSchema *arrow.Schema) arrow.RecordBatch {
     return CQTypesToRecord(mem, []schemav2.CQTypes{c}, arrowSchema)
 }
 
-func CQTypesToRecord(mem memory.Allocator, c []schemav2.CQTypes, arrowSchema *arrow.Schema) arrow.Record {
+func CQTypesToRecord(mem memory.Allocator, c []schemav2.CQTypes, arrowSchema *arrow.Schema) arrow.RecordBatch {
     bldr := array.NewRecordBuilder(mem, arrowSchema)
     fields := bldr.Fields()
     for i := range fields {
@@ -242,5 +242,5 @@ func CQTypesToRecord(mem memory.Allocator, c []schemav2.CQTypes, arrowSchema *ar
         }
     }
 
-    return bldr.NewRecord()
+    return bldr.NewRecordBatch()
 }

internal/servers/destination/v1/destination_test.go

Lines changed: 1 addition & 1 deletion

@@ -121,7 +121,7 @@ func TestPluginSync(t *testing.T) {
     sc := table.ToArrowSchema()
     bldr := array.NewRecordBuilder(memory.DefaultAllocator, sc)
     bldr.Field(0).(*array.StringBuilder).Append("test")
-    record := bldr.NewRecord()
+    record := bldr.NewRecordBatch()
     recordBytes, err := pbSource.RecordToBytes(record)
     if err != nil {
         t.Fatal(err)
