@@ -34,7 +34,7 @@ const (
 	defaultBatchSize    = 1000
 	defaultPayloadBytes = 100
 	defaultRanges       = 10
-	defaultNumTables    = 1
+	defaultTables       = 1
 	maxTransfer         = 999
 )
@@ -46,7 +46,7 @@ type bank struct {

 	rows, batchSize      int
 	payloadBytes, ranges int
-	numTables            int
+	tables               int
 }

 func init() {
@@ -68,7 +68,7 @@ var bankMeta = workload.Meta{
 		g.flags.IntVar(&g.batchSize, `batch-size`, defaultBatchSize, `Number of rows in each batch of initial data.`)
 		g.flags.IntVar(&g.payloadBytes, `payload-bytes`, defaultPayloadBytes, `Size of the payload field in each initial row.`)
 		g.flags.IntVar(&g.ranges, `ranges`, defaultRanges, `Initial number of ranges in bank table.`)
-		g.flags.IntVar(&g.numTables, `num-tables`, defaultNumTables, `Number of bank tables to create.`)
+		g.flags.IntVar(&g.tables, `tables`, defaultTables, `Initial number of bank tables to create.`)
 		RandomSeed.AddFlag(&g.flags)
 		g.connFlags = workload.NewConnFlags(&g.flags)
 		// Because this workload can create a large number of objects, the import
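
For orientation, the renamed `tables` flag is an ordinary integer flag with a default of 1. The sketch below shows the same registration pattern using only the standard library `flag` package; the flag-set name and helper are illustrative, not the workload framework's API.

```go
package example

import "flag"

// parseBankFlags is illustrative only: it mirrors how the bank workload
// registers its integer flags, but with the standard library flag package
// instead of the workload framework's FlagSet.
func parseBankFlags(args []string) (tables, ranges int, err error) {
	fs := flag.NewFlagSet("bank", flag.ContinueOnError)
	fs.IntVar(&tables, "tables", 1, "Initial number of bank tables to create.")
	fs.IntVar(&ranges, "ranges", 10, "Initial number of ranges in bank table.")
	if err := fs.Parse(args); err != nil {
		return 0, 0, err
	}
	return tables, ranges, nil
}
```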
@@ -123,8 +123,8 @@ func (b *bank) Hooks() workload.Hooks {
 			if b.batchSize <= 0 {
 				return errors.Errorf(`Value of batch-size must be greater than zero; was %d`, b.batchSize)
 			}
-			if b.numTables <= 0 {
-				return errors.Errorf(`Value of num-tables must be greater than zero; was %d`, b.numTables)
+			if b.tables <= 0 {
+				return errors.Errorf(`Value of tables must be greater than zero; was %d`, b.tables)
 			}
 			return nil
 		},
@@ -133,7 +133,7 @@ func (b *bank) Hooks() workload.Hooks {

 // tableName returns the table name with optional schema prefix and table number.
 func (b *bank) tableName(baseName string, tableIdx int) string {
-	if b.numTables > 1 {
+	if b.tables > 1 {
 		return fmt.Sprintf("%s_%d", baseName, tableIdx)
 	}
 	return baseName
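
A self-contained sketch of the naming rule this helper encodes: the `_N` suffix is applied only once more than one table is configured. The function shape is simplified from the method above.

```go
package example

import "fmt"

// tableName mirrors the helper above: suffix the base name with the table
// index only when more than one table is configured.
func tableName(baseName string, tableIdx, tables int) string {
	if tables > 1 {
		return fmt.Sprintf("%s_%d", baseName, tableIdx)
	}
	return baseName
}

// Example: tableName("bank", 0, 1) == "bank", tableName("bank", 2, 4) == "bank_2".
```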
@@ -149,8 +149,8 @@ var bankTypes = []*types.T{
 func (b *bank) Tables() []workload.Table {
 	numBatches := (b.rows + b.batchSize - 1) / b.batchSize // ceil(b.rows/b.batchSize)

-	tables := make([]workload.Table, b.numTables)
-	for tableIdx := range b.numTables {
+	tables := make([]workload.Table, b.tables)
+	for tableIdx := range b.tables {
 		table := workload.Table{
 			Name:   b.tableName(`bank`, tableIdx),
 			Schema: bankSchema,
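
Two details worth noting in `Tables()`: the batch count is an integer ceiling division, and `for tableIdx := range b.tables` relies on Go 1.22+ range-over-int. A minimal sketch of both, with the concrete numbers chosen only for illustration.

```go
package example

import "fmt"

func tableBatches(rows, batchSize, tables int) {
	// Integer ceiling division, as in the diff: ceil(rows/batchSize).
	numBatches := (rows + batchSize - 1) / batchSize

	// range over an int (Go 1.22+) iterates 0..tables-1.
	for tableIdx := range tables {
		fmt.Printf("table %d: %d batches of up to %d rows\n", tableIdx, numBatches, batchSize)
	}
}

// tableBatches(1000, 300, 3) reports 4 batches for each of tables 0, 1, and 2.
```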
@@ -208,8 +208,8 @@ func (b *bank) Ops(
 	db.SetMaxIdleConns(b.connFlags.Concurrency + 1)

 	// TODO(dan): Move the various queries in the backup/restore tests here.
-	updateStmts := make([]*gosql.Stmt, b.numTables)
-	for tableIdx := range b.numTables {
+	updateStmts := make([]*gosql.Stmt, b.tables)
+	for tableIdx := range b.tables {
 		updateStmt, err := db.Prepare(fmt.Sprintf(`
			UPDATE %s
			SET balance = CASE id WHEN $1 THEN balance-$3 WHEN $2 THEN balance+$3 END
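
The loop prepares one transfer statement per table, interpolating the table name into the SQL before handing it to `Prepare`. A reduced `database/sql` sketch follows; the `WHERE` clause and the inline naming rule are assumptions for illustration, since the full statement is cut off in the hunk above.

```go
package example

import (
	"database/sql"
	"fmt"
)

// prepareTransferStmts prepares one UPDATE statement per bank table.
// The WHERE clause here is assumed; the workload's full statement is not
// shown in the hunk above.
func prepareTransferStmts(db *sql.DB, tables int) ([]*sql.Stmt, error) {
	stmts := make([]*sql.Stmt, tables)
	for tableIdx := range tables {
		name := "bank"
		if tables > 1 {
			name = fmt.Sprintf("bank_%d", tableIdx) // same rule as tableName above
		}
		stmt, err := db.Prepare(fmt.Sprintf(`
			UPDATE %s
			SET balance = CASE id WHEN $1 THEN balance-$3 WHEN $2 THEN balance+$3 END
			WHERE id IN ($1, $2)`, name))
		if err != nil {
			return nil, err
		}
		stmts[tableIdx] = stmt
	}
	return stmts, nil
}
```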
@@ -233,7 +233,7 @@ func (b *bank) Ops(
 	hists := reg.GetHandle()

 	workerFn := func(ctx context.Context) error {
-		tableIdx := rng.IntN(b.numTables)
+		tableIdx := rng.IntN(b.tables)
 		updateStmt := updateStmts[tableIdx]

 		from := rng.IntN(b.rows)
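
Each worker iteration picks a random table, then two distinct account IDs and an amount, and executes that table's prepared statement. A simplified sketch, assuming `math/rand/v2` (which the capital-N `IntN` suggests) and statements prepared as above; histogram recording and retries are omitted.

```go
package example

import (
	"context"
	"database/sql"
	"math/rand/v2"
)

// transferOnce is a stripped-down version of the worker body: choose a
// table and two distinct accounts at random, then run the transfer.
// Assumes rows >= 2 and maxTransfer >= 1.
func transferOnce(ctx context.Context, rng *rand.Rand, stmts []*sql.Stmt, rows, maxTransfer int) error {
	tableIdx := rng.IntN(len(stmts))
	from := rng.IntN(rows)
	to := rng.IntN(rows - 1)
	if to >= from {
		to++ // keep the two account IDs distinct
	}
	amount := rng.IntN(maxTransfer)
	_, err := stmts[tableIdx].ExecContext(ctx, from, to, amount)
	return err
}
```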