@@ -398,6 +398,8 @@ type replicateBulkOps struct {
 
 	// debugSkipRollback skips all rollback steps during the test.
 	debugSkipRollback bool
+
+	withSettings []struct{ setting, value string }
 }
 
 func (bo replicateBulkOps) sourceInitCmd(tenantName string, nodes option.NodeListOption) string {
@@ -411,6 +413,18 @@ func (bo replicateBulkOps) sourceRunCmd(tenantName string, nodes option.NodeListOption) string {
 func (bo replicateBulkOps) runDriver(
 	workloadCtx context.Context, c cluster.Cluster, t test.Test, setup *c2cSetup,
 ) error {
+	mainTenantConn := c.Conn(workloadCtx, t.L(), 1, option.VirtualClusterName(setup.src.name))
+	for _, pair := range bo.withSettings {
+		settingStmt := fmt.Sprintf("SET CLUSTER SETTING %s = '%s'", pair.setting, pair.value)
+		t.L().Printf("Setting on sys/main/standby-sys: %s", settingStmt)
+		setup.src.sysSQL.Exec(t, settingStmt)
+		// PCR settings are system-only; assume others are app-level.
+		if !strings.Contains(pair.setting, "physical_replication") {
+			if _, err := mainTenantConn.ExecContext(workloadCtx, settingStmt); err != nil {
+				return err
+			}
+		}
+	}
 	runBackupMVCCRangeTombstones(workloadCtx, t, c, mvccRangeTombstoneConfig{
 		skipBackupRestore: true,
 		skipClusterSetup:  true,
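
The new block fans each setting out to two places: every statement runs on the source system tenant through setup.src.sysSQL, and any setting whose name does not contain "physical_replication" additionally runs on the application tenant through mainTenantConn. Below is a minimal standalone sketch of that same pattern, assuming plain database/sql handles for the two tenants; applySettings, sysDB, and appDB are illustrative names, not part of this patch.

package settingsdemo

import (
	"context"
	"database/sql"
	"fmt"
	"strings"
)

// applySettings mirrors the fan-out in runDriver above: every setting is
// applied on the system tenant, and settings outside the physical_replication
// namespace are assumed to be app-level and applied on the app tenant too.
func applySettings(
	ctx context.Context, sysDB, appDB *sql.DB, settings []struct{ setting, value string },
) error {
	for _, pair := range settings {
		stmt := fmt.Sprintf("SET CLUSTER SETTING %s = '%s'", pair.setting, pair.value)
		// Cluster settings always apply on the system tenant first.
		if _, err := sysDB.ExecContext(ctx, stmt); err != nil {
			return err
		}
		// PCR settings are system-only; skip the app tenant for them.
		if !strings.Contains(pair.setting, "physical_replication") {
			if _, err := appDB.ExecContext(ctx, stmt); err != nil {
				return err
			}
		}
	}
	return nil
}
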
@@ -1519,7 +1533,7 @@ func registerClusterToCluster(r registry.Registry) {
 			suites:    registry.Suites(registry.Nightly),
 		},
 		{
-			name:     "c2c/BulkOps",
+			name:     "c2c/BulkOps/settings=none",
 			srcNodes: 4,
 			dstNodes: 4,
 			cpus:     8,
@@ -1543,6 +1557,63 @@ func registerClusterToCluster(r registry.Registry) {
 			clouds:    registry.OnlyGCE,
 			suites:    registry.Suites(registry.Nightly),
 		},
+		{
+			name:     "c2c/BulkOps/settings=ac-import",
+			srcNodes: 4,
+			dstNodes: 4,
+			cpus:     8,
+			pdSize:   100,
+			workload: replicateBulkOps{withSettings: []struct{ setting, value string }{
+				{"bulkio.import.elastic_control.enabled", "true"},
+				{"bulkio.elastic_cpu_control.request_duration", "3ms"},
+			}},
+			timeout:            2 * time.Hour,
+			additionalDuration: 0,
+			// Cutover currently takes around 4 minutes, perhaps because we need to
+			// revert 10 GB of replicated data.
+			//
+			// TODO(msbutler): investigate further whether cutover can be sped up.
+			cutoverTimeout: 20 * time.Minute,
+			cutover:        5 * time.Minute,
+			// In a few ad hoc runs, the max latency spikes to 27 minutes before lag
+			// replanning and distributed catch-up scans fix the poor initial plan. If
+			// the max accepted latency doubles, then there's likely a regression.
+			maxAcceptedLatency: 1 * time.Hour,
+			// Skip the node distribution check because there is little data on the
+			// source when the replication stream begins.
+			skipNodeDistributionCheck: true,
+			clouds:                    registry.OnlyGCE,
+			suites:                    registry.Suites(registry.Nightly),
+		},
+		{
+			name:     "c2c/BulkOps/settings=ac-and-splits",
+			srcNodes: 4,
+			dstNodes: 4,
+			cpus:     8,
+			pdSize:   100,
+			workload: replicateBulkOps{withSettings: []struct{ setting, value string }{
+				{"bulkio.import.elastic_control.enabled", "true"},
+				{"bulkio.elastic_cpu_control.request_duration", "3ms"},
+				{"physical_replication.consumer.ingest_split_event.enabled", "true"},
+			}},
+			timeout:            2 * time.Hour,
+			additionalDuration: 0,
+			// Cutover currently takes around 4 minutes, perhaps because we need to
+			// revert 10 GB of replicated data.
+			//
+			// TODO(msbutler): investigate further whether cutover can be sped up.
+			cutoverTimeout: 20 * time.Minute,
+			cutover:        5 * time.Minute,
+			// In a few ad hoc runs, the max latency spikes to 27 minutes before lag
+			// replanning and distributed catch-up scans fix the poor initial plan. If
+			// the max accepted latency doubles, then there's likely a regression.
+			maxAcceptedLatency: 1 * time.Hour,
+			// Skip the node distribution check because there is little data on the
+			// source when the replication stream begins.
+			skipNodeDistributionCheck: true,
+			clouds:                    registry.OnlyGCE,
+			suites:                    registry.Suites(registry.Nightly),
+		},
 		{
 			name:     "c2c/BulkOps/singleImport",
 			srcNodes: 4,
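
To see concretely what runDriver does with the settings=ac-and-splits entry, the throwaway sketch below (not test code; the package layout is illustrative) walks the same withSettings slice and prints each generated statement along with the tenants that receive it under the physical_replication heuristic:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// The withSettings slice from the settings=ac-and-splits registration above.
	pairs := []struct{ setting, value string }{
		{"bulkio.import.elastic_control.enabled", "true"},
		{"bulkio.elastic_cpu_control.request_duration", "3ms"},
		{"physical_replication.consumer.ingest_split_event.enabled", "true"},
	}
	for _, pair := range pairs {
		stmt := fmt.Sprintf("SET CLUSTER SETTING %s = '%s'", pair.setting, pair.value)
		scope := "system + app tenant"
		if strings.Contains(pair.setting, "physical_replication") {
			scope = "system tenant only" // PCR settings are system-only.
		}
		fmt.Printf("%-22s %s\n", scope+":", stmt)
	}
}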