// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
| 17 | +import com.mysql.cj.jdbc.StatementImpl |
| 18 | +import org.apache.doris.regression.suite.ClusterOptions |
| 19 | +import org.apache.doris.regression.util.NodeType |
| 20 | +import org.apache.doris.regression.suite.SuiteCluster |
| 21 | + |
| 22 | +suite("test_create_partition_and_insert_overwrite_race", 'p0, docker') { |
| 23 | + if (!isCloudMode()) { |
| 24 | + return |
| 25 | + } |
| 26 | + def options = new ClusterOptions() |
| 27 | + options.enableDebugPoints() |
| 28 | + // one master, one observer |
| 29 | + options.setFeNum(2) |
| 30 | + options.feConfigs.add('sys_log_verbose_modules=org') |
| 31 | + options.setBeNum(3) |
| 32 | + options.cloudMode = true |
| 33 | + |
| 34 | + // 1. connect to observer |
| 35 | + options.connectToFollower = true |
| 36 | + docker(options) { |
| 37 | + sql """set enable_sql_cache=false""" |
| 38 | + def tbl = 'test_create_partition_and_insert_overwrite_race_tbl' |
| 39 | + def tbl2 = 'test_create_partition_and_insert_overwrite_race_tbl2' |
| 40 | + def createTableSql = { String tableName -> |
| 41 | + sql """ |
| 42 | + CREATE TABLE ${tableName} ( |
| 43 | + order_id BIGINT, |
| 44 | + create_dt datetime, |
| 45 | + username VARCHAR(20) |
| 46 | + ) |
| 47 | + DUPLICATE KEY(order_id) |
| 48 | + PARTITION BY RANGE(create_dt) () |
| 49 | + DISTRIBUTED BY HASH(order_id) BUCKETS 10 |
| 50 | + PROPERTIES( |
| 51 | + "dynamic_partition.enable" = "true", |
| 52 | + "dynamic_partition.time_unit" = "DAY", |
| 53 | + "dynamic_partition.start" = "-5", |
| 54 | + "dynamic_partition.end" = "5", |
| 55 | + "dynamic_partition.prefix" = "p", |
| 56 | + "dynamic_partition.create_history_partition" = "true" |
| 57 | + ); |
| 58 | + """ |
| 59 | + } |
| 60 | + |
| 61 | + createTableSql(tbl) |
| 62 | + createTableSql(tbl2) |
| 63 | + |
| 64 | + // Generate insert statements with dates: current date -2, -1, 0, +1, +2 days |
| 65 | + def now = new Date() |
| 66 | + def dateFormat = new java.text.SimpleDateFormat("yyyy-MM-dd") |
| 67 | + for (def i = -2; i <= 2; i++) { |
| 68 | + def targetDate = new Date(now.time + i * 24 * 60 * 60 * 1000L) |
| 69 | + def dateStr = dateFormat.format(targetDate) |
| 70 | + def hour = String.format("%02d", Math.abs(i) + 1) |
| 71 | + def insertDate = "${dateStr} ${hour}:00:00" |
| 72 | + sql """insert into ${tbl2} values (${i + 3}, '${insertDate}', 'test')""" |
| 73 | + } |
| 74 | + |
| 75 | + sql """DROP TABLE ${tbl}""" |
| 76 | + def partitionNameFormat = new java.text.SimpleDateFormat("yyyyMMdd") |
| 77 | + def currentPartitionName = "p" + partitionNameFormat.format(now) |
| 78 | + cluster.injectDebugPoints(NodeType.FE, ['FE.logAddPartition.slow':[pName:currentPartitionName, sleep:50 * 1000]]) |
| 79 | + def futrue = thread { |
| 80 | + for (def i = 0; i < 55; i++) { |
| 81 | + try_sql """INSERT OVERWRITE TABLE ${tbl} partition(*) select * from ${tbl2}""" |
| 82 | + sleep(1 * 1000) |
| 83 | + cluster.checkFeIsAlive(2, true) |
| 84 | + } |
| 85 | + } |
| 86 | + def future1 = thread { |
| 87 | + createTableSql(tbl) |
| 88 | + } |
| 89 | + futrue.get() |
| 90 | + future1.get() |
| 91 | + } |
| 92 | +} |
0 commit comments