Skip to content

Commit d3a5a4b

Browse files
committed
[DOP-21813] Update CHANGELOG
1 parent e9d7b27 commit d3a5a4b

File tree

6 files changed

+7
-7
lines changed

6 files changed

+7
-7
lines changed

docs/changelog/0.12.5.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
0.12.5 (2024-12-02)
1+
0.12.5 (2024-12-03)
22
===================
33

44
Improvements

tests/tests_integration/tests_core_integration/tests_db_reader_integration/test_clickhouse_reader_integration.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -269,7 +269,7 @@ def test_clickhouse_reader_snapshot_with_partitioning_mode_hash(spark, processin
269269
# 100 rows per 3 partitions -> each partition should contain about ~33 rows,
270270
# with some variance caused by randomness & hash distribution
271271
min_count_per_partition = 10
272-
max_count_per_partition = 50
272+
max_count_per_partition = 55
273273

274274
count_per_partition = table_df.groupBy(spark_partition_id()).count().collect()
275275
for partition in count_per_partition:
@@ -365,7 +365,7 @@ def test_clickhouse_reader_snapshot_with_partitioning_mode_mod_date(spark, proce
365365
# 100 rows per 3 partitions -> each partition should contain about ~33 rows,
366366
# with some variance caused by randomness & hash distribution
367367
min_count_per_partition = 10
368-
max_count_per_partition = 50
368+
max_count_per_partition = 55
369369

370370
count_per_partition = table_df.groupBy(spark_partition_id()).count().collect()
371371
for partition in count_per_partition:

tests/tests_integration/tests_core_integration/tests_db_reader_integration/test_mssql_reader_integration.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -275,7 +275,7 @@ def test_mssql_reader_snapshot_with_partitioning_mode_hash(spark, processing, lo
275275
# 100 rows per 3 partitions -> each partition should contain about ~33 rows,
276276
# with some variance caused by randomness & hash distribution
277277
min_count_per_partition = 10
278-
max_count_per_partition = 50
278+
max_count_per_partition = 55
279279

280280
count_per_partition = table_df.groupBy(spark_partition_id()).count().collect()
281281

tests/tests_integration/tests_core_integration/tests_db_reader_integration/test_mysql_reader_integration.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -266,7 +266,7 @@ def test_mysql_reader_snapshot_with_partitioning_mode_hash(spark, processing, lo
266266
# 100 rows per 3 partitions -> each partition should contain about ~33 rows,
267267
# with some variance caused by randomness & hash distribution (+- 50% range is wide enough)
268268
min_count_per_partition = 10
269-
max_count_per_partition = 50
269+
max_count_per_partition = 55
270270

271271
count_per_partition = table_df.groupBy(spark_partition_id()).count().collect()
272272
for partition in count_per_partition:

tests/tests_integration/tests_core_integration/tests_db_reader_integration/test_oracle_reader_integration.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -224,7 +224,7 @@ def test_oracle_reader_snapshot_with_partitioning_mode_hash(spark, processing, l
224224
# 100 rows per 3 partitions -> each partition should contain about ~33 rows,
225225
# with some variance caused by randomness & hash distribution
226226
min_count_per_partition = 10
227-
max_count_per_partition = 50
227+
max_count_per_partition = 55
228228

229229
count_per_partition = table_df.groupBy(spark_partition_id()).count().collect()
230230

tests/tests_integration/tests_core_integration/tests_db_reader_integration/test_postgres_reader_integration.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -511,7 +511,7 @@ def test_postgres_reader_snapshot_with_partitioning_mode_hash(spark, processing,
511511
# 100 rows per 3 partitions -> each partition should contain about ~33 rows,
512512
# with some variance caused by randomness & hash distribution
513513
min_count_per_partition = 10
514-
max_count_per_partition = 50
514+
max_count_per_partition = 55
515515

516516
count_per_partition = table_df.groupBy(spark_partition_id()).count().collect()
517517

0 commit comments

Comments (0)