@@ -5,11 +5,10 @@
 import os
 import tempfile
 from decimal import Decimal
-from unittest.mock import patch
 
 import pytest
 
-from snowflake.snowpark import DataFrame, Row, context
+from snowflake.snowpark import DataFrame, Row
 from snowflake.snowpark.functions import lit
 from snowflake.snowpark.types import (
     BooleanType,
@@ -426,18 +425,15 @@ def test_join_basic(session):
 def test_numeric_type_store_precision_and_scale(session, massive_number, precision):
     table_name = Utils.random_table_name()
     try:
-        with patch.object(context, "_store_precision_and_scale_in_numeric_type", True):
-            df = session.create_dataframe(
-                [Decimal(massive_number)],
-                StructType(
-                    [StructField("large_value", DecimalType(precision, 0), True)]
-                ),
-            )
-            df.write.save_as_table(table_name, mode="overwrite", table_type="temp")
-            result = session.sql(f"select * from {table_name}")
-            datatype = result.schema.fields[0].datatype
-            assert isinstance(datatype, LongType)
-            assert datatype._precision == 38 and datatype._scale == 0
+        df = session.create_dataframe(
+            [Decimal(massive_number)],
+            StructType([StructField("large_value", DecimalType(precision, 0), True)]),
+        )
+        df.write.save_as_table(table_name, mode="overwrite", table_type="temp")
+        result = session.sql(f"select * from {table_name}")
+        datatype = result.schema.fields[0].datatype
+        assert isinstance(datatype, LongType)
+        assert datatype._precision == 38 and datatype._scale == 0
     finally:
         session.sql(f"drop table {table_name}").collect()
 
@@ -468,28 +464,27 @@ def write_csv(data):
     file_path = write_csv(test_data)
 
     try:
-        with patch.object(context, "_store_precision_and_scale_in_numeric_type", True):
-            Utils.create_stage(session, stage_name, is_temporary=True)
-            result = session.file.put(
-                file_path, f"@{stage_name}", auto_compress=False, overwrite=True
-            )
-
-            # Infer schema from only the short file
-            constrained_reader = session.read.options(
-                {
-                    "INFER_SCHEMA": True,
-                    "INFER_SCHEMA_OPTIONS": {"FILES": [result[0].target]},
-                    "PARSE_HEADER": True,
-                    # Only load the short file
-                    "PATTERN": f".*{result[0].target}",
-                }
-            )
-
-            # df1 uses constrained types
-            df1 = constrained_reader.csv(f"@{stage_name}/")
-            datatype = df1.schema.fields[0].datatype
-            assert isinstance(datatype, LongType)
-            assert datatype._precision == 38 and datatype._scale == 0
+        Utils.create_stage(session, stage_name, is_temporary=True)
+        result = session.file.put(
+            file_path, f"@{stage_name}", auto_compress=False, overwrite=True
+        )
+
+        # Infer schema from only the short file
+        constrained_reader = session.read.options(
+            {
+                "INFER_SCHEMA": True,
+                "INFER_SCHEMA_OPTIONS": {"FILES": [result[0].target]},
+                "PARSE_HEADER": True,
+                # Only load the short file
+                "PATTERN": f".*{result[0].target}",
+            }
+        )
+
+        # df1 uses constrained types
+        df1 = constrained_reader.csv(f"@{stage_name}/")
+        datatype = df1.schema.fields[0].datatype
+        assert isinstance(datatype, LongType)
+        assert datatype._precision == 38 and datatype._scale == 0
 
     finally:
         Utils.drop_stage(session, stage_name)