@@ -303,7 +303,7 @@ def pandas_verify(cur, data, deserialize):
303303 ), f"Result value { value } should match input example { datum } ."
304304
305305
306- @pytest .mark .parametrize ("datatype" , ICEBERG_UNSUPPORTED_TYPES )
306+ @pytest .mark .parametrize ("datatype" , sorted ( ICEBERG_UNSUPPORTED_TYPES ) )
307307def test_iceberg_negative (datatype , conn_cnx , iceberg_support , structured_type_support ):
308308 if not iceberg_support :
309309 pytest .skip ("Test requires iceberg support." )
@@ -1002,35 +1002,46 @@ def test_select_vector(conn_cnx, is_public_test):
10021002
10031003
def test_select_time(conn_cnx):
    """Exercise TIME columns at representative precisions in one pass.

    Instead of one table per scale, builds a single table holding one
    TIME(s) column for each key scale, loads a few boundary values into
    every column at once, then runs the arrow-chunk comparison per column.
    """
    # Key precision levels: whole seconds, milli-, micro-, and nanoseconds.
    scales = [0, 3, 6, 9]
    cases = [
        "00:01:23",              # no fractional seconds
        "00:01:23.123456789",    # full nanosecond precision
        "23:59:59.999999999",    # largest representable time, full precision
        "00:00:00.000000001",    # smallest nonzero time, full precision
    ]

    table = "test_arrow_time_scales"

    # One TIME column per scale; init() supplies the 's number' key column.
    column_def = "(" + ", ".join(f"a{s} time({s})" for s in scales) + ")"

    # Each data row repeats the same time literal across every scale column.
    rows = [
        f"({idx}, " + ", ".join(f"'{case}'" for _ in scales) + ")"
        for idx, case in enumerate(cases)
    ]

    # Append NULL rows keyed before and after the data rows.
    nulls = ", ".join("NULL" for _ in scales)
    rows.append(f"(-1, {nulls})")
    rows.append(f"({len(cases)}, {nulls})")

    # One table creation covers every scale under test.
    init(conn_cnx, table, column_def, ", ".join(rows))

    # Verify each scale's column independently.
    for scale in scales:
        sql_text = f"select a{scale} from {table} order by s"
        iterate_over_test_chunk("time", conn_cnx, sql_text, len(cases) + 2, 1)

    finish(conn_cnx, table)
10351046
10361047
0 commit comments