@@ -300,7 +300,7 @@ def pandas_verify(cur, data, deserialize):
        ), f"Result value {value} should match input example {datum}."


-@pytest.mark.parametrize("datatype", ICEBERG_UNSUPPORTED_TYPES)
+@pytest.mark.parametrize("datatype", sorted(ICEBERG_UNSUPPORTED_TYPES))
 def test_iceberg_negative(datatype, conn_cnx, iceberg_support, structured_type_support):
     if not iceberg_support:
         pytest.skip("Test requires iceberg support.")
@@ -999,35 +999,46 @@ def test_select_vector(conn_cnx, is_public_test):


 def test_select_time(conn_cnx):
-    for scale in range(10):
-        select_time_with_scale(conn_cnx, scale)
-
-
-def select_time_with_scale(conn_cnx, scale):
+    # Test key scales and meaningful cases in a single table operation
+    # Cover: no fractional seconds, milliseconds, microseconds, nanoseconds
+    scales = [0, 3, 6, 9]  # Key precision levels
     cases = [
-        "00:01:23",
-        "00:01:23.1",
-        "00:01:23.12",
-        "00:01:23.123",
-        "00:01:23.1234",
-        "00:01:23.12345",
-        "00:01:23.123456",
-        "00:01:23.1234567",
-        "00:01:23.12345678",
-        "00:01:23.123456789",
+        "00:01:23",  # Basic time
+        "00:01:23.123456789",  # Max precision
+        "23:59:59.999999999",  # Edge case - max time with max precision
+        "00:00:00.000000001",  # Edge case - min time with min precision
     ]
-    table = "test_arrow_time"
-    column = f"(a time({scale}))"
-    values = (
-        "(-1, NULL), ("
-        + "),(".join([f"{i}, '{c}'" for i, c in enumerate(cases)])
-        + f"), ({len(cases)}, NULL)"
-    )
-    init(conn_cnx, table, column, values)
-    sql_text = f"select a from {table} order by s"
-    row_count = len(cases) + 2
-    col_count = 1
-    iterate_over_test_chunk("time", conn_cnx, sql_text, row_count, col_count)
+
+    table = "test_arrow_time_scales"
+
+    # Create columns for selected scales only (init function will add 's number' automatically)
+    columns = ", ".join([f"a{i} time({i})" for i in scales])
+    column_def = f"({columns})"
+
+    # Create values for selected scales - each case tests all scales simultaneously
+    value_rows = []
+    for i, case in enumerate(cases):
+        # Each row has the same time value for all scale columns
+        time_values = ", ".join([f"'{case}'" for _ in scales])
+        value_rows.append(f"({i}, {time_values})")
+
+    # Add NULL rows
+    null_values = ", ".join(["NULL" for _ in scales])
+    value_rows.append(f"(-1, {null_values})")
+    value_rows.append(f"({len(cases)}, {null_values})")
+
+    values = ", ".join(value_rows)
+
+    # Single table creation and test
+    init(conn_cnx, table, column_def, values)
+
+    # Test each scale column
+    for scale in scales:
+        sql_text = f"select a{scale} from {table} order by s"
+        row_count = len(cases) + 2
+        col_count = 1
+        iterate_over_test_chunk("time", conn_cnx, sql_text, row_count, col_count)
+
     finish(conn_cnx, table)
0 commit comments