@@ -116,7 +116,7 @@ def test_xml():


 def test_xml_str():
-    with ensure_clean() as path:
+    with ensure_clean():
         out = check(assert_type(DF.to_xml(), str), str)
         check(assert_type(read_xml(io.StringIO(out)), DataFrame), DataFrame)

@@ -578,61 +578,131 @@ def _true_if_col1(s: str) -> bool:

 def test_types_read_csv() -> None:
     df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
-    csv_df: str = df.to_csv()
+    check(assert_type(df.to_csv(), str), str)

     with ensure_clean() as path:
         df.to_csv(path)
-        df2: pd.DataFrame = pd.read_csv(path)
-        df3: pd.DataFrame = pd.read_csv(path, sep="a")
-        df4: pd.DataFrame = pd.read_csv(
-            path,
-            header=None,
-        )
-        df5: pd.DataFrame = pd.read_csv(
-            path, engine="python", true_values=["no", "No", "NO"], na_filter=False
-        )
-        df6: pd.DataFrame = pd.read_csv(
-            path,
-            skiprows=lambda x: x in [0, 2],
-            skip_blank_lines=True,
-            dayfirst=False,
-        )
-        df7: pd.DataFrame = pd.read_csv(path, nrows=2)
-        df8: pd.DataFrame = pd.read_csv(path, dtype={"a": float, "b": int})
-        df9: pd.DataFrame = pd.read_csv(path, usecols=["col1"])
-        df10: pd.DataFrame = pd.read_csv(path, usecols=[0])
-        df11: pd.DataFrame = pd.read_csv(path, usecols=np.array([0]))
-        df12: pd.DataFrame = pd.read_csv(path, usecols=("col1",))
-        df13: pd.DataFrame = pd.read_csv(path, usecols=pd.Series(data=["col1"]))
-        df14: pd.DataFrame = pd.read_csv(path, converters=None)
-        df15: pd.DataFrame = pd.read_csv(path, names=("first", "second"), header=0)
-        df16: pd.DataFrame = pd.read_csv(path, names=range(2), header=0)
-        df17: pd.DataFrame = pd.read_csv(path, names=(1, "two"), header=0)
-        df18: pd.DataFrame = pd.read_csv(
-            path,
-            names=(
-                (
-                    "first",
-                    1,
+        check(assert_type(pd.read_csv(path), pd.DataFrame), pd.DataFrame)
+        check(assert_type(pd.read_csv(path, sep="a"), pd.DataFrame), pd.DataFrame)
+        check(assert_type(pd.read_csv(path, header=None), pd.DataFrame), pd.DataFrame)
+        check(
+            assert_type(
+                pd.read_csv(
+                    path,
+                    engine="python",
+                    true_values=["no", "No", "NO"],
+                    na_filter=False,
+                ),
+                pd.DataFrame,
+            ),
+            pd.DataFrame,
+        )
+        check(
+            assert_type(
+                pd.read_csv(
+                    path,
+                    skiprows=lambda x: x in [0, 2],
+                    skip_blank_lines=True,
+                    dayfirst=False,
+                ),
+                pd.DataFrame,
+            ),
+            pd.DataFrame,
+        )
+        check(assert_type(pd.read_csv(path, nrows=2), pd.DataFrame), pd.DataFrame)
+        check(
+            assert_type(pd.read_csv(path, dtype={"a": float, "b": int}), pd.DataFrame),
+            pd.DataFrame,
+        )
+        check(
+            assert_type(pd.read_csv(path, usecols=["col1"]), pd.DataFrame), pd.DataFrame
+        )
+        check(assert_type(pd.read_csv(path, usecols=[0]), pd.DataFrame), pd.DataFrame)
+        check(
+            assert_type(pd.read_csv(path, usecols=np.array([0])), pd.DataFrame),
+            pd.DataFrame,
+        )
+        check(
+            assert_type(pd.read_csv(path, usecols=("col1",)), pd.DataFrame),
+            pd.DataFrame,
+        )
+        check(
+            assert_type(
+                pd.read_csv(path, usecols=pd.Series(data=["col1"])), pd.DataFrame
+            ),
+            pd.DataFrame,
+        )
+        check(
+            assert_type(pd.read_csv(path, converters=None), pd.DataFrame), pd.DataFrame
+        )
+        check(
+            assert_type(
+                pd.read_csv(path, names=("first", "second"), header=0), pd.DataFrame
+            ),
+            pd.DataFrame,
+        )
+        check(
+            assert_type(pd.read_csv(path, names=range(2), header=0), pd.DataFrame),
+            pd.DataFrame,
+        )
+        check(
+            assert_type(pd.read_csv(path, names=(1, "two"), header=0), pd.DataFrame),
+            pd.DataFrame,
+        )
+        check(
+            assert_type(
+                pd.read_csv(
+                    path,
+                    names=(
+                        (
+                            "first",
+                            1,
+                        ),
+                        ("last", 2),
+                    ),
+                    header=0,
                 ),
-                ("last", 2),
+                pd.DataFrame,
             ),
-            header=0,
+            pd.DataFrame,
+        )
+        check(assert_type(pd.read_csv(path, usecols=None), pd.DataFrame), pd.DataFrame)
+        check(
+            assert_type(pd.read_csv(path, usecols=["col1"]), pd.DataFrame), pd.DataFrame
+        )
+        check(assert_type(pd.read_csv(path, usecols=(0,)), pd.DataFrame), pd.DataFrame)
+        check(
+            assert_type(pd.read_csv(path, usecols=range(1)), pd.DataFrame), pd.DataFrame
         )
-        df19: pd.DataFrame = pd.read_csv(path, usecols=None)
-        df20: pd.DataFrame = pd.read_csv(path, usecols=["col1"])
-        df21: pd.DataFrame = pd.read_csv(path, usecols=(0,))
-        df22: pd.DataFrame = pd.read_csv(path, usecols=range(1))
-        df23: pd.DataFrame = pd.read_csv(path, usecols=_true_if_col1)
-        df24: pd.DataFrame = pd.read_csv(
-            path, names=[1, 2], usecols=_true_if_greater_than_0, header=0, index_col=0
+        check(
+            assert_type(pd.read_csv(path, usecols=_true_if_col1), pd.DataFrame),
+            pd.DataFrame,
         )
-        df25: pd.DataFrame = pd.read_csv(
-            path,
-            names=(("head", 1), ("tail", 2)),
-            usecols=_true_if_first_param_is_head,
-            header=0,
-            index_col=0,
+        check(
+            assert_type(
+                pd.read_csv(
+                    path,
+                    names=[1, 2],
+                    usecols=_true_if_greater_than_0,
+                    header=0,
+                    index_col=0,
+                ),
+                pd.DataFrame,
+            ),
+            pd.DataFrame,
+        )
+        check(
+            assert_type(
+                pd.read_csv(
+                    path,
+                    names=(("head", 1), ("tail", 2)),
+                    usecols=_true_if_first_param_is_head,
+                    header=0,
+                    index_col=0,
+                ),
+                pd.DataFrame,
+            ),
+            pd.DataFrame,
         )

     if TYPE_CHECKING_INVALID_USAGE:
@@ -656,14 +726,28 @@ def test_types_read_csv() -> None:
     with ensure_clean() as path:
         df_dates.to_csv(path)

-        df26: pd.DataFrame = pd.read_csv(
-            path, parse_dates=["col1"], date_format="%Y-%m-%d"
+        check(
+            assert_type(
+                pd.read_csv(path, parse_dates=["col1"], date_format="%Y-%m-%d"),
+                pd.DataFrame,
+            ),
+            pd.DataFrame,
         )
-        df27: pd.DataFrame = pd.read_csv(
-            path, parse_dates=["col1"], date_format={"col1": "%Y-%m-%d"}
+        check(
+            assert_type(
+                pd.read_csv(
+                    path, parse_dates=["col1"], date_format={"col1": "%Y-%m-%d"}
+                ),
+                pd.DataFrame,
+            ),
+            pd.DataFrame,
         )
-        df28: pd.DataFrame = pd.read_csv(
-            path, parse_dates=["col1"], date_format={1: "%Y-%m-%d"}
+        check(
+            assert_type(
+                pd.read_csv(path, parse_dates=["col1"], date_format={1: "%Y-%m-%d"}),
+                pd.DataFrame,
+            ),
+            pd.DataFrame,
         )


@@ -790,8 +874,10 @@ def test_types_read_table():

     with ensure_clean() as path:
         df.to_csv(path)
-
-        df2: pd.DataFrame = pd.read_table(path, sep=",", converters=None)
+        check(
+            assert_type(pd.read_table(path, sep=",", converters=None), pd.DataFrame),
+            pd.DataFrame,
+        )


 def test_btest_read_fwf():
@@ -1242,7 +1328,7 @@ def test_read_sql_query_via_sqlalchemy_engine_with_params():
     reason="Only works in Postgres (and MySQL, but with different query syntax)"
 )
 def test_read_sql_query_via_sqlalchemy_engine_with_tuple_valued_params():
-    with ensure_clean() as path:
+    with ensure_clean():
         db_uri = "postgresql+psycopg2://postgres@localhost:5432/postgres"
         engine = sqlalchemy.create_engine(db_uri)

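The pattern applied throughout this diff replaces `dfN: pd.DataFrame = ...` annotations with paired static and runtime assertions via `check(assert_type(...), ...)`. The sketch below illustrates how such a pair works under the assumption of a simplified `check` helper; the real helper in the pandas-stubs test utilities has a richer signature, so this is illustrative only.

# Minimal sketch of the check/assert_type testing pattern used in this diff.
# Assumption: this simplified `check` stands in for the pandas-stubs test helper.
from typing import TypeVar

import pandas as pd
from typing_extensions import assert_type

T = TypeVar("T")


def check(actual: T, klass: type) -> T:
    # assert_type is a no-op at runtime but tells the type checker to verify the
    # stub-declared type; this runtime assertion confirms the value really is an
    # instance of the expected class, then hands it back unchanged.
    assert isinstance(actual, klass), f"expected {klass}, got {type(actual)}"
    return actual


df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
# Statically asserts df.to_csv() is typed as str; at runtime verifies it is a str.
check(assert_type(df.to_csv(), str), str)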