
Commit 10102e6

TST: Replace 'ensure_clean' with 'temp_file' in some tests (#62474)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent 5d30c83 commit 10102e6
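
The pattern of this commit is mechanical: each test or fixture drops the `with tm.ensure_clean() as path:` wrapper, takes a `temp_file` fixture argument instead, and loses one level of indentation, with file cleanup handed off to pytest. As orientation, a minimal sketch of what such a fixture could look like, assuming it builds on pytest's built-in tmp_path; the real definition lives in pandas' conftest and may differ:

import uuid

import pytest


@pytest.fixture
def temp_file(tmp_path):
    # Hypothetical sketch, not pandas' actual fixture. tmp_path is
    # pytest's built-in per-test temporary directory, created fresh
    # for each test and removed automatically, so the tests need no
    # explicit cleanup of their own.
    path = tmp_path / str(uuid.uuid4())
    path.touch()
    return path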

6 files changed, +87 −95 lines


pandas/tests/io/parser/common/test_chunksize.py
Lines changed: 16 additions & 17 deletions

@@ -295,29 +295,28 @@ def test_empty_with_nrows_chunksize(all_parsers, iterator):
     tm.assert_frame_equal(result, expected)
 
 
-def test_read_csv_memory_growth_chunksize(all_parsers):
+def test_read_csv_memory_growth_chunksize(temp_file, all_parsers):
     # see gh-24805
     #
     # Let's just make sure that we don't crash
     # as we iteratively process all chunks.
     parser = all_parsers
 
-    with tm.ensure_clean() as path:
-        with open(path, "w", encoding="utf-8") as f:
-            for i in range(1000):
-                f.write(str(i) + "\n")
-
-        if parser.engine == "pyarrow":
-            msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
-            with pytest.raises(ValueError, match=msg):
-                with parser.read_csv(path, chunksize=20) as result:
-                    for _ in result:
-                        pass
-            return
-
-        with parser.read_csv(path, chunksize=20) as result:
-            for _ in result:
-                pass
+    with open(temp_file, "w", encoding="utf-8") as f:
+        for i in range(1000):
+            f.write(str(i) + "\n")
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with parser.read_csv(temp_file, chunksize=20) as result:
+                for _ in result:
+                    pass
+        return
+
+    with parser.read_csv(temp_file, chunksize=20) as result:
+        for _ in result:
+            pass
 
 
 def test_chunksize_with_usecols_second_block_shorter(all_parsers):
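
For contrast with the new fixture, tm.ensure_clean is a context manager in pandas._testing that yields a temporary path and deletes the file on exit; roughly the following, a simplified sketch rather than the actual implementation:

import os
import tempfile
import uuid
from contextlib import contextmanager


@contextmanager
def ensure_clean_sketch(filename=None):
    # Simplified stand-in for tm.ensure_clean: yield a unique path
    # under the system temp dir, then delete whatever the test wrote
    # there once the block exits.
    path = os.path.join(tempfile.gettempdir(), filename or str(uuid.uuid4()))
    try:
        yield path
    finally:
        if os.path.exists(path):
            os.remove(path)

Since both approaches hand the test a fresh scratch path, the hunks in this commit only need to swap path for temp_file and dedent the body.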

pandas/tests/io/parser/common/test_iterator.py
Lines changed: 10 additions & 11 deletions

@@ -142,19 +142,18 @@ def test_iterator_skipfooter_errors(all_parsers, kwargs):
             pass
 
 
-def test_iteration_open_handle(all_parsers):
+def test_iteration_open_handle(temp_file, all_parsers):
     parser = all_parsers
     kwargs = {"header": None}
 
-    with tm.ensure_clean() as path:
-        with open(path, "w", encoding="utf-8") as f:
-            f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
+    with open(temp_file, "w", encoding="utf-8") as f:
+        f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
 
-        with open(path, encoding="utf-8") as f:
-            for line in f:
-                if "CCC" in line:
-                    break
+    with open(temp_file, encoding="utf-8") as f:
+        for line in f:
+            if "CCC" in line:
+                break
 
-            result = parser.read_csv(f, **kwargs)
-            expected = DataFrame({0: ["DDD", "EEE", "FFF", "GGG"]})
-            tm.assert_frame_equal(result, expected)
+        result = parser.read_csv(f, **kwargs)
+        expected = DataFrame({0: ["DDD", "EEE", "FFF", "GGG"]})
+        tm.assert_frame_equal(result, expected)
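
The behavior exercised here, read_csv picking up an already-open handle where manual iteration left off, can be reproduced standalone; a small illustration using io.StringIO so no file is involved:

from io import StringIO

import pandas as pd

buf = StringIO("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
for line in buf:
    if "CCC" in line:
        break  # the buffer position is now just past the CCC line

# read_csv consumes only what remains in the handle
df = pd.read_csv(buf, header=None)
print(df[0].tolist())  # ['DDD', 'EEE', 'FFF', 'GGG']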

pandas/tests/io/parser/test_index_col.py
Lines changed: 8 additions & 10 deletions

@@ -200,7 +200,7 @@ def test_multi_index_naming_not_all_at_beginning(all_parsers):
 
 
 @xfail_pyarrow  # ValueError: Found non-unique column index
-def test_no_multi_index_level_names_empty(all_parsers):
+def test_no_multi_index_level_names_empty(temp_file, all_parsers):
     # GH 10984
     parser = all_parsers
     midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
@@ -209,9 +209,8 @@ def test_no_multi_index_level_names_empty(all_parsers):
         index=midx,
         columns=["x", "y", "z"],
     )
-    with tm.ensure_clean() as path:
-        expected.to_csv(path)
-        result = parser.read_csv(path, index_col=[0, 1, 2])
+    expected.to_csv(temp_file)
+    result = parser.read_csv(temp_file, index_col=[0, 1, 2])
     tm.assert_frame_equal(result, expected)
 
 
@@ -240,7 +239,7 @@ def test_header_with_index_col(all_parsers):
 
 
 @pytest.mark.slow
-def test_index_col_large_csv(all_parsers, monkeypatch):
+def test_index_col_large_csv(temp_file, all_parsers, monkeypatch):
     # https://github.com/pandas-dev/pandas/issues/37094
     parser = all_parsers
 
@@ -252,11 +251,10 @@ def test_index_col_large_csv(all_parsers, monkeypatch):
         }
     )
 
-    with tm.ensure_clean() as path:
-        df.to_csv(path, index=False)
-        with monkeypatch.context() as m:
-            m.setattr("pandas.core.algorithms._MINIMUM_COMP_ARR_LEN", ARR_LEN)
-            result = parser.read_csv(path, index_col=[0])
+    df.to_csv(temp_file, index=False)
+    with monkeypatch.context() as m:
+        m.setattr("pandas.core.algorithms._MINIMUM_COMP_ARR_LEN", ARR_LEN)
+        result = parser.read_csv(temp_file, index_col=[0])
 
     tm.assert_frame_equal(result, df.set_index("a"))

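Unrelated to the fixture swap, but the hunk above also shows monkeypatch.context(), which scopes the setattr override to the with-block and restores the original value on exit. A generic illustration of that pytest API:

import math


def test_monkeypatch_context_scope(monkeypatch):
    # monkeypatch is pytest's built-in fixture; context() limits the
    # override to the with-block and undoes it on exit.
    original = math.pi
    with monkeypatch.context() as m:
        m.setattr(math, "pi", 3.0)
        assert math.pi == 3.0
    assert math.pi == original  # restored outside the block
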
pandas/tests/io/parser/test_python_parser_only.py
Lines changed: 7 additions & 6 deletions

@@ -158,7 +158,9 @@ def test_skipfooter(python_parser_only, kwargs):
 @pytest.mark.parametrize(
     "compression,klass", [("gzip", "GzipFile"), ("bz2", "BZ2File")]
 )
-def test_decompression_regex_sep(python_parser_only, csv1, compression, klass):
+def test_decompression_regex_sep(
+    temp_file, python_parser_only, csv1, compression, klass
+):
     # see gh-6607
     parser = python_parser_only
 
@@ -171,12 +173,11 @@ def test_decompression_regex_sep(python_parser_only, csv1, compression, klass):
     module = pytest.importorskip(compression)
     klass = getattr(module, klass)
 
-    with tm.ensure_clean() as path:
-        with klass(path, mode="wb") as tmp:
-            tmp.write(data)
+    with klass(temp_file, mode="wb") as tmp:
+        tmp.write(data)
 
-        result = parser.read_csv(path, sep="::", compression=compression)
-        tm.assert_frame_equal(result, expected)
+    result = parser.read_csv(temp_file, sep="::", compression=compression)
+    tm.assert_frame_equal(result, expected)
 
 
 def test_read_csv_buglet_4x_multi_index(python_parser_only):
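
The test above combines two read_csv features: a multi-character separator such as "::" is treated as a regular expression, which only the python engine supports, and decompression happens transparently from the compression argument. A standalone sketch of that combination, with an illustrative path of its own:

import gzip
import os
import tempfile

import pandas as pd

path = os.path.join(tempfile.mkdtemp(), "example.csv.gz")
with gzip.GzipFile(path, mode="wb") as f:
    f.write(b"a::b\n1::2\n")

df = pd.read_csv(path, sep="::", engine="python", compression="gzip")
print(df)  # columns a and b with a single row: 1, 2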

pandas/tests/io/pytables/test_round_trip.py
Lines changed: 21 additions & 23 deletions

@@ -27,33 +27,31 @@
 pytestmark = [pytest.mark.single_cpu]
 
 
-def test_conv_read_write():
-    with tm.ensure_clean() as path:
+def test_conv_read_write(temp_file):
+    def roundtrip(key, obj, **kwargs):
+        obj.to_hdf(temp_file, key=key, **kwargs)
+        return read_hdf(temp_file, key)
 
-        def roundtrip(key, obj, **kwargs):
-            obj.to_hdf(path, key=key, **kwargs)
-            return read_hdf(path, key)
-
-        o = Series(
-            np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
-        )
-        tm.assert_series_equal(o, roundtrip("series", o))
+    o = Series(
+        np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+    )
+    tm.assert_series_equal(o, roundtrip("series", o))
 
-        o = Series(range(10), dtype="float64", index=[f"i_{i}" for i in range(10)])
-        tm.assert_series_equal(o, roundtrip("string_series", o))
+    o = Series(range(10), dtype="float64", index=[f"i_{i}" for i in range(10)])
+    tm.assert_series_equal(o, roundtrip("string_series", o))
 
-        o = DataFrame(
-            1.1 * np.arange(120).reshape((30, 4)),
-            columns=Index(list("ABCD")),
-            index=Index([f"i-{i}" for i in range(30)]),
-        )
-        tm.assert_frame_equal(o, roundtrip("frame", o))
+    o = DataFrame(
+        1.1 * np.arange(120).reshape((30, 4)),
+        columns=Index(list("ABCD")),
+        index=Index([f"i-{i}" for i in range(30)]),
+    )
+    tm.assert_frame_equal(o, roundtrip("frame", o))
 
-        # table
-        df = DataFrame({"A": range(5), "B": range(5)})
-        df.to_hdf(path, key="table", append=True)
-        result = read_hdf(path, "table", where=["index>2"])
-        tm.assert_frame_equal(df[df.index > 2], result)
+    # table
+    df = DataFrame({"A": range(5), "B": range(5)})
+    df.to_hdf(temp_file, key="table", append=True)
+    result = read_hdf(temp_file, "table", where=["index>2"])
+    tm.assert_frame_equal(df[df.index > 2], result)
 
 
 def test_long_strings(setup_path):
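
In the final hunk, the where=["index>2"] filter on read_hdf works because append=True writes the store in the "table" format; the default "fixed" format does not support on-disk queries. Isolated, assuming PyTables is installed and using an illustrative path:

import pandas as pd

df = pd.DataFrame({"A": range(5), "B": range(5)})
# append=True implies format="table"; only table-format stores
# support where-clauses on read.
df.to_hdf("roundtrip_demo.h5", key="table", append=True)
result = pd.read_hdf("roundtrip_demo.h5", "table", where=["index>2"])
assert result.equals(df[df.index > 2])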

pandas/tests/io/test_sql.py
Lines changed: 25 additions & 28 deletions

@@ -748,10 +748,9 @@ def postgresql_psycopg2_conn_types(postgresql_psycopg2_engine_types):
 
 
 @pytest.fixture
-def sqlite_str():
+def sqlite_str(temp_file):
     pytest.importorskip("sqlalchemy")
-    with tm.ensure_clean() as name:
-        yield f"sqlite:///{name}"
+    return f"sqlite:///{temp_file}"
 
 
 @pytest.fixture
@@ -817,20 +816,19 @@ def sqlite_conn_types(sqlite_engine_types):
 
 
 @pytest.fixture
-def sqlite_adbc_conn():
+def sqlite_adbc_conn(temp_file):
     pytest.importorskip("pyarrow")
     pytest.importorskip("adbc_driver_sqlite")
     from adbc_driver_sqlite import dbapi
 
-    with tm.ensure_clean() as name:
-        uri = f"file:{name}"
-        with dbapi.connect(uri) as conn:
-            yield conn
-            for view in get_all_views(conn):
-                drop_view(view, conn)
-            for tbl in get_all_tables(conn):
-                drop_table(tbl, conn)
-            conn.commit()
+    uri = f"file:{temp_file}"
+    with dbapi.connect(uri) as conn:
+        yield conn
+        for view in get_all_views(conn):
+            drop_view(view, conn)
+        for tbl in get_all_tables(conn):
+            drop_table(tbl, conn)
+        conn.commit()
 
 
 @pytest.fixture
@@ -2504,20 +2502,20 @@ def test_sqlalchemy_integer_overload_mapping(conn, request, integer):
         sql.SQLTable("test_type", db, frame=df)
 
 
-def test_database_uri_string(request, test_frame1):
+def test_database_uri_string(temp_file, request, test_frame1):
     pytest.importorskip("sqlalchemy")
     # Test read_sql and .to_sql method with a database URI (GH10654)
     # db_uri = 'sqlite:///:memory:' # raises
     # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
     # "iris": syntax error [SQL: 'iris']
-    with tm.ensure_clean() as name:
-        db_uri = "sqlite:///" + name
-        table = "iris"
-        test_frame1.to_sql(name=table, con=db_uri, if_exists="replace", index=False)
-        test_frame2 = sql.read_sql(table, db_uri)
-        test_frame3 = sql.read_sql_table(table, db_uri)
-        query = "SELECT * FROM iris"
-        test_frame4 = sql.read_sql_query(query, db_uri)
+    name = str(temp_file)
+    db_uri = "sqlite:///" + name
+    table = "iris"
+    test_frame1.to_sql(name=table, con=db_uri, if_exists="replace", index=False)
+    test_frame2 = sql.read_sql(table, db_uri)
+    test_frame3 = sql.read_sql_table(table, db_uri)
+    query = "SELECT * FROM iris"
+    test_frame4 = sql.read_sql_query(query, db_uri)
     tm.assert_frame_equal(test_frame1, test_frame2)
     tm.assert_frame_equal(test_frame1, test_frame3)
     tm.assert_frame_equal(test_frame1, test_frame4)
@@ -2581,16 +2579,15 @@ def test_column_with_percentage(conn, request):
     tm.assert_frame_equal(res, df)
 
 
-def test_sql_open_close(test_frame3):
+def test_sql_open_close(temp_file, test_frame3):
     # Test if the IO in the database still work if the connection closed
     # between the writing and reading (as in many real situations).
 
-    with tm.ensure_clean() as name:
-        with contextlib.closing(sqlite3.connect(name)) as conn:
-            assert sql.to_sql(test_frame3, "test_frame3_legacy", conn, index=False) == 4
+    with contextlib.closing(sqlite3.connect(temp_file)) as conn:
+        assert sql.to_sql(test_frame3, "test_frame3_legacy", conn, index=False) == 4
 
-        with contextlib.closing(sqlite3.connect(name)) as conn:
-            result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
+    with contextlib.closing(sqlite3.connect(temp_file)) as conn:
+        result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
 
     tm.assert_frame_equal(test_frame3, result)
 
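
One detail in test_database_uri_string above: the new code calls str(temp_file) before concatenating, presumably because the fixture yields a pathlib.Path and "sqlite:///" + Path(...) would raise TypeError, whereas the sqlite_str fixture avoids the conversion by formatting the Path inside an f-string. A minimal illustration of the difference:

from pathlib import Path

p = Path("/tmp/example.db")  # stand-in for temp_file

db_uri = "sqlite:///" + str(p)  # explicit conversion, as in the test
db_uri_f = f"sqlite:///{p}"     # f-string formats the Path implicitly
assert db_uri == db_uri_f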
