Commit 35fc186

TST: Replace all tm.ensure_clean usages in pandas/tests/io/pytables/test_store.py and pandas/tests/io/parser/common/test_file_buffer_url.py with pytest tmp_path/temp_file fixtures.
1 parent 9308b13 commit 35fc186
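
Both files migrate temporary-file handling from pandas' tm.ensure_clean context manager to pytest-managed fixtures: temp_file where any pre-created file will do, and tmp_path where the test needs to control the file name. Cleanup then falls to pytest's per-test temporary directory rather than to a context manager inside each test. For orientation, here is a minimal sketch of what such a temp_file fixture can look like; the actual definition lives in pandas' conftest.py and may differ in detail:

import uuid

import pytest


@pytest.fixture
def temp_file(tmp_path):
    # Sketch only: create a uniquely named, empty file inside pytest's
    # per-test tmp_path. pytest removes the whole directory after the
    # test, so the test body needs no explicit cleanup.
    file_path = tmp_path / str(uuid.uuid4())
    file_path.touch()
    return file_path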

File tree

2 files changed (+68, -73 lines)

pandas/tests/io/parser/common/test_file_buffer_url.py

Lines changed: 30 additions & 32 deletions

@@ -97,25 +97,25 @@ def test_nonexistent_path(all_parsers):
 
 @pytest.mark.skipif(WASM, reason="limited file system access on WASM")
 @td.skip_if_windows  # os.chmod does not work in windows
-def test_no_permission(all_parsers):
+def test_no_permission(all_parsers, temp_file):
     # GH 23784
     parser = all_parsers
 
     msg = r"\[Errno 13\]"
-    with tm.ensure_clean() as path:
-        os.chmod(path, 0)  # make file unreadable
+    path = temp_file
+    os.chmod(path, 0)  # make file unreadable
 
-        # verify that this process cannot open the file (not running as sudo)
-        try:
-            with open(path, encoding="utf-8"):
-                pass
-            pytest.skip("Running as sudo.")
-        except PermissionError:
+    # verify that this process cannot open the file (not running as sudo)
+    try:
+        with open(path, encoding="utf-8"):
             pass
+        pytest.skip("Running as sudo.")
+    except PermissionError:
+        pass
 
-        with pytest.raises(PermissionError, match=msg) as e:
-            parser.read_csv(path)
-        assert path == e.value.filename
+    with pytest.raises(PermissionError, match=msg) as e:
+        parser.read_csv(path)
+    assert path == e.value.filename
 
 
 @pytest.mark.parametrize(
@@ -269,19 +269,18 @@ def test_internal_eof_byte(all_parsers):
     tm.assert_frame_equal(result, expected)
 
 
-def test_internal_eof_byte_to_file(all_parsers):
+def test_internal_eof_byte_to_file(all_parsers, tmp_path):
     # see gh-16559
     parser = all_parsers
     data = b'c1,c2\r\n"test \x1a test", test\r\n'
     expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])
-    path = f"__{uuid.uuid4()}__.csv"
+    path = tmp_path / f"__{uuid.uuid4()}__.csv"
 
-    with tm.ensure_clean(path) as path:
-        with open(path, "wb") as f:
-            f.write(data)
+    with open(path, "wb") as f:
+        f.write(data)
 
-        result = parser.read_csv(path)
-        tm.assert_frame_equal(result, expected)
+    result = parser.read_csv(path)
+    tm.assert_frame_equal(result, expected)
 
 
 def test_file_handle_string_io(all_parsers):
@@ -372,7 +371,7 @@ def test_read_csv_file_handle(all_parsers, io_class, encoding):
     assert not handle.closed
 
 
-def test_memory_map_compression(all_parsers, compression):
+def test_memory_map_compression(all_parsers, compression, temp_file):
     """
     Support memory map for compressed files.
 
@@ -381,16 +380,16 @@ def test_memory_map_compression(all_parsers, compression):
     parser = all_parsers
     expected = DataFrame({"a": [1], "b": [2]})
 
-    with tm.ensure_clean() as path:
-        expected.to_csv(path, index=False, compression=compression)
+    path = temp_file
+    expected.to_csv(path, index=False, compression=compression)
 
-        if parser.engine == "pyarrow":
-            msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
-            with pytest.raises(ValueError, match=msg):
-                parser.read_csv(path, memory_map=True, compression=compression)
-            return
+    if parser.engine == "pyarrow":
+        msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(path, memory_map=True, compression=compression)
+        return
 
-        result = parser.read_csv(path, memory_map=True, compression=compression)
+    result = parser.read_csv(path, memory_map=True, compression=compression)
 
     tm.assert_frame_equal(
         result,
@@ -442,12 +441,11 @@ def test_context_manageri_user_provided(all_parsers, datapath):
 
 
 @skip_pyarrow  # ParserError: Empty CSV file
-def test_file_descriptor_leak(all_parsers):
+def test_file_descriptor_leak(all_parsers, temp_file):
    # GH 31488
     parser = all_parsers
-    with tm.ensure_clean() as path:
-        with pytest.raises(EmptyDataError, match="No columns to parse from file"):
-            parser.read_csv(path)
+    with pytest.raises(EmptyDataError, match="No columns to parse from file"):
+        parser.read_csv(temp_file)
 
 
 def test_memory_map(all_parsers, csv_dir_path):
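
The pattern in the hunks above is mechanical: drop the tm.ensure_clean with-block, take the path from the fixture, and dedent the body one level. A minimal before/after sketch using a hypothetical round-trip test (not part of this commit):

import pandas as pd
import pandas._testing as tm


# Before: tm.ensure_clean both creates the file and deletes it on exit,
# so the whole test body nests inside the with-block.
def test_roundtrip_old():
    df = pd.DataFrame({"a": [1], "b": [2]})
    with tm.ensure_clean() as path:
        df.to_csv(path, index=False)
        tm.assert_frame_equal(pd.read_csv(path), df)


# After: the fixture hands in a ready-made path under tmp_path, and
# pytest deletes the directory afterwards, so the body flattens out.
def test_roundtrip_new(temp_file):
    df = pd.DataFrame({"a": [1], "b": [2]})
    df.to_csv(temp_file, index=False)
    tm.assert_frame_equal(pd.read_csv(temp_file), df)

The one exception above is test_internal_eof_byte_to_file, which keeps its __<uuid>__.csv file name and therefore takes tmp_path directly instead of temp_file.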

pandas/tests/io/pytables/test_store.py

Lines changed: 38 additions & 41 deletions

@@ -41,22 +41,20 @@
 tables = pytest.importorskip("tables")
 
 
-def test_context(setup_path):
-    with tm.ensure_clean(setup_path) as path:
-        try:
-            with HDFStore(path) as tbl:
-                raise ValueError("blah")
-        except ValueError:
-            pass
-    with tm.ensure_clean(setup_path) as path:
-        with HDFStore(path) as tbl:
-            tbl["a"] = DataFrame(
-                1.1 * np.arange(120).reshape((30, 4)),
-                columns=Index(list("ABCD"), dtype=object),
-                index=Index([f"i-{i}" for i in range(30)], dtype=object),
-            )
-            assert len(tbl) == 1
-            assert type(tbl["a"]) == DataFrame
+def test_context(temp_file):
+    try:
+        with HDFStore(temp_file) as tbl:
+            raise ValueError("blah")
+    except ValueError:
+        pass
+    with HDFStore(temp_file) as tbl:
+        tbl["a"] = DataFrame(
+            1.1 * np.arange(120).reshape((30, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=Index([f"i-{i}" for i in range(30)], dtype=object),
+        )
+        assert len(tbl) == 1
+        assert type(tbl["a"]) == DataFrame
 
 
 def test_no_track_times(tmp_path, setup_path):
@@ -971,37 +969,36 @@ def test_pickle_path_localpath():
 
 
 @pytest.mark.parametrize("propindexes", [True, False])
-def test_copy(propindexes):
+def test_copy(propindexes, temp_file):
     df = DataFrame(
         1.1 * np.arange(120).reshape((30, 4)),
         columns=Index(list("ABCD")),
         index=Index([f"i-{i}" for i in range(30)]),
     )
 
-    with tm.ensure_clean() as path:
-        with HDFStore(path) as st:
-            st.append("df", df, data_columns=["A"])
-        with tempfile.NamedTemporaryFile() as new_f:
-            with HDFStore(path) as store:
-                with contextlib.closing(
-                    store.copy(new_f.name, keys=None, propindexes=propindexes)
-                ) as tstore:
-                    # check keys
-                    keys = store.keys()
-                    assert set(keys) == set(tstore.keys())
-                    # check indices & nrows
-                    for k in tstore.keys():
-                        if tstore.get_storer(k).is_table:
-                            new_t = tstore.get_storer(k)
-                            orig_t = store.get_storer(k)
-
-                            assert orig_t.nrows == new_t.nrows
-
-                            # check propindixes
-                            if propindexes:
-                                for a in orig_t.axes:
-                                    if a.is_indexed:
-                                        assert new_t[a.name].is_indexed
+    with HDFStore(temp_file) as st:
+        st.append("df", df, data_columns=["A"])
+    with tempfile.NamedTemporaryFile() as new_f:
+        with HDFStore(temp_file) as store:
+            with contextlib.closing(
+                store.copy(new_f.name, keys=None, propindexes=propindexes)
+            ) as tstore:
+                # check keys
+                keys = store.keys()
+                assert set(keys) == set(tstore.keys())
+                # check indices & nrows
+                for k in tstore.keys():
+                    if tstore.get_storer(k).is_table:
+                        new_t = tstore.get_storer(k)
+                        orig_t = store.get_storer(k)
+
+                        assert orig_t.nrows == new_t.nrows
+
+                        # check propindixes
+                        if propindexes:
+                            for a in orig_t.axes:
+                                if a.is_indexed:
+                                    assert new_t[a.name].is_indexed
 
 
 def test_duplicate_column_name(tmp_path, setup_path):
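
Two details are worth noting in this file: temp_file is a pathlib.Path, which HDFStore accepts directly, and in test_copy only the source store moves to the fixture; the copy destination still comes from tempfile.NamedTemporaryFile because store.copy needs a second, independent target path. A condensed sketch of that copy pattern, assuming PyTables is installed (copy_store_demo is a hypothetical helper, not part of this commit):

import contextlib
import tempfile

import numpy as np
import pandas as pd
from pandas import HDFStore


def copy_store_demo(src_path):
    # Write a small table-format frame into the source store.
    with HDFStore(src_path) as st:
        st.append("df", pd.DataFrame({"A": np.arange(3)}), data_columns=["A"])
    # Copy it into a second, independent temporary file and check that
    # the destination store ends up with the same keys.
    with tempfile.NamedTemporaryFile() as new_f:
        with HDFStore(src_path) as store:
            with contextlib.closing(store.copy(new_f.name)) as tstore:
                assert set(store.keys()) == set(tstore.keys())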
