Commit 2a22dd7

Update from database to schema in unit tests

1 parent: 9cd94de
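The change is mechanical: every `Catalog.database()` call in the Python tests becomes `Catalog.schema()`, following the rename of the catalog accessor in the bindings. A minimal sketch of the new spelling, using only calls that appear in the diffs below (`register_record_batches`, `catalog()`, `schema()`, `names()`):

    import pyarrow as pa
    from datafusion import SessionContext

    ctx = SessionContext()
    batch = pa.RecordBatch.from_arrays(
        [pa.array([1, 2, 3]), pa.array([4, 5, 6])], names=["a", "b"]
    )
    ctx.register_record_batches("t", [[batch]])

    # previously: ctx.catalog().database().names()
    assert ctx.catalog().schema().names() == {"t"}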

File tree: 4 files changed (+30, -30 lines)

python/tests/test_catalog.py (1 addition, 1 deletion)

@@ -32,7 +32,7 @@ def test_basic(ctx, database):
     default = ctx.catalog()
     assert default.names() == {"public"}

-    for db in [default.database("public"), default.database()]:
+    for db in [default.schema("public"), default.schema()]:
         assert db.names() == {"csv1", "csv", "csv2"}

         table = db.table("csv")
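For reference, the catalog traversal this test exercises now reads as follows; a sketch assuming the fixtures' "public" schema and "csv" table are already registered on ctx:

    default = ctx.catalog()              # default catalog, as in test_basic
    schema = default.schema("public")    # formerly default.database("public")
    assert "csv" in schema.names()       # table names registered in the schema
    table = schema.table("csv")          # look up a single table by name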

python/tests/test_context.py (20 additions, 20 deletions)

@@ -57,7 +57,7 @@ def test_runtime_configs(tmp_path, path_to_str):
     ctx = SessionContext(config, runtime)
     assert ctx is not None

-    db = ctx.catalog("foo").database("bar")
+    db = ctx.catalog("foo").schema("bar")
     assert db is not None


@@ -70,7 +70,7 @@ def test_temporary_files(tmp_path, path_to_str):
     ctx = SessionContext(config, runtime)
     assert ctx is not None

-    db = ctx.catalog("foo").database("bar")
+    db = ctx.catalog("foo").schema("bar")
     assert db is not None


@@ -91,7 +91,7 @@ def test_create_context_with_all_valid_args():
     ctx = SessionContext(config, runtime)

     # verify that at least some of the arguments worked
-    ctx.catalog("foo").database("bar")
+    ctx.catalog("foo").schema("bar")
     with pytest.raises(KeyError):
         ctx.catalog("datafusion")

@@ -105,7 +105,7 @@ def test_register_record_batches(ctx):

     ctx.register_record_batches("t", [[batch]])

-    assert ctx.catalog().database().names() == {"t"}
+    assert ctx.catalog().schema().names() == {"t"}

     result = ctx.sql("SELECT a+b, a-b FROM t").collect()

@@ -121,7 +121,7 @@ def test_create_dataframe_registers_unique_table_name(ctx):
     )

     df = ctx.create_dataframe([[batch]])
-    tables = list(ctx.catalog().database().names())
+    tables = list(ctx.catalog().schema().names())

     assert df
     assert len(tables) == 1
@@ -141,7 +141,7 @@ def test_create_dataframe_registers_with_defined_table_name(ctx):
     )

     df = ctx.create_dataframe([[batch]], name="tbl")
-    tables = list(ctx.catalog().database().names())
+    tables = list(ctx.catalog().schema().names())

     assert df
     assert len(tables) == 1
@@ -155,7 +155,7 @@ def test_from_arrow_table(ctx):

     # convert to DataFrame
     df = ctx.from_arrow(table)
-    tables = list(ctx.catalog().database().names())
+    tables = list(ctx.catalog().schema().names())

     assert df
     assert len(tables) == 1
@@ -200,7 +200,7 @@ def test_from_arrow_table_with_name(ctx):

     # convert to DataFrame with optional name
     df = ctx.from_arrow(table, name="tbl")
-    tables = list(ctx.catalog().database().names())
+    tables = list(ctx.catalog().schema().names())

     assert df
     assert tables[0] == "tbl"
@@ -213,7 +213,7 @@ def test_from_arrow_table_empty(ctx):

     # convert to DataFrame
     df = ctx.from_arrow(table)
-    tables = list(ctx.catalog().database().names())
+    tables = list(ctx.catalog().schema().names())

     assert df
     assert len(tables) == 1
@@ -228,7 +228,7 @@ def test_from_arrow_table_empty_no_schema(ctx):

     # convert to DataFrame
     df = ctx.from_arrow(table)
-    tables = list(ctx.catalog().database().names())
+    tables = list(ctx.catalog().schema().names())

     assert df
     assert len(tables) == 1
@@ -246,7 +246,7 @@ def test_from_pylist(ctx):
     ]

     df = ctx.from_pylist(data)
-    tables = list(ctx.catalog().database().names())
+    tables = list(ctx.catalog().schema().names())

     assert df
     assert len(tables) == 1
@@ -260,7 +260,7 @@ def test_from_pydict(ctx):
     data = {"a": [1, 2, 3], "b": [4, 5, 6]}

     df = ctx.from_pydict(data)
-    tables = list(ctx.catalog().database().names())
+    tables = list(ctx.catalog().schema().names())

     assert df
     assert len(tables) == 1
@@ -276,7 +276,7 @@ def test_from_pandas(ctx):
     pandas_df = pd.DataFrame(data)

     df = ctx.from_pandas(pandas_df)
-    tables = list(ctx.catalog().database().names())
+    tables = list(ctx.catalog().schema().names())

     assert df
     assert len(tables) == 1
@@ -292,7 +292,7 @@ def test_from_polars(ctx):
     polars_df = pd.DataFrame(data)

     df = ctx.from_polars(polars_df)
-    tables = list(ctx.catalog().database().names())
+    tables = list(ctx.catalog().schema().names())

     assert df
     assert len(tables) == 1
@@ -303,7 +303,7 @@ def test_from_polars(ctx):

 def test_register_table(ctx, database):
     default = ctx.catalog()
-    public = default.database("public")
+    public = default.schema("public")
     assert public.names() == {"csv", "csv1", "csv2"}
     table = public.table("csv")

@@ -313,7 +313,7 @@ def test_register_table(ctx, database):

 def test_read_table(ctx, database):
     default = ctx.catalog()
-    public = default.database("public")
+    public = default.schema("public")
     assert public.names() == {"csv", "csv1", "csv2"}

     table = public.table("csv")
@@ -323,7 +323,7 @@ def test_read_table(ctx, database):

 def test_deregister_table(ctx, database):
     default = ctx.catalog()
-    public = default.database("public")
+    public = default.schema("public")
     assert public.names() == {"csv", "csv1", "csv2"}

     ctx.deregister_table("csv")
@@ -339,7 +339,7 @@ def test_register_dataset(ctx):
     dataset = ds.dataset([batch])
     ctx.register_dataset("t", dataset)

-    assert ctx.catalog().database().names() == {"t"}
+    assert ctx.catalog().schema().names() == {"t"}

     result = ctx.sql("SELECT a+b, a-b FROM t").collect()

@@ -356,7 +356,7 @@ def test_dataset_filter(ctx, capfd):
     dataset = ds.dataset([batch])
     ctx.register_dataset("t", dataset)

-    assert ctx.catalog().database().names() == {"t"}
+    assert ctx.catalog().schema().names() == {"t"}
     df = ctx.sql("SELECT a+b, a-b FROM t WHERE a BETWEEN 2 and 3 AND b > 5")

     # Make sure the filter was pushed down in Physical Plan
@@ -455,7 +455,7 @@ def test_dataset_filter_nested_data(ctx):
     dataset = ds.dataset([batch])
     ctx.register_dataset("t", dataset)

-    assert ctx.catalog().database().names() == {"t"}
+    assert ctx.catalog().schema().names() == {"t"}

     df = ctx.table("t")

python/tests/test_sql.py (7 additions, 7 deletions)

@@ -75,7 +75,7 @@ def test_register_csv(ctx, tmp_path):
     )
     ctx.register_csv("csv3", path, schema=alternative_schema)

-    assert ctx.catalog().database().names() == {
+    assert ctx.catalog().schema().names() == {
         "csv",
         "csv1",
         "csv2",
@@ -150,7 +150,7 @@ def test_register_parquet(ctx, tmp_path):
     path = helpers.write_parquet(tmp_path / "a.parquet", helpers.data())
     ctx.register_parquet("t", path)
     ctx.register_parquet("t1", str(path))
-    assert ctx.catalog().database().names() == {"t", "t1"}
+    assert ctx.catalog().schema().names() == {"t", "t1"}

     result = ctx.sql("SELECT COUNT(a) AS cnt FROM t").collect()
     result = pa.Table.from_batches(result)
@@ -184,7 +184,7 @@ def test_register_parquet_partitioned(ctx, tmp_path, path_to_str):
         parquet_pruning=True,
         file_extension=".parquet",
     )
-    assert ctx.catalog().database().names() == {"datapp"}
+    assert ctx.catalog().schema().names() == {"datapp"}

     result = ctx.sql("SELECT grp, COUNT(*) AS cnt FROM datapp GROUP BY grp").collect()
     result = pa.Table.from_batches(result)
@@ -200,7 +200,7 @@ def test_register_dataset(ctx, tmp_path, path_to_str):
     dataset = ds.dataset(path, format="parquet")

     ctx.register_dataset("t", dataset)
-    assert ctx.catalog().database().names() == {"t"}
+    assert ctx.catalog().schema().names() == {"t"}

     result = ctx.sql("SELECT COUNT(a) AS cnt FROM t").collect()
     result = pa.Table.from_batches(result)
@@ -247,7 +247,7 @@ def test_register_json(ctx, tmp_path):
     )
     ctx.register_json("json3", path, schema=alternative_schema)

-    assert ctx.catalog().database().names() == {
+    assert ctx.catalog().schema().names() == {
         "json",
         "json1",
         "json2",
@@ -304,7 +304,7 @@ def test_execute(ctx, tmp_path):
     path = helpers.write_parquet(tmp_path / "a.parquet", pa.array(data))
     ctx.register_parquet("t", path)

-    assert ctx.catalog().database().names() == {"t"}
+    assert ctx.catalog().schema().names() == {"t"}

     # count
     result = ctx.sql("SELECT COUNT(a) AS cnt FROM t WHERE a IS NOT NULL").collect()
@@ -520,7 +520,7 @@ def test_register_listing_table(
         schema=table.schema if pass_schema else None,
         file_sort_order=file_sort_order,
     )
-    assert ctx.catalog().database().names() == {"my_table"}
+    assert ctx.catalog().schema().names() == {"my_table"}

     result = ctx.sql(
         "SELECT grp, COUNT(*) AS count FROM my_table GROUP BY grp"

python/tests/test_substrait.py (2 additions, 2 deletions)

@@ -34,7 +34,7 @@ def test_substrait_serialization(ctx):

     ctx.register_record_batches("t", [[batch]])

-    assert ctx.catalog().database().names() == {"t"}
+    assert ctx.catalog().schema().names() == {"t"}

     # For now just make sure the method calls blow up
     substrait_plan = ss.Serde.serialize_to_plan("SELECT * FROM t", ctx)
@@ -59,7 +59,7 @@ def test_substrait_file_serialization(ctx, tmp_path, path_to_str):

     ctx.register_record_batches("t", [[batch]])

-    assert ctx.catalog().database().names() == {"t"}
+    assert ctx.catalog().schema().names() == {"t"}

     path = tmp_path / "substrait_plan"
     path = str(path) if path_to_str else path
