Commit 16fcbbc

Ruff C408: rewrite dict() calls as literals
1 parent 7ad26fb commit 16fcbbc
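
For context, the change applied throughout this commit replaces dict() calls with dict literals: bare dict() becomes {}, keyword-argument calls such as dict(a=1) become {"a": 1}, and dict(...) wrapped around a generator expression becomes a dict comprehension. A minimal, self-contained sketch of the before/after shapes (the variable names below are illustrative, not taken from the codebase):

    # Before (the style flagged by the linter): dict() calls
    empty = dict()
    by_keyword = dict(i=42, j=43)
    from_pairs = dict((str(n), n * n) for n in range(3))

    # After (the rewritten style): literals and a comprehension; behavior is
    # identical, and the literal forms avoid a global name lookup of dict.
    empty = {}
    by_keyword = {"i": 42, "j": 43}
    from_pairs = {str(n): n * n for n in range(3)}

    assert by_keyword == {"i": 42, "j": 43}
    assert from_pairs == {"0": 0, "1": 1, "2": 2}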

File tree

11 files changed, +26 -26 lines changed

duckdb/experimental/spark/sql/types.py

Lines changed: 7 additions & 7 deletions
@@ -650,12 +650,12 @@ def needConversion(self) -> bool:  # noqa: D102
     def toInternal(self, obj: dict[T, Optional[U]]) -> dict[T, Optional[U]]:  # noqa: D102
         if not self.needConversion():
             return obj
-        return obj and dict((self.keyType.toInternal(k), self.valueType.toInternal(v)) for k, v in obj.items())
+        return obj and {self.keyType.toInternal(k): self.valueType.toInternal(v) for k, v in obj.items()}
 
     def fromInternal(self, obj: dict[T, Optional[U]]) -> dict[T, Optional[U]]:  # noqa: D102
         if not self.needConversion():
             return obj
-        return obj and dict((self.keyType.fromInternal(k), self.valueType.fromInternal(v)) for k, v in obj.items())
+        return obj and {self.keyType.fromInternal(k): self.valueType.fromInternal(v) for k, v in obj.items()}
 
 
 class StructField(DataType):
@@ -1031,16 +1031,16 @@ def __eq__(self, other: object) -> bool:
     TimestampNTZType,
     NullType,
 ]
-_all_atomic_types: dict[str, type[DataType]] = dict((t.typeName(), t) for t in _atomic_types)
+_all_atomic_types: dict[str, type[DataType]] = {t.typeName(): t for t in _atomic_types}
 
 _complex_types: list[type[Union[ArrayType, MapType, StructType]]] = [
     ArrayType,
     MapType,
     StructType,
 ]
-_all_complex_types: dict[str, type[Union[ArrayType, MapType, StructType]]] = dict(
-    (v.typeName(), v) for v in _complex_types
-)
+_all_complex_types: dict[str, type[Union[ArrayType, MapType, StructType]]] = {
+    v.typeName(): v for v in _complex_types
+}
 
 
 _FIXED_DECIMAL = re.compile(r"decimal\(\s*(\d+)\s*,\s*(-?\d+)\s*\)")
@@ -1164,7 +1164,7 @@ def conv(obj: Union[Row, list, dict, object]) -> Union[list, dict, object]:
                 elif isinstance(obj, list):
                     return [conv(o) for o in obj]
                 elif isinstance(obj, dict):
-                    return dict((k, conv(v)) for k, v in obj.items())
+                    return {k: conv(v) for k, v in obj.items()}
                 else:
                     return obj
 

duckdb_packaging/build_backend.py

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@ def _duckdb_submodule_path() -> Path:
         raise RuntimeError(msg)
     # search the duckdb submodule
     gitmodules_path = Path(".gitmodules")
-    modules = dict()
+    modules = {}
     with gitmodules_path.open("r") as f:
         cur_module_path = None
         cur_module_reponame = None

duckdb_packaging/pypi_cleanup.py

Lines changed: 1 addition & 1 deletion
@@ -482,7 +482,7 @@ def _delete_versions(self, http_session: Session, versions_to_delete: set[str])
         """Delete the specified package versions."""
         logging.info(f"Starting deletion of {len(versions_to_delete)} development versions")
 
-        failed_deletions = list()
+        failed_deletions = []
         for version in sorted(versions_to_delete):
             try:
                 self._delete_single_version(http_session, version)

tests/extensions/json/test_read_json.py

Lines changed: 1 addition & 1 deletion
@@ -113,7 +113,7 @@ def test_read_json_records(self):
         ],
     )
     def test_read_json_options(self, duckdb_cursor, option):
-        keyword_arguments = dict()
+        keyword_arguments = {}
         option_name, option_value = option
         keyword_arguments[option_name] = option_value
         if option_name == "hive_types":

tests/fast/api/test_to_parquet.py

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ def test_field_ids_auto(self):
     def test_field_ids(self):
         temp_file_name = os.path.join(tempfile.mkdtemp(), next(tempfile._get_candidate_names()))  # noqa: PTH118
         rel = duckdb.sql("""SELECT 1 as i, {j: 128} AS my_struct""")
-        rel.to_parquet(temp_file_name, field_ids=dict(i=42, my_struct={"__duckdb_field_id": 43, "j": 44}))
+        rel.to_parquet(temp_file_name, field_ids={"i": 42, "my_struct": {"__duckdb_field_id": 43, "j": 44}})
         parquet_rel = duckdb.read_parquet(temp_file_name)
         assert rel.execute().fetchall() == parquet_rel.execute().fetchall()
         assert duckdb.sql(

tests/fast/arrow/test_5547.py

Lines changed: 6 additions & 6 deletions
@@ -13,12 +13,12 @@ def test_5547():
     tbl = pa.Table.from_pandas(
         pd.DataFrame.from_records(
             [
-                dict(
-                    id=i,
-                    nested=dict(
-                        a=i,
-                    ),
-                )
+                {
+                    "id": i,
+                    "nested": {
+                        "a": i,
+                    },
+                }
                 for i in range(num_rows)
             ]
         )

tests/fast/arrow/test_large_offsets.py

Lines changed: 3 additions & 3 deletions
@@ -12,23 +12,23 @@ class TestArrowLargeOffsets:
     @pytest.mark.skip(reason="CI does not have enough memory to validate this")
     def test_large_lists(self, duckdb_cursor):
         ary = pa.array([np.arange(start=0, stop=3000, dtype=np.uint8) for i in range(1_000_000)])
-        tbl = pa.Table.from_pydict(dict(col=ary))  # noqa: F841
+        tbl = pa.Table.from_pydict({"col": ary})  # noqa: F841
         with pytest.raises(
             duckdb.InvalidInputException,
             match="Arrow Appender: The maximum combined list offset for regular list buffers is 2147483647 but "
             "the offset of 2147481000 exceeds this.",
         ):
             duckdb_cursor.sql("SELECT col FROM tbl").fetch_arrow_table()
 
-        tbl2 = pa.Table.from_pydict(dict(col=ary.cast(pa.large_list(pa.uint8()))))  # noqa: F841
+        tbl2 = pa.Table.from_pydict({"col": ary.cast(pa.large_list(pa.uint8()))})  # noqa: F841
         duckdb_cursor.sql("set arrow_large_buffer_size = true")
         res2 = duckdb_cursor.sql("SELECT col FROM tbl2").fetch_arrow_table()
         res2.validate()
 
     @pytest.mark.skip(reason="CI does not have enough memory to validate this")
     def test_large_maps(self, duckdb_cursor):
         ary = pa.array([np.arange(start=3000 * j, stop=3000 * (j + 1), dtype=np.uint64) for j in range(1_000_000)])
-        tbl = pa.Table.from_pydict(dict(col=ary))  # noqa: F841
+        tbl = pa.Table.from_pydict({"col": ary})  # noqa: F841
 
         with pytest.raises(
             duckdb.InvalidInputException,

tests/fast/pandas/test_pandas_object.py

Lines changed: 3 additions & 3 deletions
@@ -30,8 +30,8 @@ def test_object_to_string(self, duckdb_cursor):
 
     def test_tuple_to_list(self, duckdb_cursor):
         tuple_df = pd.DataFrame.from_dict(  # noqa: F841
-            dict(
-                nums=[
+            {
+                "nums": [
                     (
                         1,
                         2,
@@ -43,7 +43,7 @@ def test_tuple_to_list(self, duckdb_cursor):
                         6,
                     ),
                 ]
-            )
+            }
         )
         duckdb_cursor.execute("CREATE TABLE test as SELECT * FROM tuple_df")
         res = duckdb_cursor.table("test").fetchall()

tests/fast/spark/test_spark_dataframe_sort.py

Lines changed: 1 addition & 1 deletion
@@ -84,7 +84,7 @@ def test_sort_invalid_column(self, spark):
         df = spark.createDataFrame(self.data, ["age", "name"])
 
         with pytest.raises(PySparkTypeError):
-            df = df.sort(dict(a=1))
+            df = df.sort({"a": 1})
 
     def test_sort_with_desc(self, spark):
         df = spark.createDataFrame(self.data, ["age", "name"])

tests/fast/spark/test_spark_filter.py

Lines changed: 1 addition & 1 deletion
@@ -184,4 +184,4 @@ def test_invalid_condition_type(self, spark):
         df = spark.createDataFrame([(1, "A")], ["A", "B"])
 
         with pytest.raises(PySparkTypeError):
-            df = df.filter(dict(a=1))
+            df = df.filter({"a": 1})
