Commit ca4d912

resolve merge conflicts
2 parents ba1fc7e + d3cc3c0 commit ca4d912

17 files changed (+201 -197 lines)


awswrangler/_utils.py

Lines changed: 1 addition & 2 deletions
@@ -121,8 +121,7 @@ def inner(*args: Any, **kwargs: Any) -> Any:
             package_name = INSTALL_MAPPING.get(name)
             install_name = package_name if package_name is not None else name
             raise ModuleNotFoundError(
-                f"Missing optional dependency '{name}'. "
-                f"Use pip install awswrangler[{install_name}] to install it."
+                f"Missing optional dependency '{name}'. Use pip install awswrangler[{install_name}] to install it."
             )
         return func(*args, **kwargs)
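The change here, repeated in most of the files below, collapses adjacent f-string literals (implicit concatenation, typically left behind by line-length wrapping) into a single literal. A minimal sketch of the equivalence, with hypothetical values for name and install_name:

    name = "oracledb"       # hypothetical values for illustration
    install_name = "oracle"

    # Before: two adjacent f-string literals, joined by the compiler.
    old = (
        f"Missing optional dependency '{name}'. "
        f"Use pip install awswrangler[{install_name}] to install it."
    )
    # After: one literal; the runtime value is identical.
    new = f"Missing optional dependency '{name}'. Use pip install awswrangler[{install_name}] to install it."

    assert old == new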

awswrangler/athena/_read.py

Lines changed: 1 addition & 1 deletion
@@ -610,7 +610,7 @@ def _unload(
     if partitioned_by:
         unload_parameters += f" , partitioned_by=ARRAY{partitioned_by}"

-    sql = f"UNLOAD ({sql}) " f"TO '{path}' " f"WITH ({unload_parameters})"
+    sql = f"UNLOAD ({sql}) TO '{path}' WITH ({unload_parameters})"
     _logger.debug("Executing unload query: %s", sql)
     try:
         query_id: str = _start_query_execution(

awswrangler/athena/_utils.py

Lines changed: 4 additions & 4 deletions
@@ -1025,13 +1025,13 @@ def parse_properties(parameters: dict[str, str]) -> str:

     query_parts += [
         """ROW FORMAT SERDE """,
-        f""" '{table_detail['StorageDescriptor']['SerdeInfo']['SerializationLibrary']}' """,
+        f""" '{table_detail["StorageDescriptor"]["SerdeInfo"]["SerializationLibrary"]}' """,
         """STORED AS INPUTFORMAT """,
-        f""" '{table_detail['StorageDescriptor']['InputFormat']}' """,
+        f""" '{table_detail["StorageDescriptor"]["InputFormat"]}' """,
         """OUTPUTFORMAT """,
-        f""" '{table_detail['StorageDescriptor']['OutputFormat']}'""",
+        f""" '{table_detail["StorageDescriptor"]["OutputFormat"]}'""",
         """LOCATION""",
-        f""" '{table_detail['StorageDescriptor']['Location']}'""",
+        f""" '{table_detail["StorageDescriptor"]["Location"]}'""",
         f"""TBLPROPERTIES (\n{tblproperties})""",
     ]
     sql = "\n".join(query_parts)
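These hunks change only the quoting of the dictionary subscripts inside the f-string expressions, from single to double quotes. Because the outer delimiter is a triple quote, both spellings are valid and render identical SQL. A short sketch with a made-up table_detail:

    table_detail = {"StorageDescriptor": {"Location": "s3://bucket/prefix/"}}  # made-up sample

    old = f""" '{table_detail['StorageDescriptor']['Location']}'"""
    new = f""" '{table_detail["StorageDescriptor"]["Location"]}'"""

    assert old == new  # only the source quoting differs, not the value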

awswrangler/athena/_write_iceberg.py

Lines changed: 7 additions & 7 deletions
@@ -309,7 +309,7 @@ def _merge_iceberg(
     if merge_cols:
         if merge_condition == "update":
             match_condition = f"""WHEN MATCHED THEN
-                UPDATE SET {', '.join([f'"{x}" = source."{x}"' for x in df.columns])}"""
+                UPDATE SET {", ".join([f'"{x}" = source."{x}"' for x in df.columns])}"""
         else:
             match_condition = ""

@@ -321,16 +321,16 @@ def _merge_iceberg(
         sql_statement = f"""
            MERGE INTO "{database}"."{table}" target
            USING "{database}"."{source_table}" source
-           ON {' AND '.join(merge_conditions)}
+           ON {" AND ".join(merge_conditions)}
            {match_condition}
            WHEN NOT MATCHED THEN
-               INSERT ({', '.join([f'"{x}"' for x in df.columns])})
-               VALUES ({', '.join([f'source."{x}"' for x in df.columns])})
+               INSERT ({", ".join([f'"{x}"' for x in df.columns])})
+               VALUES ({", ".join([f'source."{x}"' for x in df.columns])})
         """
     else:
         sql_statement = f"""
-           INSERT INTO "{database}"."{table}" ({', '.join([f'"{x}"' for x in df.columns])})
-           SELECT {', '.join([f'"{x}"' for x in df.columns])}
+           INSERT INTO "{database}"."{table}" ({", ".join([f'"{x}"' for x in df.columns])})
+           SELECT {", ".join([f'"{x}"' for x in df.columns])}
            FROM "{database}"."{source_table}"
         """

@@ -763,7 +763,7 @@ def delete_from_iceberg_table(
     sql_statement = f"""
        MERGE INTO "{database}"."{table}" target
        USING "{database}"."{temp_table}" source
-       ON {' AND '.join([f'target."{x}" = source."{x}"' for x in merge_cols])}
+       ON {" AND ".join([f'target."{x}" = source."{x}"' for x in merge_cols])}
        WHEN MATCHED THEN
            DELETE
     """

awswrangler/dynamodb/_utils.py

Lines changed: 1 addition & 1 deletion
@@ -253,7 +253,7 @@ def _remove_dup_pkeys_request_if_any(
         if self._extract_pkey_values(item, overwrite_by_pkeys) == pkey_values_new:
             self._items_buffer.remove(item)
             _logger.debug(
-                "With overwrite_by_pkeys enabled, skipping " "request:%s",
+                "With overwrite_by_pkeys enabled, skipping request:%s",
                 item,
             )

awswrangler/oracle.py

Lines changed: 8 additions & 8 deletions
@@ -48,8 +48,8 @@ def _validate_connection(con: "oracledb.Connection") -> None:


 def _get_table_identifier(schema: str | None, table: str) -> str:
-    schema_str = f'{identifier(schema, sql_mode="ansi")}.' if schema else ""
-    table_identifier = f'{schema_str}{identifier(table, sql_mode="ansi")}'
+    schema_str = f"{identifier(schema, sql_mode='ansi')}." if schema else ""
+    table_identifier = f"{schema_str}{identifier(table, sql_mode='ansi')}"
     return table_identifier


@@ -104,10 +104,10 @@ def _create_table(
         varchar_lengths=varchar_lengths,
         converter_func=_data_types.pyarrow2oracle,
     )
-    cols_str: str = "".join([f'{identifier(k, sql_mode="ansi")} {v},\n' for k, v in oracle_types.items()])[:-2]
+    cols_str: str = "".join([f"{identifier(k, sql_mode='ansi')} {v},\n" for k, v in oracle_types.items()])[:-2]

     if primary_keys:
-        primary_keys_str = ", ".join([f'{identifier(k, sql_mode="ansi")}' for k in primary_keys])
+        primary_keys_str = ", ".join([f"{identifier(k, sql_mode='ansi')}" for k in primary_keys])
     else:
         primary_keys_str = None

@@ -469,17 +469,17 @@ def _generate_upsert_statement(

     non_primary_key_columns = [key for key in df.columns if key not in set(primary_keys)]

-    primary_keys_str = ", ".join([f'{identifier(key, sql_mode="ansi")}' for key in primary_keys])
-    columns_str = ", ".join([f'{identifier(key, sql_mode="ansi")}' for key in non_primary_key_columns])
+    primary_keys_str = ", ".join([f"{identifier(key, sql_mode='ansi')}" for key in primary_keys])
+    columns_str = ", ".join([f"{identifier(key, sql_mode='ansi')}" for key in non_primary_key_columns])

     column_placeholders: str = f"({', '.join([':' + str(i + 1) for i in range(len(df.columns))])})"

     primary_key_condition_str = " AND ".join(
-        [f'{identifier(key, sql_mode="ansi")} = :{i+1}' for i, key in enumerate(primary_keys)]
+        [f"{identifier(key, sql_mode='ansi')} = :{i + 1}" for i, key in enumerate(primary_keys)]
     )
     assignment_str = ", ".join(
         [
-            f'{identifier(col, sql_mode="ansi")} = :{i + len(primary_keys) + 1}'
+            f"{identifier(col, sql_mode='ansi')} = :{i + len(primary_keys) + 1}"
             for i, col in enumerate(non_primary_key_columns)
         ]
     )
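In this file the outer f-string quotes flip from single to double, so the nested quotes around 'ansi' flip the other way, and spacing inside replacement fields is normalized ({i+1} becomes {i + 1}). Neither change affects the generated SQL. A sketch using a simplified stand-in for awswrangler's identifier helper (the stand-in and the sample key list are assumptions for illustration):

    def identifier(name: str, sql_mode: str = "ansi") -> str:
        # simplified stand-in assuming ANSI double-quoting; not the real helper
        return f'"{name}"'

    primary_keys = ["id", "region"]  # hypothetical key columns

    old = " AND ".join([f'{identifier(key, sql_mode="ansi")} = :{i+1}' for i, key in enumerate(primary_keys)])
    new = " AND ".join([f"{identifier(key, sql_mode='ansi')} = :{i + 1}" for i, key in enumerate(primary_keys)])

    assert old == new  # both yield '"id" = :1 AND "region" = :2'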

awswrangler/redshift/_utils.py

Lines changed: 4 additions & 7 deletions
@@ -73,7 +73,7 @@ def _begin_transaction(cursor: "redshift_connector.Cursor") -> None:
 def _drop_table(cursor: "redshift_connector.Cursor", schema: str | None, table: str, cascade: bool = False) -> None:
     schema_str = f'"{schema}".' if schema else ""
     cascade_str = " CASCADE" if cascade else ""
-    sql = f'DROP TABLE IF EXISTS {schema_str}"{table}"' f"{cascade_str}"
+    sql = f'DROP TABLE IF EXISTS {schema_str}"{table}"{cascade_str}'
     _logger.debug("Executing drop table query:\n%s", sql)
     cursor.execute(sql)

@@ -130,10 +130,7 @@ def _add_table_columns(
 def _does_table_exist(cursor: "redshift_connector.Cursor", schema: str | None, table: str) -> bool:
     schema_str = f"TABLE_SCHEMA = '{schema}' AND" if schema else ""
     sql = (
-        f"SELECT true WHERE EXISTS ("
-        f"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE "
-        f"{schema_str} TABLE_NAME = '{table}'"
-        f");"
+        f"SELECT true WHERE EXISTS (SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE {schema_str} TABLE_NAME = '{table}');"
     )
     _logger.debug("Executing select query:\n%s", sql)
     cursor.execute(sql)

@@ -236,12 +233,12 @@ def _validate_parameters(
     if sortkey:
         if not isinstance(sortkey, list):
             raise exceptions.InvalidRedshiftSortkey(
-                f"sortkey must be a List of items in the columns list: {cols}. " f"Currently value: {sortkey}"
+                f"sortkey must be a List of items in the columns list: {cols}. Currently value: {sortkey}"
             )
         for key in sortkey:
             if key not in cols:
                 raise exceptions.InvalidRedshiftSortkey(
-                    f"sortkey must be a List of items in the columns list: {cols}. " f"Currently value: {key}"
+                    f"sortkey must be a List of items in the columns list: {cols}. Currently value: {key}"
                 )
     if primary_keys:
         if not isinstance(primary_keys, list):

awswrangler/sqlserver.py

Lines changed: 1 addition & 2 deletions
@@ -536,8 +536,7 @@ def to_sql(
         sql = f"MERGE INTO {table_identifier}\nUSING (VALUES {placeholders}) AS source ({quoted_columns})\n"
         sql += f"ON {' AND '.join(f'{table_identifier}.{col}=source.{col}' for col in merge_on_columns)}\n"
         sql += (
-            f"WHEN MATCHED THEN\n UPDATE "
-            f"SET {', '.join(f'{col}=source.{col}' for col in column_names)}\n"
+            f"WHEN MATCHED THEN\n UPDATE SET {', '.join(f'{col}=source.{col}' for col in column_names)}\n"
         )
         sql += (
             f"WHEN NOT MATCHED THEN\n INSERT "

awswrangler/timestream/_read.py

Lines changed: 1 addition & 1 deletion
@@ -409,7 +409,7 @@ def unload_to_files(
     timestream_client = _utils.client(service_name="timestream-query", session=boto3_session)

     partitioned_by_str: str = (
-        f"""partitioned_by = ARRAY [{','.join([f"'{col}'" for col in partition_cols])}],\n"""
+        f"""partitioned_by = ARRAY [{",".join([f"'{col}'" for col in partition_cols])}],\n"""
         if partition_cols is not None
         else ""
     )
