
Commit c8c6037

Enforcing invalid-name pylint rule (#957)
1 parent: f68686c


44 files changed: +469 −451 lines

pyproject.toml

Lines changed: 11 additions & 12 deletions
@@ -322,10 +322,19 @@ function-naming-style = "snake_case"
 
 # Regular expression matching correct function names. Overrides function-naming-
 # style. If left empty, function names will be checked with the set naming style.
-function-rgx = "[a-z_][a-z0-9_]{2,30}$"
+function-rgx = "[a-z_][a-z0-9_]{2,}$"
 
 # Good variable names which should always be accepted, separated by a comma.
-good-names = ["i", "j", "k", "ex", "Run", "_"]
+good-names = [
+    "f",  # use for file handles
+    "i", "j", "k",  # use for loops
+    "df",  # use for pyspark.sql.DataFrame
+    "ex", "e",  # use for exceptions
+    "fn", "cb",  # use for callbacks
+    "_",  # use for ignores
+    "a",  # use for databricks.sdk.AccountClient
+    "w", "ws"  # use for databricks.sdk.WorkspaceClient
+]
 
 # Good variable names regexes, separated by a comma. If names match any regex,
 # they will always be accepted

@@ -537,22 +546,12 @@ confidence = ["HIGH", "CONTROL_FLOW", "INFERENCE", "INFERENCE_FAILURE", "UNDEFIN
 # no Warning level messages displayed, use "--disable=all --enable=classes
 # --disable=W".
 disable = [
-    "raw-checker-failed",
-    "bad-inline-option",
-    "locally-disabled",
-    "file-ignored",
-    "suppressed-message",
-    "deprecated-pragma",
-    "use-implicit-booleaness-not-comparison-to-string",
-    "use-implicit-booleaness-not-comparison-to-zero",
     "consider-using-augmented-assign",
     "prefer-typing-namedtuple",
     "attribute-defined-outside-init",
-    "invalid-name",
     "missing-module-docstring",
     "missing-class-docstring",
     "missing-function-docstring",
-    # "protected-access", # TODO: enable back
     "too-few-public-methods",
     "line-too-long",
     "too-many-lines",

src/databricks/labs/ucx/account.py

Lines changed: 2 additions & 2 deletions
@@ -22,9 +22,9 @@ class AccountWorkspaces:
 
     SYNC_FILE_NAME: ClassVar[str] = "workspaces.json"
 
-    def __init__(self, ac: AccountClient, new_workspace_client=WorkspaceClient):
+    def __init__(self, account_client: AccountClient, new_workspace_client=WorkspaceClient):
         self._new_workspace_client = new_workspace_client
-        self._ac = ac
+        self._ac = account_client
 
     def _workspaces(self):
         return self._ac.workspaces.list()
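
Since the rename changes the keyword a caller would pass, here is a hedged wiring sketch (the host and account_id values are placeholders, and auth details are elided; none of this is part of the commit):

from databricks.sdk import AccountClient
from databricks.labs.ucx.account import AccountWorkspaces

# Placeholder credentials; a real AccountClient needs valid account-level auth.
account_client = AccountClient(host="https://accounts.cloud.databricks.com", account_id="...")
workspaces = AccountWorkspaces(account_client=account_client)  # was: AccountWorkspaces(ac=...)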

src/databricks/labs/ucx/assessment/jobs.py

Lines changed: 9 additions & 9 deletions
@@ -29,21 +29,21 @@ def _get_cluster_configs_from_all_jobs(all_jobs, all_clusters_by_id): # pylint:
         if j.settings is None:
             continue
         if j.settings.job_clusters is not None:
-            for jc in j.settings.job_clusters:
-                if jc.new_cluster is None:
+            for job_cluster in j.settings.job_clusters:
+                if job_cluster.new_cluster is None:
                     continue
-                yield j, jc.new_cluster
+                yield j, job_cluster.new_cluster
         if j.settings.tasks is None:
             continue
-        for t in j.settings.tasks:
-            if t.existing_cluster_id is not None:
-                interactive_cluster = all_clusters_by_id.get(t.existing_cluster_id, None)
+        for task in j.settings.tasks:
+            if task.existing_cluster_id is not None:
+                interactive_cluster = all_clusters_by_id.get(task.existing_cluster_id, None)
                 if interactive_cluster is None:
                     continue
                 yield j, interactive_cluster
 
-            elif t.new_cluster is not None:
-                yield j, t.new_cluster
+            elif task.new_cluster is not None:
+                yield j, task.new_cluster
 
 
 class JobsCrawler(CrawlerBase[JobInfo], JobsMixin, CheckClusterMixin):

@@ -74,7 +74,7 @@ def _assess_jobs(self, all_jobs: list[BaseJob], all_clusters_by_id) -> Iterable[
         return list(job_details.values())
 
     @staticmethod
-    def _prepare(all_jobs):
+    def _prepare(all_jobs) -> tuple[dict[int, set[str]], dict[int, JobInfo]]:
         job_assessment: dict[int, set[str]] = {}
         job_details: dict[int, JobInfo] = {}
         for job in all_jobs:
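
The new annotation documents that _prepare returns two parallel dicts keyed by job id: the set of failure messages found for each job, and its JobInfo record. A standalone, hedged sketch of consuming that shape (the JobInfo stand-in and sample data below are invented, not from this diff):

import json
from dataclasses import dataclass

@dataclass
class JobInfo:  # minimal stand-in for the real databricks.labs.ucx JobInfo
    job_id: str
    failures: str = "[]"

# Shapes matching the annotation: tuple[dict[int, set[str]], dict[int, JobInfo]]
job_assessment: dict[int, set[str]] = {42: {"no security mode set"}}
job_details: dict[int, JobInfo] = {42: JobInfo(job_id="42")}

for job_id, failures in job_assessment.items():
    job_details[job_id].failures = json.dumps(sorted(failures))

print(job_details[42].failures)  # ["no security mode set"]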

src/databricks/labs/ucx/azure/access.py

Lines changed: 8 additions & 2 deletions
@@ -20,10 +20,16 @@ class StoragePermissionMapping:
 
 
 class AzureResourcePermissions:
-    def __init__(self, installation: Installation, ws: WorkspaceClient, azurerm: AzureResources, lc: ExternalLocations):
+    def __init__(
+        self,
+        installation: Installation,
+        ws: WorkspaceClient,
+        azurerm: AzureResources,
+        external_locations: ExternalLocations,
+    ):
         self._filename = 'azure_storage_account_info.csv'
         self._installation = installation
-        self._locations = lc
+        self._locations = external_locations
         self._azurerm = azurerm
         self._ws = ws
         self._levels = {

src/databricks/labs/ucx/azure/resources.py

Lines changed: 2 additions & 2 deletions
@@ -31,9 +31,9 @@ def __init__(self, resource_id: str):
         i = 0
         while i < len(split):
             k = split[i]
-            v = split[i + 1]
+            value = split[i + 1]
             i += 2
-            self._pairs[k] = v
+            self._pairs[k] = value
 
     @property
     def subscription_id(self):
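
For context on the loop above, a self-contained sketch of why the index steps by two: an Azure resource ID alternates key and value segments. The helper name and the lstrip/split handling here are assumptions, simplified from the class in the diff:

def parse_resource_pairs(resource_id: str) -> dict[str, str]:
    # "/subscriptions/abc/resourceGroups/rg1" -> ["subscriptions", "abc", "resourceGroups", "rg1"]
    split = resource_id.lstrip("/").split("/")
    pairs: dict[str, str] = {}
    i = 0
    while i < len(split) - 1:  # guard against a trailing key with no value
        key = split[i]
        value = split[i + 1]  # each key segment is followed by its value segment
        i += 2
        pairs[key] = value
    return pairs

assert parse_resource_pairs("/subscriptions/abc/resourceGroups/rg1") == {
    "subscriptions": "abc",
    "resourceGroups": "rg1",
}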

src/databricks/labs/ucx/cli.py

Lines changed: 3 additions & 3 deletions
@@ -170,10 +170,10 @@ def revert_migrated_tables(w: WorkspaceClient, schema: str, table: str, *, delet
     question = "You haven't specified a schema or a table. All migrated tables will be reverted. Continue?"
     if not prompts.confirm(question, max_attempts=2):
         return
-    tm = TablesMigrate.for_cli(w)
-    revert = tm.print_revert_report(delete_managed=delete_managed)
+    tables_migrate = TablesMigrate.for_cli(w)
+    revert = tables_migrate.print_revert_report(delete_managed=delete_managed)
     if revert and prompts.confirm("Would you like to continue?", max_attempts=2):
-        tm.revert_migrated_tables(schema, table, delete_managed=delete_managed)
+        tables_migrate.revert_migrated_tables(schema, table, delete_managed=delete_managed)
 
 
 @ucx.command

src/databricks/labs/ucx/configure.py

Lines changed: 13 additions & 13 deletions
@@ -21,18 +21,18 @@ def configure(self):
         """User may override standard job clusters with interactive clusters"""
         logger.info("Configuring cluster overrides from existing clusters")
 
-        def is_classic(c) -> bool:
+        def is_classic(cluster_info) -> bool:
             return (
-                c.state == compute.State.RUNNING
-                and c.spark_version >= MINIMUM_SPARK_VERSION
-                and c.data_security_mode == compute.DataSecurityMode.NONE
+                cluster_info.state == compute.State.RUNNING
+                and cluster_info.spark_version >= MINIMUM_SPARK_VERSION
+                and cluster_info.data_security_mode == compute.DataSecurityMode.NONE
             )
 
-        def is_tacl(c) -> bool:
+        def is_tacl(cluster_info) -> bool:
             return (
-                c.state == compute.State.RUNNING
-                and c.spark_version >= MINIMUM_SPARK_VERSION
-                and c.data_security_mode == compute.DataSecurityMode.LEGACY_TABLE_ACL
+                cluster_info.state == compute.State.RUNNING
+                and cluster_info.spark_version >= MINIMUM_SPARK_VERSION
+                and cluster_info.data_security_mode == compute.DataSecurityMode.LEGACY_TABLE_ACL
             )
 
         def build_and_prompt(prompt, clusters):

@@ -44,11 +44,11 @@ def build_and_prompt(prompt, clusters):
         # build list of valid active clusters
         classic_clusters = []
         tacl_clusters = []
-        for c in self._ws.clusters.list(can_use_client="NOTEBOOK"):
-            if is_classic(c):
-                classic_clusters.append(c)
-            if is_tacl(c):
-                tacl_clusters.append(c)
+        for cluster in self._ws.clusters.list(can_use_client="NOTEBOOK"):
+            if is_classic(cluster):
+                classic_clusters.append(cluster)
+            if is_tacl(cluster):
+                tacl_clusters.append(cluster)
 
         preamble = """
         We detected an install issue and

src/databricks/labs/ucx/framework/dashboards.py

Lines changed: 2 additions & 0 deletions
@@ -19,6 +19,8 @@
 
 logger = logging.getLogger(__name__)
 
+# pylint: disable=invalid-name
+
 
 @dataclass
 class SimpleQuery:
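
The pragma added above disables invalid-name for the entire module. For reference, pylint also supports narrower scoping with inline pragmas; a short sketch (the variable names are hypothetical, and note that module-level names are otherwise expected to be UPPER_CASE constants):

# Silence a single statement:
widget = None  # pylint: disable=invalid-name

# Or silence a region and re-enable afterwards:
# pylint: disable=invalid-name
dashboard_ref = "abc"
query_ref = "def"
# pylint: enable=invalid-name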

src/databricks/labs/ucx/hive_metastore/mapping.py

Lines changed: 10 additions & 10 deletions
@@ -96,27 +96,27 @@ def skip_table(self, schema: str, table: str):
             self._sql_backend.execute(
                 f"ALTER TABLE {escape_sql_identifier(schema)}.{escape_sql_identifier(table)} SET TBLPROPERTIES('{self.UCX_SKIP_PROPERTY}' = true)"
             )
-        except NotFound as nf:
-            if "[TABLE_OR_VIEW_NOT_FOUND]" in str(nf) or "[DELTA_TABLE_NOT_FOUND]" in str(nf):
+        except NotFound as err:
+            if "[TABLE_OR_VIEW_NOT_FOUND]" in str(err) or "[DELTA_TABLE_NOT_FOUND]" in str(err):
                 logger.error(f"Failed to apply skip marker for Table {schema}.{table}. Table not found.")
             else:
-                logger.error(f"Failed to apply skip marker for Table {schema}.{table}: {nf!s}", exc_info=True)
-        except BadRequest as br:
-            logger.error(f"Failed to apply skip marker for Table {schema}.{table}: {br!s}", exc_info=True)
+                logger.error(f"Failed to apply skip marker for Table {schema}.{table}: {err!s}", exc_info=True)
+        except BadRequest as err:
+            logger.error(f"Failed to apply skip marker for Table {schema}.{table}: {err!s}", exc_info=True)
 
     def skip_schema(self, schema: str):
         # Marks a schema to be skipped in the migration process by applying a table property
         try:
             self._sql_backend.execute(
                 f"ALTER SCHEMA {escape_sql_identifier(schema)} SET DBPROPERTIES('{self.UCX_SKIP_PROPERTY}' = true)"
             )
-        except NotFound as nf:
-            if "[SCHEMA_NOT_FOUND]" in str(nf):
+        except NotFound as err:
+            if "[SCHEMA_NOT_FOUND]" in str(err):
                 logger.error(f"Failed to apply skip marker for Schema {schema}. Schema not found.")
             else:
-                logger.error(nf)
-        except BadRequest as br:
-            logger.error(br)
+                logger.error(err)
+        except BadRequest as err:
+            logger.error(err)
 
     def get_tables_to_migrate(self, tables_crawler: TablesCrawler):
         rules = self.load()

src/databricks/labs/ucx/hive_metastore/table_migrate.py

Lines changed: 4 additions & 4 deletions
@@ -25,15 +25,15 @@
 class TablesMigrate:
     def __init__(
         self,
-        tc: TablesCrawler,
+        tables_crawler: TablesCrawler,
         ws: WorkspaceClient,
         backend: SqlBackend,
-        tm: TableMapping,
+        table_mapping: TableMapping,
     ):
-        self._tc = tc
+        self._tc = tables_crawler
         self._backend = backend
         self._ws = ws
-        self._tm = tm
+        self._tm = table_mapping
         self._seen_tables: dict[str, str] = {}
 
     @classmethod
