
Commit 34ce1a1

Change ruff line-length setting to 120 (#3675)
## Changes

This changes the ruff formatter line-length from 150 to 120.

## Why 120

* PEP8 line length is 79 ← _this is extremely tight_
* Black and Ruff default line length is 88 ← _this is good since it's a tooling default, but still rather tight_
* The current line length was 150, chosen to balance line length against spreading code over many lines
* The [workspace formatter](https://docs.databricks.com/aws/en/notebooks/notebooks-code#python-black-formatter-library) defaults to 120 ← _this seems like a sweet spot for us; it means our templates by default use the same setting as the workspace_

Note that for IDE support of our templates, we could set a `tool.black` section as done in https://github.com/databricks/cli/pull/3671/files?w=1#diff-ed1a9755eebbb3b630edc93a5731f9dd8caa66b9967f9a57234c6ce9bde88c33R25-R26. Doing that is out of scope for the present PR.

## Tests

Standard acceptance tests.

---------

Co-authored-by: Claude <[email protected]>
1 parent ddbf610 commit 34ce1a1
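For a sense of what the new limit changes in practice, here is a hedged sketch (all names below are invented for illustration): a call that fits within 150 columns but not 120 gets wrapped by the formatter, one argument per line with a trailing comma, which is the pattern visible throughout the diffs below.

```python
# Hypothetical example; summarize_fares and its arguments are invented.
def summarize_fares(table_name: str, group_column: str, amount_column: str, output_alias: str) -> str:
    # Stand-in for a real aggregation; it only describes what would be computed.
    return f"sum of {amount_column} from {table_name} grouped by {group_column} as {output_alias}"


# At line-length = 150 this call fit on a single line; at 120 the formatter
# spreads it over multiple lines with a trailing comma.
summary = summarize_fares(
    table_name="samples.nyctaxi.trips",
    group_column="pickup_zip",
    amount_column="fare_amount",
    output_alias="total_fare",
)
print(summary)
```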

File tree

23 files changed: +137 -34 lines


acceptance/bin/yamlcheck.py

Lines changed: 15 additions & 3 deletions
@@ -33,9 +33,21 @@ def main():
         if original_content != formatted_content:
             has_changes.append(str(yaml_file))
             # Add $ markers for trailing whitespace
-            original_with_markers = [line.rstrip("\n") + ("$" if line.rstrip() != line.rstrip("\n") else "") + "\n" for line in original_content]
-            formatted_with_markers = [line.rstrip("\n") + ("$" if line.rstrip() != line.rstrip("\n") else "") + "\n" for line in formatted_content]
-            diff = unified_diff(original_with_markers, formatted_with_markers, fromfile=str(yaml_file), tofile=str(yaml_file), lineterm="")
+            original_with_markers = [
+                line.rstrip("\n") + ("$" if line.rstrip() != line.rstrip("\n") else "") + "\n"
+                for line in original_content
+            ]
+            formatted_with_markers = [
+                line.rstrip("\n") + ("$" if line.rstrip() != line.rstrip("\n") else "") + "\n"
+                for line in formatted_content
+            ]
+            diff = unified_diff(
+                original_with_markers,
+                formatted_with_markers,
+                fromfile=str(yaml_file),
+                tofile=str(yaml_file),
+                lineterm="",
+            )
             print("".join(diff))

     if has_changes:
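The reformatted comprehensions keep a small trick from the original one-liners: each line gets a `$` suffix when it carries trailing whitespace, so whitespace-only differences show up in the unified diff output. A self-contained sketch of the same idea (the sample YAML content and file name are invented):

```python
from difflib import unified_diff

# Invented sample input: the "items:" line in `before` carries trailing spaces.
before = ["key: value\n", "items:   \n", "  - a\n"]
after = ["key: value\n", "items:\n", "  - a\n"]


def mark_trailing_whitespace(lines):
    # Suffix a "$" to any line that still ends in whitespace once the newline is
    # stripped, so trailing spaces become visible in the diff output.
    return [line.rstrip("\n") + ("$" if line.rstrip() != line.rstrip("\n") else "") + "\n" for line in lines]


diff = unified_diff(
    mark_trailing_whitespace(before),
    mark_trailing_whitespace(after),
    fromfile="example.yml",
    tofile="example.yml",
    lineterm="",
)
print("".join(diff))
```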

acceptance/bundle/templates/default-python/classic/output/my_default_python/tests/conftest.py

Lines changed: 3 additions & 1 deletion
@@ -10,7 +10,9 @@
     from pyspark.sql import SparkSession
     import pytest
 except ImportError:
-    raise ImportError("Test dependencies not found.\n\nRun tests using 'uv run pytest'. See http://docs.astral.sh/uv to learn more about uv.")
+    raise ImportError(
+        "Test dependencies not found.\n\nRun tests using 'uv run pytest'. See http://docs.astral.sh/uv to learn more about uv."
+    )


 def enable_fallback_compute():
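The guard being rewrapped here is the usual conftest idiom for optional test dependencies: try the imports and re-raise `ImportError` with actionable guidance. A minimal standalone sketch (the imported module name is a placeholder; the real file imports `pyspark` and `pytest`):

```python
# Minimal sketch of the conftest guard; "nonexistent_test_dependency" is a placeholder.
try:
    import nonexistent_test_dependency  # the template imports pyspark.sql.SparkSession and pytest here
except ImportError:
    raise ImportError(
        "Test dependencies not found.\n\nRun tests using 'uv run pytest'. See http://docs.astral.sh/uv to learn more about uv."
    )
```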

acceptance/bundle/templates/default-python/serverless/output/my_default_python/tests/conftest.py

Lines changed: 3 additions & 1 deletion
@@ -10,7 +10,9 @@
     from pyspark.sql import SparkSession
     import pytest
 except ImportError:
-    raise ImportError("Test dependencies not found.\n\nRun tests using 'uv run pytest'. See http://docs.astral.sh/uv to learn more about uv.")
+    raise ImportError(
+        "Test dependencies not found.\n\nRun tests using 'uv run pytest'. See http://docs.astral.sh/uv to learn more about uv."
+    )


 def enable_fallback_compute():

acceptance/bundle/templates/lakeflow-pipelines/python/output/my_lakeflow_pipelines/resources/lakeflow_pipelines_etl/transformations/sample_zones_my_lakeflow_pipelines.py

Lines changed: 5 additions & 1 deletion
@@ -10,4 +10,8 @@
 @dp.table
 def sample_zones_my_lakeflow_pipelines():
     # Read from the "sample_trips" table, then sum all the fares
-    return spark.read.table(f"sample_trips_my_lakeflow_pipelines").groupBy(col("pickup_zip")).agg(sum("fare_amount").alias("total_fare"))
+    return (
+        spark.read.table(f"sample_trips_my_lakeflow_pipelines")
+        .groupBy(col("pickup_zip"))
+        .agg(sum("fare_amount").alias("total_fare"))
+    )
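Once a method chain no longer fits on one line, the formatter wraps it in parentheses with one call per line, as in the return statement above. The same layout works for any chained calls; a self-contained sketch using plain string methods instead of Spark:

```python
# Self-contained illustration of the parenthesized chain style used above
# (plain string methods instead of Spark DataFrame calls).
raw = "  Pickup_Zip,Fare_Amount  "

# Wrapping the expression in parentheses lets each call sit on its own line
# without backslash continuations, matching how the formatter lays out the
# DataFrame chain in the diff.
header = (
    raw.strip()
    .lower()
    .replace(",", " | ")
)
print(header)  # -> "pickup_zip | fare_amount"
```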

acceptance/install_terraform.py

Lines changed: 4 additions & 4 deletions
@@ -68,9 +68,7 @@ def main():
     terraform_provider_version = args.provider_version

     terraform_provider_file = f"terraform-provider-databricks_{terraform_provider_version}_{os_name}_{arch}.zip"
-    terraform_provider_url = (
-        f"https://github.com/databricks/terraform-provider-databricks/releases/download/v{terraform_provider_version}/{terraform_provider_file}"
-    )
+    terraform_provider_url = f"https://github.com/databricks/terraform-provider-databricks/releases/download/v{terraform_provider_version}/{terraform_provider_file}"

     target.mkdir(exist_ok=True, parents=True)

@@ -90,7 +88,9 @@ def main():
     terraform_path.chmod(0o755)

     tfplugins_path = target / "tfplugins"
-    provider_dir = Path(tfplugins_path / f"registry.terraform.io/databricks/databricks/{terraform_provider_version}/{os_name}_{arch}")
+    provider_dir = Path(
+        tfplugins_path / f"registry.terraform.io/databricks/databricks/{terraform_provider_version}/{os_name}_{arch}"
+    )
     if not provider_dir.exists():
         print(f"Extracting {terraform_provider_path} -> {provider_dir}")
         os.makedirs(provider_dir, exist_ok=True)
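The two hunks above go in opposite directions: the provider URL collapses back onto one long line, while the `Path(...)` call gets wrapped. Presumably this is because a single f-string literal has no break points the formatter can use, whereas a call expression can be split at its parentheses. A sketch of the distinction, with placeholder values so it runs standalone:

```python
from pathlib import Path

# Placeholder values, invented so the snippet runs on its own.
terraform_provider_version = "1.0.0"
os_name, arch = "linux", "amd64"
terraform_provider_file = f"terraform-provider-databricks_{terraform_provider_version}_{os_name}_{arch}.zip"
tfplugins_path = Path("build/tfplugins")

# A single string literal has no natural break points, so formatters generally leave
# it on one line even past the configured limit.
terraform_provider_url = f"https://github.com/databricks/terraform-provider-databricks/releases/download/v{terraform_provider_version}/{terraform_provider_file}"

# A call expression, by contrast, can be wrapped at its parentheses to stay under 120.
provider_dir = Path(
    tfplugins_path / f"registry.terraform.io/databricks/databricks/{terraform_provider_version}/{os_name}_{arch}"
)
print(terraform_provider_url)
print(provider_dir)
```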

acceptance/pipelines/e2e/output/my_project/transformations/sample_trips_my_project.py

Lines changed: 3 additions & 1 deletion
@@ -10,4 +10,6 @@

 @dlt.table
 def sample_trips_my_project():
-    return spark.read.table("samples.nyctaxi.trips").withColumn("trip_distance_km", utils.distance_km(col("trip_distance")))
+    return spark.read.table("samples.nyctaxi.trips").withColumn(
+        "trip_distance_km", utils.distance_km(col("trip_distance"))
+    )

acceptance/pipelines/e2e/output/my_project/transformations/sample_zones_my_project.py

Lines changed: 5 additions & 1 deletion
@@ -10,4 +10,8 @@
 @dlt.table
 def sample_zones_my_project():
     # Read from the "sample_trips" table, then sum all the fares
-    return spark.read.table(f"sample_trips_my_project").groupBy(col("pickup_zip")).agg(sum("fare_amount").alias("total_fare"))
+    return (
+        spark.read.table(f"sample_trips_my_project")
+        .groupBy(col("pickup_zip"))
+        .agg(sum("fare_amount").alias("total_fare"))
+    )

acceptance/pipelines/init/error-cases/output/my_project/transformations/sample_trips_my_project.py

Lines changed: 3 additions & 1 deletion
@@ -10,4 +10,6 @@

 @dlt.table
 def sample_trips_my_project():
-    return spark.read.table("samples.nyctaxi.trips").withColumn("trip_distance_km", utils.distance_km(col("trip_distance")))
+    return spark.read.table("samples.nyctaxi.trips").withColumn(
+        "trip_distance_km", utils.distance_km(col("trip_distance"))
+    )

acceptance/pipelines/init/error-cases/output/my_project/transformations/sample_zones_my_project.py

Lines changed: 5 additions & 1 deletion
@@ -10,4 +10,8 @@
 @dlt.table
 def sample_zones_my_project():
     # Read from the "sample_trips" table, then sum all the fares
-    return spark.read.table(f"sample_trips_my_project").groupBy(col("pickup_zip")).agg(sum("fare_amount").alias("total_fare"))
+    return (
+        spark.read.table(f"sample_trips_my_project")
+        .groupBy(col("pickup_zip"))
+        .agg(sum("fare_amount").alias("total_fare"))
+    )

acceptance/pipelines/init/python/output/my_python_project/transformations/sample_trips_my_python_project.py

Lines changed: 3 additions & 1 deletion
@@ -10,4 +10,6 @@

 @dlt.table
 def sample_trips_my_python_project():
-    return spark.read.table("samples.nyctaxi.trips").withColumn("trip_distance_km", utils.distance_km(col("trip_distance")))
+    return spark.read.table("samples.nyctaxi.trips").withColumn(
+        "trip_distance_km", utils.distance_km(col("trip_distance"))
+    )
