Commit 52c43ca

gaogaotiantian authored and zhengruifeng committed
[SPARK-55480][PYTHON] Remove all unused noqa for ruff
### What changes were proposed in this pull request?

Removed all unused `# noqa` comments for the linter.

### Why are the changes needed?

We have accumulated years of `# noqa` comments for the linter. Many of them are unused (because the linter has gotten smarter). We removed all the unused ones and made `ruff` check for unused noqa going forward, just like `mypy` does.

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

CI, but this should be a comment-only change.

### Was this patch authored or co-authored using generative AI tooling?

No.

Closes #54264 from gaogaotiantian/remove-unused-noqa.

Authored-by: Tian Gao <gaogaotiantian@hotmail.com>
Signed-off-by: Ruifeng Zheng <ruifengz@apache.org>
1 parent 9c45ce4 · commit 52c43ca

35 files changed: +67 -68 lines
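A `# noqa` comment suppresses lint findings on its physical line; once the linter stops emitting a finding there, the comment is dead weight. A minimal sketch of the pattern this PR cleans up (hypothetical file, not from the diff below):

    import os  # noqa: F401

    # `os` is used below, so F401 never fires and the directive suppresses
    # nothing; with RUF100 enabled, `ruff check` reports the unused noqa and
    # `ruff check --fix` can remove the comment automatically.
    print(os.getcwd())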

dev/sparktestsupport/modules.py

Lines changed: 3 additions & 3 deletions
@@ -674,7 +674,7 @@ def __hash__(self):
         "pyspark.sql.tests.pandas.streaming.test_pandas_transform_with_state",
         "pyspark.sql.tests.pandas.streaming.test_pandas_transform_with_state_checkpoint_v2",
         "pyspark.sql.tests.pandas.streaming.test_pandas_transform_with_state_state_variable",
-        "pyspark.sql.tests.pandas.streaming.test_pandas_transform_with_state_state_variable_checkpoint_v2",  # noqa: E501
+        "pyspark.sql.tests.pandas.streaming.test_pandas_transform_with_state_state_variable_checkpoint_v2",
         "pyspark.sql.tests.pandas.streaming.test_transform_with_state",
         "pyspark.sql.tests.pandas.streaming.test_transform_with_state_checkpoint_v2",
         "pyspark.sql.tests.pandas.streaming.test_transform_with_state_state_variable",
@@ -1215,9 +1215,9 @@ def __hash__(self):
         "pyspark.sql.tests.connect.streaming.test_parity_foreach_batch",
         "pyspark.sql.tests.connect.pandas.streaming.test_parity_pandas_grouped_map_with_state",
         "pyspark.sql.tests.connect.pandas.streaming.test_parity_pandas_transform_with_state",
-        "pyspark.sql.tests.connect.pandas.streaming.test_parity_pandas_transform_with_state_state_variable",  # noqa: E501
+        "pyspark.sql.tests.connect.pandas.streaming.test_parity_pandas_transform_with_state_state_variable",
         "pyspark.sql.tests.connect.pandas.streaming.test_parity_transform_with_state",
-        "pyspark.sql.tests.connect.pandas.streaming.test_parity_transform_with_state_state_variable",  # noqa: E501
+        "pyspark.sql.tests.connect.pandas.streaming.test_parity_transform_with_state_state_variable",
     ],
     excluded_python_implementations=[
         "PyPy"  # Skip these tests under PyPy since they require numpy and it isn't available there

pyproject.toml

Lines changed: 2 additions & 1 deletion
@@ -38,7 +38,8 @@ exclude = [
 
 [tool.ruff.lint]
 extend-select = [
-    "G010"  # logging-warn
+    "G010",    # logging-warn
+    "RUF100",  # unused-noqa
 ]
 ignore = [
     "E402",  # Module top level import is disabled for optional import check, etc.

python/pyspark/accumulators.py

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@
 from pyspark.errors import PySparkRuntimeError
 
 if TYPE_CHECKING:
-    from pyspark._typing import SupportsIAdd  # noqa: F401
+    from pyspark._typing import SupportsIAdd
     import socketserver.BaseRequestHandler  # type: ignore[import-not-found]
 
 

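The `F401` here was presumably obsolete because pyflakes and ruff count a name referenced in type annotations, including one imported under `if TYPE_CHECKING:`, as used. A minimal sketch of that pattern (hypothetical names, not Spark code):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from decimal import Decimal  # imported only for the annotation below

    def double(x: "Decimal") -> "Decimal":  # a string annotation still counts as a use
        return x * 2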
python/pyspark/errors/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@
 """
 PySpark exceptions.
 """
-from pyspark.errors.exceptions.base import (  # noqa: F401
+from pyspark.errors.exceptions.base import (
     PySparkException,
     AnalysisException,
     SessionNotSameException,

python/pyspark/errors/exceptions/tblib.py

Lines changed: 1 addition & 3 deletions
@@ -206,9 +206,7 @@ def as_traceback(self) -> Optional[TracebackType]:
 
         # noinspection PyBroadException
         try:
-            exec(
-                code, dict(current.tb_frame.f_globals), dict(current.tb_frame.f_locals)
-            )  # noqa: S102
+            exec(code, dict(current.tb_frame.f_globals), dict(current.tb_frame.f_locals))
         except Exception:
             next_tb = sys.exc_info()[2].tb_next  # type: ignore
             if top_tb is None:
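`S102` is the flake8-bandit "use of exec" check under ruff's `S` prefix; the bandit rules do not appear to be in this project's selected set, so the suppression never did anything, and with the comment gone the call fits back on a single line.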

python/pyspark/errors_doc_gen.py

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ def generate_errors_doc(output_rst_file_path: str) -> None:
     This is a list of common, named error classes returned by PySpark which are defined at `error-conditions.json <https://github.com/apache/spark/blob/master/python/pyspark/errors/error-conditions.json>`_.
 
     When writing PySpark errors, developers must use an error class from the list. If an appropriate error class is not available, add a new one into the list. For more information, please refer to `Contributing Error and Exception <contributing.rst#contributing-error-and-exception>`_.
-    """  # noqa
+    """
     with open(output_rst_file_path, "w") as f:
         f.write(header + "\n\n")
         for error_key, error_details in ERROR_CLASSES_MAP.items():
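A bare `# noqa` with no rule code suppresses every finding on its physical line. The long lines sit earlier in the docstring, and nothing fires on the closing `"""` itself, so this blanket suppression was inert.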

python/pyspark/logger/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -18,6 +18,6 @@
 """
 PySpark logging
 """
-from pyspark.logger.logger import PySparkLogger, SPARK_LOG_SCHEMA  # noqa: F401
+from pyspark.logger.logger import PySparkLogger, SPARK_LOG_SCHEMA
 
 __all__ = ["PySparkLogger", "SPARK_LOG_SCHEMA"]
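This `F401` was presumably unused because both names appear in `__all__` (visible in the trailing context line), which pyflakes and ruff treat as a use of the import, so the re-export is never reported as unused.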

python/pyspark/ml/regression.py

Lines changed: 2 additions & 2 deletions
@@ -2180,7 +2180,7 @@ def __init__(
             0.9,
             0.95,
             0.99,
-        ],  # noqa: B005
+        ],
         quantilesCol: Optional[str] = None,
         aggregationDepth: int = 2,
         maxBlockSizeInMB: float = 0.0,
@@ -2220,7 +2220,7 @@ def setParams(
             0.9,
             0.95,
             0.99,
-        ],  # noqa: B005
+        ],
         quantilesCol: Optional[str] = None,
         aggregationDepth: int = 2,
         maxBlockSizeInMB: float = 0.0,
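`B005` is flake8-bugbear's check for `.strip()` called with a multi-character string; it has nothing to flag on the closing bracket of a default-argument list, so these directives were presumably leftovers that suppressed nothing.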

python/pyspark/mllib/_typing.pyi

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@
 from typing import List, Tuple, TYPE_CHECKING, TypeVar, Union
 
 from typing_extensions import Literal
-from numpy import ndarray  # noqa: F401
+from numpy import ndarray
 from py4j.java_gateway import JavaObject
 
 from pyspark.mllib.linalg import Vector

python/pyspark/pandas/plot/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -14,4 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from pyspark.pandas.plot.core import *  # noqa: F401,F403
+from pyspark.pandas.plot.core import *  # noqa: F403
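This is the one file where part of a directive survives: `F403` still fires on any star import (it prevents the linter from detecting undefined names) and must stay suppressed, while `F401` (imported but unused) no longer does. Roughly (hypothetical module name):

    # Star imports always trip F403, so that code must stay in the directive;
    # F401 ("imported but unused") no longer fires here, so it was dropped.
    from mypkg.core import *  # noqa: F403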
