
Commit f544a53

fix typos discovered by codespell (#669)
Co-authored-by: Tom Hu <[email protected]>
1 parent 1c24461 commit f544a53

File tree: 11 files changed (+20, -20 lines changed)


README.md

Lines changed: 1 addition & 1 deletion
@@ -126,7 +126,7 @@ Codecov-cli supports user input. These inputs, along with their descriptions and
 | `create-report-results` | Used for local upload. It tells codecov that you finished local uploading and want it to calculate the results for you to get them locally.
 | `get-report-results` | Used for local upload. It asks codecov to provide you the report results you calculated with the previous command.
 | `pr-base-picking` | Tells codecov that you want to explicitly define a base for your PR
-| `upload-process` | A wrapper for 3 commands. Create-commit, create-report and do-upload. You can use this command to upload to codecov instead of using the previosly mentioned commands.
+| `upload-process` | A wrapper for 3 commands. Create-commit, create-report and do-upload. You can use this command to upload to codecov instead of using the previously mentioned commands.
 | `send-notifications` | A command that tells Codecov that you finished uploading and you want to be sent notifications. To disable automatically sent notifications please consider adding manual_trigger to your codecov.yml, so it will look like codecov: notify: manual_trigger: true.
 >**Note**: Every command has its own different options that will be mentioned later in this doc. Codecov will try to load these options from your CI environment variables, if not, it will try to load them from git, if not found, you may need to add them manually.

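For readers of the README table above, a minimal usage sketch in Python. It assumes the CLI is installed with an entry point named `codecovcli` and that an upload token is available as `CODECOV_TOKEN`; neither detail appears in this diff, so treat both as assumptions.

```python
# Hedged sketch: invoking the `upload-process` wrapper described in the README.
# Assumptions (not shown in this diff): the executable is `codecovcli` and the
# upload token is supplied via the CODECOV_TOKEN environment variable.
import os
import subprocess

result = subprocess.run(
    ["codecovcli", "upload-process"],  # wraps create-commit, create-report and do-upload
    env={**os.environ},                # CODECOV_TOKEN expected to be set externally
    capture_output=True,
    text=True,
)
print(result.stdout)
if result.returncode != 0:
    print(result.stderr)
```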
codecov_cli/commands/labelanalysis.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -259,7 +259,7 @@ def _parse_runner_params(runner_params: List[str]) -> Dict[str, str]:
259259
# a good reason for the param to include '=' in the value.
260260
if param.count("=") == 0:
261261
logger.warning(
262-
f"Runner param {param} is not well formated. Setting value to None. Use '--runner-param key=value' to set value"
262+
f"Runner param {param} is not well formatted. Setting value to None. Use '--runner-param key=value' to set value"
263263
)
264264
final_params[param] = None
265265
else:

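The hunk above shows only the malformed-parameter branch of `_parse_runner_params`. As a rough sketch of the overall key=value parsing it implies (the function name below and the split-on-first-'=' behaviour outside the visible lines are assumptions):

```python
# Illustrative re-creation based on the branch visible in the diff: a param
# without '=' is warned about and stored with value None; otherwise it is
# split on the first '=' only, so values may themselves contain '='.
import logging
from typing import Dict, List, Optional

logger = logging.getLogger(__name__)

def parse_runner_params_sketch(runner_params: List[str]) -> Dict[str, Optional[str]]:
    final_params = {}
    for param in runner_params:
        if param.count("=") == 0:
            logger.warning(
                f"Runner param {param} is not well formatted. Setting value to None. "
                "Use '--runner-param key=value' to set value"
            )
            final_params[param] = None
        else:
            key, value = param.split("=", 1)
            final_params[key] = value
    return final_params

# parse_runner_params_sketch(["coverage_root=./src", "verbose"])
# -> {"coverage_root": "./src", "verbose": None}, with a warning for "verbose"
```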
codecov_cli/plugins/pycoverage.py

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@ def project_root(self) -> typing.Optional[pathlib.Path]:
     def report_type(self) -> str:
         """
         Report type to generate.
-        Overrided if include_contexts == True
+        Overridden if include_contexts == True
         report_type: str [values xml|json; default xml]
         """
         return self.get("report_type", "xml")

codecov_cli/runners/pytest_standard_runner.py

Lines changed: 1 addition & 1 deletion
@@ -76,7 +76,7 @@ def _possibly_warn_bad_config(self, config_params: dict):
         provided_config_params = config_params.keys()
         for provided_param in provided_config_params:
             if provided_param not in available_config_params:
-                logger.warning(f"Config parameter '{provided_param}' is unknonw.")
+                logger.warning(f"Config parameter '{provided_param}' is unknown.")
 
     def parse_captured_output_error(self, exp: CalledProcessError) -> str:
         result = ""

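The corrected warning comes from a check of user-supplied config keys against the runner's known parameters. A hedged sketch of that pattern follows; the contents of `AVAILABLE_CONFIG_PARAMS` are invented for illustration and are not the runner's real option names.

```python
import logging

logger = logging.getLogger(__name__)

# Hypothetical set of accepted keys, for illustration only; the real runner
# derives its accepted parameters from its own config class.
AVAILABLE_CONFIG_PARAMS = {"python_path", "collect_tests_options"}

def possibly_warn_bad_config_sketch(config_params: dict) -> None:
    # Mirrors the loop visible in the diff: every provided key that is not
    # recognised triggers a warning, but the provided value is still kept.
    for provided_param in config_params.keys():
        if provided_param not in AVAILABLE_CONFIG_PARAMS:
            logger.warning(f"Config parameter '{provided_param}' is unknown.")

# possibly_warn_bad_config_sketch({"python_path": "python3", "some_missing_option": "option"})
# logs: Config parameter 'some_missing_option' is unknown.
```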
codecov_cli/services/report/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -138,7 +138,7 @@ def send_reports_result_get_request(
     state = response_content.get("state").lower()
     if state == "error":
         logger.error(
-            "An error occured while processing the report. Please try again later.",
+            "An error occurred while processing the report. Please try again later.",
             extra=dict(
                 extra_log_attributes=dict(
                     response_status_code=response_obj.status_code,

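The fixed message is logged when the report-results endpoint returns an "error" state. A condensed sketch of that branch, with the response flattened into plain arguments for illustration (the real code reads them from an HTTP response object):

```python
import logging

logger = logging.getLogger(__name__)

def handle_report_results_sketch(response_status_code: int, response_content: dict) -> None:
    # Condensed from the branch in the diff: a terminal "error" state is
    # logged, with status code, state and result attached as structured
    # extra data (the project's log formatter renders these after "---").
    state = response_content.get("state", "").lower()
    if state == "error":
        logger.error(
            "An error occurred while processing the report. Please try again later.",
            extra=dict(
                extra_log_attributes=dict(
                    response_status_code=response_status_code,
                    state=state,
                    result=response_content.get("result"),
                )
            ),
        )

# handle_report_results_sketch(200, {"state": "error", "result": {}})
```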
codecov_cli/services/staticanalysis/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -39,7 +39,7 @@ async def run_analysis_entrypoint(
     files = list(ff.find_files(folder, pattern, folders_to_exclude))
     processing_results = await process_files(files, numberprocesses, config)
     # Let users know if there were processing errors
-    # This is here and not in the funcition so we can add an option to ignore those (possibly)
+    # This is here and not in the function so we can add an option to ignore those (possibly)
     # Also makes the function easier to test
     processing_errors = processing_results["processing_errors"]
     log_processing_errors(processing_errors)

codecov_cli/services/staticanalysis/analyzers/python/node_wrappers.py

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@ def visit(self, node: Node):
             self.visit(c)
 
     def _is_function_docstring(self, node: Node):
-        """Skips docstrings for funtions, such as this one.
+        """Skips docstrings for functions, such as this one.
         Pytest doesn't include them in the report, so I don't think we should either,
         at least for now.
         """

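The docstring fixed above documents a check that filters out function docstrings during static analysis. The repository does this over its own `Node` wrappers; purely as an illustration of the same idea using the standard-library `ast` module (a swapped-in technique, not the project's implementation):

```python
import ast

def is_function_docstring_sketch(func: ast.FunctionDef) -> bool:
    # A docstring is a bare string expression appearing as the first
    # statement of the function body.
    if not func.body:
        return False
    first = func.body[0]
    return (
        isinstance(first, ast.Expr)
        and isinstance(first.value, ast.Constant)
        and isinstance(first.value.value, str)
    )

tree = ast.parse('def f():\n    """Skips docstrings for functions, such as this one."""\n    return 1\n')
print(is_function_docstring_sketch(tree.body[0]))  # True
```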
tests/helpers/test_folder_searcher.py

Lines changed: 1 addition & 1 deletion
@@ -256,7 +256,7 @@ def test_search_directories(tmp_path):
     filename_include_regex = globs_to_regex(["*.app"])
     filepaths = [
         "banana.app/path/of/directory.txt",
-        "path/to/apple.app/path/of/directorys",
+        "path/to/apple.app/path/of/directories",
         "path/to/banana.app/folder/test.txt",
         "apple.py",
         "banana.py",

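The corrected path is test data for `globs_to_regex`. As a loose illustration of glob-to-regex matching using `fnmatch.translate` (which may not match the helper's exact pattern-building):

```python
# Approximate a globs_to_regex(["*.app"]) result with fnmatch.translate;
# the repository's helper may build its pattern differently.
import re
from fnmatch import translate

filename_include_regex = re.compile(translate("*.app"))

for name in ["banana.app", "apple.py"]:
    print(name, bool(filename_include_regex.match(name)))
# banana.app True
# apple.py False
```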
tests/runners/test_pytest_standard_runner.py

Lines changed: 10 additions & 10 deletions
@@ -56,7 +56,7 @@ def test_warning_bad_config(self, mock_warning):
         runner = PytestStandardRunner(params)
         # Adding invalid config options emits a warning
         mock_warning.assert_called_with(
-            "Config parameter 'some_missing_option' is unknonw."
+            "Config parameter 'some_missing_option' is unknown."
         )
         # Warnings don't change the config
         assert runner.params == {**params, "some_missing_option": "option"}
@@ -91,7 +91,7 @@ def side_effect(command, *args, **kwargs):
                 cmd=command,
                 returncode=2,
                 output=b"Process running up to here...",
-                stderr=b"Some error occured",
+                stderr=b"Some error occurred",
             )
 
         mock_subprocess.run.side_effect = side_effect
@@ -100,7 +100,7 @@ def side_effect(command, *args, **kwargs):
             _ = self.runner._execute_pytest(["--option", "--ignore=batata"])
         assert (
             str(exp.value)
-            == "Pytest exited with non-zero code 2.\nThis is likely not a problem with label-analysis. Check pytest's output and options.\nPYTEST OUTPUT:\nProcess running up to here...\nSome error occured"
+            == "Pytest exited with non-zero code 2.\nThis is likely not a problem with label-analysis. Check pytest's output and options.\nPYTEST OUTPUT:\nProcess running up to here...\nSome error occurred"
         )
 
     @patch("codecov_cli.runners.pytest_standard_runner.subprocess")
@@ -112,7 +112,7 @@ def side_effect(command, *args, **kwargs):
             raise CalledProcessError(
                 cmd=command,
                 returncode=2,
-                stderr=b"Some error occured",
+                stderr=b"Some error occurred",
             )
 
         mock_subprocess.run.side_effect = side_effect
@@ -134,26 +134,26 @@ def side_effect(command, *args, **kwargs):
                     cmd=["python", "-m", "pytest", "missing_label"],
                     returncode=2,
                     output=b"Process running up to here...",
-                    stderr=b"Some error occured",
+                    stderr=b"Some error occurred",
                 ),
-                "\nProcess running up to here...\nSome error occured",
+                "\nProcess running up to here...\nSome error occurred",
             ),
             (
                 CalledProcessError(
                     cmd=["python", "-m", "pytest", "missing_label"],
                     returncode=2,
                     output="Process running up to here...",
-                    stderr="Some error occured",
+                    stderr="Some error occurred",
                 ),
-                "\nProcess running up to here...\nSome error occured",
+                "\nProcess running up to here...\nSome error occurred",
             ),
             (
                 CalledProcessError(
                     cmd=["python", "-m", "pytest", "missing_label"],
                     returncode=2,
-                    stderr=b"Some error occured",
+                    stderr=b"Some error occurred",
                 ),
-                "\nSome error occured",
+                "\nSome error occurred",
             ),
         ],
     )

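The parametrised cases above pin down what `parse_captured_output_error` returns: output (when present) and stderr, decoded if given as bytes, each appended after a newline. A sketch consistent with those expectations (not the runner's actual implementation):

```python
# Inferred from the test expectations only: each of output and stderr, when
# present, is decoded if it is bytes and appended preceded by a newline.
from subprocess import CalledProcessError

def parse_captured_output_error_sketch(exp: CalledProcessError) -> str:
    result = ""
    for captured in (exp.output, exp.stderr):
        if captured:
            if isinstance(captured, bytes):
                captured = captured.decode()
            result += "\n" + captured
    return result

err = CalledProcessError(
    cmd=["python", "-m", "pytest", "missing_label"],
    returncode=2,
    output=b"Process running up to here...",
    stderr=b"Some error occurred",
)
print(repr(parse_captured_output_error_sketch(err)))
# '\nProcess running up to here...\nSome error occurred'
```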
tests/services/report/test_report_results.py

Lines changed: 1 addition & 1 deletion
@@ -215,7 +215,7 @@ def test_get_report_results_200_error(mocker, capsys):
     mocked_response.assert_called_once()
     assert (
         "error",
-        'An error occured while processing the report. Please try again later. --- {"response_status_code": 200, "state": "error", "result": {}}',
+        'An error occurred while processing the report. Please try again later. --- {"response_status_code": 200, "state": "error", "result": {}}',
     ) in output