
Commit 695cb0d

lingebeng authored and pytorchmergebot committed
[2/N][Fix] Fix typo in test folder (pytorch#166374)
Fix typo in test folder.

_typos.toml

```bash
[default.extend-words]
nd = "nd"
arange = "arange"
Nd = "Nd"
GLOBALs = "GLOBALs"
hte = "hte"
iy = "iy"
PN = "PN"
Dout = "Dout"
optin = "optin"
gam = "gam"
PTD = "PTD"
```

Pull Request resolved: pytorch#166374
Approved by: https://github.com/cyyever, https://github.com/ezyang
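For context, the `_typos.toml` snippet above is the configuration format of the crate-ci/typos spell-checker: `[default.extend-words]` maps each listed identifier to itself so the tool stops flagging legitimate names such as `nd`, `arange`, or `gam` as misspellings. Below is a minimal sketch of how such a check might be run locally. It assumes the `typos` CLI is installed and that the config sits at the repository root; the exact invocation used for this commit is not stated in the source.

```bash
# Minimal sketch, not necessarily the exact workflow behind this commit.
# Assumes the crate-ci/typos CLI is installed (e.g. `cargo install typos-cli`)
# and that the _typos.toml shown above lives at the repository root, where
# the tool discovers it automatically.
typos test/

# Alternatively, point at the config file explicitly:
typos --config _typos.toml test/
```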
1 parent 1764f3a commit 695cb0d


87 files changed: +169 −169 lines changed


test/conftest.py

Lines changed: 1 addition & 1 deletion
@@ -238,7 +238,7 @@ def pytest_pycollect_makemodule(module_path, path, parent) -> Module:
 
 @pytest.hookimpl(hookwrapper=True)
 def pytest_report_teststatus(report, config):
-# Add the test time to the verbose output, unforunately I don't think this
+# Add the test time to the verbose output, unfortunately I don't think this
 # includes setup or teardown
 pluggy_result = yield
 if not isinstance(report, pytest.TestReport):

test/cpp/api/autograd.cpp

Lines changed: 1 addition & 1 deletion
@@ -584,7 +584,7 @@ TEST(CustomAutogradTest, MarkDirty) {
 }
 };
 
-// Clone here because modifying leafs inplace is not allowed
+// Clone here because modifying leaves inplace is not allowed
 auto x = torch::randn({5, 5}, torch::requires_grad()).clone();
 auto version_before = x._version();
 auto out = MyFunction::apply(x);

test/cpp/api/parallel.cpp

Lines changed: 1 addition & 1 deletion
@@ -264,7 +264,7 @@ TEST_F(ParallelTest, DataParallelNumericalEquivalence_MultiCUDA) {
 input += i;
 input_dp += i;
 
-// non-prallel training
+// non-parallel training
 torch::optim::SGD optim(model->parameters(), torch::optim::SGDOptions(0.1));
 auto output = model->forward(input);
 auto loss = torch::mse_loss(output, torch::zeros_like(output));

test/cpp_extensions/open_registration_extension/torch_openreg/README.md

Lines changed: 1 addition & 1 deletion
@@ -188,7 +188,7 @@ Please refer to [this](https://docs.pytorch.org/docs/main/accelerator/index.html
 - Device-agnostic APIs
 - Memory Management
 - Generator
-- Distrubuted
+- Distributed
 - Custom Tensor&Storage
 - ...
 - **Improve Tests**: Add more test cases related to the integration mechanism.

test/distributed/checkpoint/test_planner.py

Lines changed: 1 addition & 1 deletion
@@ -216,7 +216,7 @@ def create_data(rank):
 # Number of plans should remain unchanged
 self.assertEqual(len(all_plans), len(deduped_plans))
 
-# Numer of items in the deduped plans should be less than the original plans
+# Number of items in the deduped plans should be less than the original plans
 for new_plan, old_plan in zip(deduped_plans, all_plans):
 self.assertFalse(_compare_save_plans(new_plan, old_plan))
 self.assertTrue(len(new_plan.items) < len(old_plan.items))

test/distributed/tensor/test_attention.py

Lines changed: 2 additions & 2 deletions
@@ -158,7 +158,7 @@ def _ring_attention_sdpa(
 # parameters because when require_grad is True, resize_ is not
 # allowed. But requires_grad of cp_q, cp_k, and cp_v are False
 # now. So we can just use context_parallel() to shard q, k, v.
-# In reality, context_paralle() should be used to shard the input.
+# In reality, context_parallel() should be used to shard the input.
 # In reality, context_parallel() should only be used to shard
 # the model inputs (batch).
 
@@ -701,7 +701,7 @@ def test_cp_flex_attention_document_mask(self) -> None:
 )
 
 # TODO: change this for-loop to run_subtests
-# Use a for-loop instead of run_subtests because we need to intialize the mask
+# Use a for-loop instead of run_subtests because we need to initialize the mask
 # for each subtest. This can be baked into self._test_cp_flex_attention as
 # a str argument denoting mask type.
 for batch_size, max_seq_len, lb_type in itertools.product(

test/distributed/test_c10d_nccl.py

Lines changed: 1 addition & 1 deletion
@@ -4901,7 +4901,7 @@ def test_batched_send_recv(self, op_sizes_per_coalesce, timing_enabled):
 for p2p_op_idx, input_sizes in zip(
 range(first_op, coalesced_op, 1), op_sizes_per_coalesce
 ):
-# the indivudal ops inside the coalescing group the individual op metadata,
+# the individual ops inside the coalescing group the individual op metadata,
 # but not the timing info coming from the actual coalesced kernel
 profiling_name = (
 "nccl:recv 0<-1" if self.rank == 0 else "nccl:send 1->0"

test/distributed/test_nvshmem.py

Lines changed: 4 additions & 4 deletions
@@ -398,7 +398,7 @@ def test_all_to_all_vdev_2d(self, align: int) -> None:
 nsplits, dtype=torch.int64, device=self.device
 ).copy_(inp_splits)
 # 2 rows: output splits, output offsets
-# Initiallizing all values to -1 to check if they are updated
+# Initializing all values to -1 to check if they are updated
 out_splits_offsets = symm_mem.empty(
 (2, nsplits), dtype=torch.int64, device=self.device
 ).fill_(-1)
@@ -503,7 +503,7 @@ def test_all_to_all_vdev_2d_offset(self) -> None:
 (2, nsplits), dtype=torch.int64, device=self.device
 )
 # 2 rows: output splits, output offsets
-# Initiallizing all values to -1 to check if they are updated
+# Initializing all values to -1 to check if they are updated
 out_splits_offsets = symm_mem.empty(
 (2, nsplits), dtype=torch.int64, device=self.device
 ).fill_(-1)
@@ -617,15 +617,15 @@ def dispatch_then_combine(device, align: int, group) -> None:
 inp_splits
 )
 # 2 rows: output splits, output offsets
-# Initiallizing all values to -1 to check if they are updated
+# Initializing all values to -1 to check if they are updated
 out_splits_offsets = symm_mem.empty(
 (2, nsplits), dtype=torch.int64, device=device
 ).fill_(-1)
 
 # Buffers for combine
 combine_out = symm_mem.empty(max_out_numel, dtype=dtype, device=device).fill_(-1)
 # 2 rows: output splits, output offsets
-# Initiallizing all values to -1 to check if they are updated
+# Initializing all values to -1 to check if they are updated
 combine_out_splits_offsets = symm_mem.empty(
 (2, nsplits), dtype=torch.int64, device=device
 ).fill_(-1)

test/distributed/test_symmetric_memory.py

Lines changed: 2 additions & 2 deletions
@@ -274,7 +274,7 @@ def test_subgroup(self) -> None:
 self.assertTrue(buf.eq(peer_rank + world.size() // 2).all())
 
 
-# We move AsyncTP tests to a seperate test suite because 1) Async TP ops are not
+# We move AsyncTP tests to a separate test suite because 1) Async TP ops are not
 # the core symmetric memory APIs, they are more like applications, 2)
 # MultiProcContinuousTest will skip all the following tests if a test fails (
 # we should fix this too). We still want to get the test signals for the core
@@ -621,7 +621,7 @@ def test_optimal_layout(self, dim: int) -> None:
 
 # [READ ME FIRST]
 # The `SymmMemEmptySetDeviceTest` suite parameterizes whether user sets the
-# device before calling symm_mem.emtpy. Either way should work.
+# device before calling symm_mem.empty. Either way should work.
 # However, since `set_device` is persistent, we cannot use the
 # `MultiProcContinuousTest` template because the next function will be
 # "contaminated", leading to flaky tests (e.g. hang). Therefore, we use

test/dynamo/cpython/3_13/mathdata/ieee754.txt

Lines changed: 1 addition & 1 deletion
@@ -51,7 +51,7 @@ nan
 >>> INF / INF
 nan
 
-However unambigous operations with inf return inf:
+However unambiguous operations with inf return inf:
 >>> INF * INF
 inf
 >>> 1.5 * INF
