Commit 5ec92ca

[docs] correct typos and update broken documentation links (#286)
1 parent b602030 commit 5ec92ca

File tree

9 files changed (+12, -12 lines)


.github/PULL_REQUEST_TEMPLATE.md

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ Make sure the following tasks are completed before submitting the PR:
 
 ### General
 
-- [ ] 📜 I have read and followed the [contributing guidelines](https://servicenow.github.io/Fast-LLM/developers/contributing).
+- [ ] 📜 I have read and followed the [contributing guidelines](https://servicenow.github.io/Fast-LLM/contributing/contributing).
 - [ ] 🏷️ I am using a clear and descriptive PR title that summarizes the key change or feature introduced.
 - [ ] 🎉 The functionality is complete, and I have tested the changes.
 - [ ] 📝 I have updated the documentation if needed.

CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion
@@ -1,3 +1,3 @@
 # Contributing to Fast-LLM
 
-Please refer to the [contributing guidelines](https://servicenow.github.io/Fast-LLM/developers/contributing) for more information on how to contribute to Fast-LLM.
+Please refer to the [contributing guidelines](https://servicenow.github.io/Fast-LLM/contributing/contributing/) for more information on how to contribute to Fast-LLM.

fast_llm/config.py

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
 
 
 class UpdateType(str, enum.Enum):
-    # Override entries no matter what they contais.
+    # Override entries no matter what they contain.
     override = "override"
     # Override atomic entries and lists, but update dicts recursively by setting or overriding only the specified entries.
     update = "update"
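To illustrate the difference between the two modes, here is a minimal sketch on plain Python dicts (the helper `recursive_update` below is hypothetical and is not Fast-LLM's config machinery):

import copy

def recursive_update(base: dict, update: dict) -> dict:
    # "update" mode: atomic entries and lists are replaced,
    # but nested dicts are merged key by key.
    result = copy.deepcopy(base)
    for key, value in update.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = recursive_update(result[key], value)
        else:
            result[key] = value
    return result

base = {"optimizer": {"lr": 1e-4, "beta1": 0.9}, "layers": [1, 2]}
patch = {"optimizer": {"lr": 3e-4}, "layers": [3]}

# "override" mode swaps entries wholesale: beta1 is lost along with the rest of the dict.
overridden = {**base, **patch}            # {"optimizer": {"lr": 3e-4}, "layers": [3]}
# "update" mode keeps beta1 and only changes lr; the list is still replaced.
updated = recursive_update(base, patch)   # {"optimizer": {"lr": 3e-4, "beta1": 0.9}, "layers": [3]}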

fast_llm/engine/inference/huggingface.py

Lines changed: 1 addition & 1 deletion
@@ -78,7 +78,7 @@ def from_pretrained(
         )
 
         # Create the model
-        # always set up model and crate distributed instance internally for now
+        # always set up model and create distributed instance internally for now
         fast_llm_model = cls.runner_class.model_class.from_pretrained(
             pretrained_model_name_or_path,
             *updates,

fast_llm/layers/ssm/discrete_mamba2.py

Lines changed: 2 additions & 2 deletions
@@ -11,7 +11,7 @@
 from fast_llm.tensor import ParameterMeta, init_ones_, init_uniform_, init_zeros_, kaiming_init_
 
 """
-This code is adapted fropm https://github.com/cartesia-ai/edge/blob/main/cartesia-pytorch/cartesia_pytorch/Llamba/mixers/discrete_mamba2.py
+This code is adapted from https://github.com/cartesia-ai/edge/blob/main/cartesia-pytorch/cartesia_pytorch/Llamba/mixers/discrete_mamba2.py
 """
 
 
@@ -65,7 +65,7 @@ def __init__(
         self.act = config.activation_type.activation_fn
         self.activation_name = config.activation_type.name
 
-        # TODO: double check innitializations
+        # TODO: double check initializations
         # Projections
         self.in_proj = Linear(td_model, td_inner_proj, bias=bias, weight_init_method=kaiming_init_(td_model.size))
         self.z_bias = (
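As context for `kaiming_init_(td_model.size)` above: a Kaiming/He-style initializer scales the weight standard deviation by the fan-in. A rough standalone sketch of the general technique (an assumption, not necessarily what Fast-LLM's `kaiming_init_` computes):

import math
import torch

def kaiming_normal_sketch(fan_in: int):
    # He-normal: zero-mean Gaussian with std proportional to 1 / sqrt(fan_in).
    std = math.sqrt(2.0 / fan_in)
    def init_(tensor: torch.Tensor, generator=None) -> torch.Tensor:
        return tensor.normal_(mean=0.0, std=std, generator=generator)
    return init_

weight = torch.empty(512, 256)
kaiming_normal_sketch(fan_in=256)(weight)  # fills in place with std = sqrt(2 / 256)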

fast_llm/layers/ssm/mamba_layer.py

Lines changed: 3 additions & 3 deletions
@@ -11,7 +11,7 @@
 from fast_llm.tensor import ParameterMeta, init_ones_, kaiming_init_
 
 """
-Note: this is mostly addapted from https://github.com/Zyphra/Zamba2, similar code is aslo in https://github.com/state-spaces/mamba.
+Note: this is mostly adapted from https://github.com/Zyphra/Zamba2, similar code is also in https://github.com/state-spaces/mamba.
 For now it only supports training and not inference.
 This works with triton 3.1.0
 """
@@ -20,7 +20,7 @@
 def init_A(d_state, d_inner) -> Callable[[ParameterMeta, torch.Tensor, torch.Generator], torch.Tensor]:
     def init_(meta: ParameterMeta, tensor: torch.Tensor, generator: torch.Generator): # noqa
         # S4D real initialization
-        # TODO: adopt this innitialization to work for tensor parallel setting!
+        # TODO: adopt this initialization to work for tensor parallel setting!
         A = einops.repeat(torch.arange(1, d_state + 1, dtype=torch.float32), "n -> d n", d=d_inner).contiguous()
         A_log = torch.log(A) # Keep A_log in fp32
         if tensor.shape != A_log.shape:
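Stripped of the ParameterMeta plumbing, the S4D real initialization above boils down to the following standalone sketch (tensor-parallel sharding, the subject of the TODO, is not handled here):

import einops
import torch

def s4d_real_init(d_state: int, d_inner: int) -> torch.Tensor:
    # Every one of the d_inner channels gets the same real spectrum 1..d_state,
    # stored in log space (the reference Mamba code later uses A = -exp(A_log)).
    A = einops.repeat(
        torch.arange(1, d_state + 1, dtype=torch.float32), "n -> d n", d=d_inner
    ).contiguous()
    return torch.log(A)  # A_log, kept in fp32

A_log = s4d_real_init(d_state=16, d_inner=1024)  # shape (1024, 16)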
@@ -106,7 +106,7 @@ def __init__(
         )
         self.x_proj.weight.auto_grad_accumulation = True
 
-        # TODO: the weights are innitialized a bit differently here https://github.com/state-spaces/mamba/blob/0cce0fa645f100f00620ddf2333c2b7712abfdec/mamba_ssm/modules/mamba_simple.py#L82
+        # TODO: the weights are initialized a bit differently here https://github.com/state-spaces/mamba/blob/0cce0fa645f100f00620ddf2333c2b7712abfdec/mamba_ssm/modules/mamba_simple.py#L82
         self.dt_proj_weight = ParameterMeta.from_dims(
             (td_inner, tdt_rank),
             init_method=kaiming_init_(tdt_rank.size),

fast_llm/layers/transformer/preprocessing.py

Lines changed: 1 addition & 1 deletion
@@ -109,7 +109,7 @@ def get_rotary_frequencies(
     # `exp(i * n * a) = cos(n * a) + i sin(n * a)`,
     # `a = theta ** - (2 * (channel // 2) / kv_channels)`,
     # where n is the position in the sequence.
-    # We preform the calculation in high precision because it matters for rotary embeddings.
+    # We perform the calculation in high precision because it matters for rotary embeddings.
     positions = torch.arange(sequence_length, device=device, dtype=torch.float64)
     frequencies = config.theta ** -torch.arange(0, 1, 2 / kv_channels, device=device, dtype=torch.float64)
     # Apply scaling
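Following the formula in the comments, the angles n * a can be computed in float64 roughly as below (a simplified sketch without the device handling, scaling, or layout used by Fast-LLM; the default theta of 10000 is the conventional RoPE base, not taken from the diff):

import torch

def rotary_angles_sketch(sequence_length: int, kv_channels: int, theta: float = 10000.0) -> torch.Tensor:
    # a = theta ** -(2 * (channel // 2) / kv_channels), evaluated once per channel pair.
    positions = torch.arange(sequence_length, dtype=torch.float64)
    frequencies = theta ** -torch.arange(0, 1, 2 / kv_channels, dtype=torch.float64)
    return torch.outer(positions, frequencies)  # shape (sequence_length, kv_channels // 2)

angles = rotary_angles_sketch(sequence_length=8, kv_channels=64)
cos, sin = torch.cos(angles), torch.sin(angles)  # real and imaginary parts of exp(i * n * a)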

fast_llm/utils.py

Lines changed: 1 addition & 1 deletion
@@ -290,7 +290,7 @@ def __call__(self, *args, **kwargs):
 
 def try_decorate(get_decorator: Callable, _return_decorator: bool = True) -> Callable:
     """
-    Try to decorate an object, but ignore the error until the object is actualy used.
+    Try to decorate an object, but ignore the error until the object is actually used.
     The wrapped decorator should always be instantiated before calling,
     i.e.. called as `@decorator()` rather than `@decorator`.
     """

setup.cfg

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@ OPTIONAL =
     # Hydra
     hydra-core>=1.3.2
     omegaconf>=2.3.0
-    # Miscellanous
+    # Miscellaneous
     requests>=2.32.3
     tqdm>=4.66.3
 
