Skip to content

Commit 4d514dd

Browse files
authored
Fix typos 'lenght' -> 'length' (#78)
1 parent 4f5ca7f commit 4d514dd

File tree

2 files changed

+4
-4
lines changed

2 files changed

+4
-4
lines changed

gpt_oss/tools/simple_browser/page_contents.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -87,13 +87,13 @@ def mark_lines(text: str) -> str:
 @functools.cache
-def _tiktoken_vocabulary_lenghts(enc_name: str) -> list[int]:
+def _tiktoken_vocabulary_lengths(enc_name: str) -> list[int]:
     encoding = tiktoken.get_encoding(enc_name)
     return [len(encoding.decode([i])) for i in range(encoding.n_vocab)]


 def warmup_caches(enc_names: list[str]) -> None:
-    for _ in map(_tiktoken_vocabulary_lenghts, enc_names):
+    for _ in map(_tiktoken_vocabulary_lengths, enc_names):
         pass

gpt_oss/tools/simple_browser/simple_browser_tool.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -102,8 +102,8 @@ def max_chars_per_token(enc_name: str) -> int:
 def get_tokens(text: str, enc_name: str) -> Tokens:
     encoding = tiktoken.get_encoding(enc_name)
     tokens = encoding.encode(text, disallowed_special=())
-    _vocabulary_lenghts = _tiktoken_vocabulary_lengths(enc_name)
-    tok2idx = [0] + list(itertools.accumulate(_vocabulary_lenghts[i] for i in tokens))[
+    _vocabulary_lengths = _tiktoken_vocabulary_lengths(enc_name)
+    tok2idx = [0] + list(itertools.accumulate(_vocabulary_lengths[i] for i in tokens))[
         :-1
     ]
     result = Tokens(tokens=tokens, tok2idx=tok2idx)

0 commit comments

Comments
 (0)