Skip to content

Commit 5cb76bc

Browse files
Maksym Matviievskyi
authored and committed
fix: examples/swiftui llama_model -> llama_vocab
1 parent 5598f47 commit 5cb76bc

File tree

2 files changed

+7
-7
lines changed

2 files changed

+7
-7
lines changed

examples/batched.swift/Sources/main.swift

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -207,7 +207,7 @@ private func tokenize(text: String, add_bos: Bool) -> [llama_token] {
207207
let utf8Count = text.utf8.count
208208
let n_tokens = utf8Count + (add_bos ? 1 : 0)
209209
let tokens = UnsafeMutablePointer<llama_token>.allocate(capacity: n_tokens)
210-
let tokenCount = llama_tokenize(model, text, Int32(utf8Count), tokens, Int32(n_tokens), add_bos, /*special tokens*/ false)
210+
let tokenCount = llama_tokenize(llama_model_get_vocab, text, Int32(utf8Count), tokens, Int32(n_tokens), add_bos, /*special tokens*/ false)
211211
var swiftTokens: [llama_token] = []
212212
for i in 0 ..< tokenCount {
213213
swiftTokens.append(tokens[Int(i)])
@@ -218,12 +218,12 @@ private func tokenize(text: String, add_bos: Bool) -> [llama_token] {
218218

219219
private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String? {
220220
var result = [CChar](repeating: 0, count: 8)
221-
let nTokens = llama_token_to_piece(model, token, &result, Int32(result.count), 0, false)
221+
let nTokens = llama_token_to_piece(llama_model_get_vocab, token, &result, Int32(result.count), 0, false)
222222
if nTokens < 0 {
223223
let actualTokensCount = -Int(nTokens)
224224
result = .init(repeating: 0, count: actualTokensCount)
225225
let check = llama_token_to_piece(
226-
model,
226+
llama_model_get_vocab,
227227
token,
228228
&result,
229229
Int32(result.count),

examples/llama.swiftui/llama.cpp.swift/LibLlama.swift

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -151,7 +151,7 @@ actor LlamaContext {
151151

152152
new_token_id = llama_sampler_sample(sampling, context, batch.n_tokens - 1)
153153

154-
if llama_vocab_is_eog(model, new_token_id) || n_cur == n_len {
154+
if llama_vocab_is_eog(llama_model_get_vocab, new_token_id) || n_cur == n_len {
155155
print("\n")
156156
is_done = true
157157
let new_token_str = String(cString: temporary_invalid_cchars + [0])
@@ -297,7 +297,7 @@ actor LlamaContext {
297297
let utf8Count = text.utf8.count
298298
let n_tokens = utf8Count + (add_bos ? 1 : 0) + 1
299299
let tokens = UnsafeMutablePointer<llama_token>.allocate(capacity: n_tokens)
300-
let tokenCount = llama_tokenize(model, text, Int32(utf8Count), tokens, Int32(n_tokens), add_bos, false)
300+
let tokenCount = llama_tokenize(llama_model_get_vocab, text, Int32(utf8Count), tokens, Int32(n_tokens), add_bos, false)
301301

302302
var swiftTokens: [llama_token] = []
303303
for i in 0..<tokenCount {
@@ -316,15 +316,15 @@ actor LlamaContext {
316316
defer {
317317
result.deallocate()
318318
}
319-
let nTokens = llama_token_to_piece(model, token, result, 8, 0, false)
319+
let nTokens = llama_token_to_piece(llama_model_get_vocab, token, result, 8, 0, false)
320320

321321
if nTokens < 0 {
322322
let newResult = UnsafeMutablePointer<Int8>.allocate(capacity: Int(-nTokens))
323323
newResult.initialize(repeating: Int8(0), count: Int(-nTokens))
324324
defer {
325325
newResult.deallocate()
326326
}
327-
let nNewTokens = llama_token_to_piece(model, token, newResult, -nTokens, 0, false)
327+
let nNewTokens = llama_token_to_piece(llama_model_get_vocab, token, newResult, -nTokens, 0, false)
328328
let bufferPointer = UnsafeBufferPointer(start: newResult, count: Int(nNewTokens))
329329
return Array(bufferPointer)
330330
} else {

0 commit comments

Comments (0)