Commit 55bc512

Improve documentation comments for deprecated llama model properties
1 parent: a48cf2e

1 file changed: +61 −18 lines changed


Sources/AnyLanguageModel/Models/LlamaLanguageModel.swift

Lines changed: 61 additions & 18 deletions
@@ -193,8 +193,8 @@ import Foundation
 
 /// The context size for the model.
 ///
-/// - Important: This property is deprecated. Use ``GenerationOptions`` with
-/// custom options instead:
+/// - Important: This property is deprecated.
+/// Use ``GenerationOptions`` with custom options instead:
 /// ```swift
 /// var options = GenerationOptions()
 /// options[custom: LlamaLanguageModel.self] = .init(contextSize: 4096)
@@ -204,57 +204,91 @@ import Foundation
 
 /// The batch size for processing.
 ///
-/// - Important: This property is deprecated. Use ``GenerationOptions`` with
-/// custom options instead.
+/// - Important: This property is deprecated.
+/// Use ``GenerationOptions`` with custom options instead:
+/// ```swift
+/// var options = GenerationOptions()
+/// options[custom: LlamaLanguageModel.self] = .init(batchSize: 1024)
+/// ```
 @available(*, deprecated, message: "Use GenerationOptions custom options instead")
 public var batchSize: UInt32 { legacyDefaults.batchSize }
 
 /// The number of threads to use.
 ///
-/// - Important: This property is deprecated. Use ``GenerationOptions`` with
+/// - Important: This property is deprecated.
+/// Use ``GenerationOptions`` with custom options instead:
+/// ```swift
+/// var options = GenerationOptions()
+/// options[custom: LlamaLanguageModel.self] = .init(threads: 8)
+/// ```
 /// custom options instead.
 @available(*, deprecated, message: "Use GenerationOptions custom options instead")
 public var threads: Int32 { legacyDefaults.threads }
 
 /// The random seed for generation.
 ///
-/// - Important: This property is deprecated. Use ``GenerationOptions`` with
+/// - Important: This property is deprecated.
+/// Use ``GenerationOptions`` with custom options instead:
+/// ```swift
+/// var options = GenerationOptions()
+/// options[custom: LlamaLanguageModel.self] = .init(seed: 42)
+/// ```
 /// custom options instead.
 @available(*, deprecated, message: "Use GenerationOptions custom options instead")
 public var seed: UInt32 { legacyDefaults.seed }
 
 /// The temperature for sampling.
 ///
-/// - Important: This property is deprecated. Use ``GenerationOptions`` with
-/// custom options instead.
+/// - Important: This property is deprecated.
+/// Use ``GenerationOptions`` with custom options instead:
+/// ```swift
+/// var options = GenerationOptions()
+/// options[custom: LlamaLanguageModel.self] = .init(temperature: 0.6)
+/// ```
 @available(*, deprecated, message: "Use GenerationOptions custom options instead")
 public var temperature: Float { legacyDefaults.temperature }
 
 /// The top-K sampling parameter.
 ///
-/// - Important: This property is deprecated. Use ``GenerationOptions`` with
-/// custom options instead.
+/// - Important: This property is deprecated.
+/// Use ``GenerationOptions`` with custom options instead:
+/// ```swift
+/// var options = GenerationOptions()
+/// options[custom: LlamaLanguageModel.self] = .init(topK: 25)
+/// ```
 @available(*, deprecated, message: "Use GenerationOptions custom options instead")
 public var topK: Int32 { legacyDefaults.topK }
 
 /// The top-P (nucleus) sampling parameter.
 ///
-/// - Important: This property is deprecated. Use ``GenerationOptions`` with
-/// custom options instead.
+/// - Important: This property is deprecated.
+/// Use ``GenerationOptions`` with custom options instead:
+/// ```swift
+/// var options = GenerationOptions()
+/// options[custom: LlamaLanguageModel.self] = .init(topP: 0.9)
+/// ```
 @available(*, deprecated, message: "Use GenerationOptions custom options instead")
 public var topP: Float { legacyDefaults.topP }
 
 /// The repeat penalty for generation.
 ///
-/// - Important: This property is deprecated. Use ``GenerationOptions`` with
-/// custom options instead.
+/// - Important: This property is deprecated.
+/// Use ``GenerationOptions`` with custom options instead:
+/// ```swift
+/// var options = GenerationOptions()
+/// options[custom: LlamaLanguageModel.self] = .init(repeatPenalty: 1.2)
+/// ```
 @available(*, deprecated, message: "Use GenerationOptions custom options instead")
 public var repeatPenalty: Float { legacyDefaults.repeatPenalty }
 
 /// The number of tokens to consider for repeat penalty.
 ///
-/// - Important: This property is deprecated. Use ``GenerationOptions`` with
-/// custom options instead.
+/// - Important: This property is deprecated.
+/// Use ``GenerationOptions`` with custom options instead:
+/// ```swift
+/// var options = GenerationOptions()
+/// options[custom: LlamaLanguageModel.self] = .init(repeatLastN: 128)
+/// ```
 @available(*, deprecated, message: "Use GenerationOptions custom options instead")
 public var repeatLastN: Int32 { legacyDefaults.repeatLastN }
 
@@ -397,9 +431,18 @@ import Foundation
 
 /// Creates a Llama language model using legacy parameter defaults.
 ///
-/// - Important: This initializer is deprecated. Use
-/// `init(modelPath:)` and configure per-request values via
+/// - Important: This initializer is deprecated.
+/// Use `init(modelPath:)` and configure per-request values via
 /// ``GenerationOptions`` custom options instead.
+///
+/// ```swift
+/// let model = LlamaLanguageModel(modelPath: "/path/to/model.gguf")
+/// var options = GenerationOptions()
+/// options[custom: LlamaLanguageModel.self] = .init(contextSize: 4096)
+///
+/// let session = LanguageModelSession(model: model)
+/// session.respond(to: "Hello, world!", options: options)
+/// ```
 @available(
 *,
 deprecated,
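
Taken together, the updated doc comments describe one migration path: stop reading the deprecated per-model properties and pass the same values through ``GenerationOptions`` custom options at request time. The following is a rough sketch only, not part of the commit: it assumes the custom options type accepts several of these parameters in a single initializer call (the doc comments above show them one at a time) and that the module is imported as AnyLanguageModel, inferred from the file path.

// Hypothetical migration sketch; mirrors the examples in the doc comments above.
import AnyLanguageModel  // assumed module name, inferred from Sources/AnyLanguageModel/...

let model = LlamaLanguageModel(modelPath: "/path/to/model.gguf")

var options = GenerationOptions()
// Assumption: the custom options initializer accepts multiple parameters at once;
// the doc comments above only show single-parameter calls.
options[custom: LlamaLanguageModel.self] = .init(
    contextSize: 4096,  // replaces the deprecated contextSize property
    temperature: 0.6,   // replaces the deprecated temperature property
    topK: 25,           // replaces the deprecated topK property
    topP: 0.9           // replaces the deprecated topP property
)

let session = LanguageModelSession(model: model)
session.respond(to: "Hello, world!", options: options)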
