@@ -358,7 +358,7 @@ import Foundation
358 358          return LanguageModelSession.ResponseStream(stream: stream)
359 359      }
360 360
361     -    /// Prewarms the model for the given session and optional prompt prefix.
    361 +    /// Prewarms the model
362 362      public func prewarm(
363 363          for session: LanguageModelSession,
364 364          promptPrefix: Prompt?
@@ -367,39 +367,12 @@ import Foundation
367 367          let hub = self.hub
368 368          let directory = self.directory
369 369
370     -        let instructions = session.instructions?.description
371     -        let tools = session.tools
372     -
373 370          Task {
374     -
375     -            let context = try await loadContext(modelId: modelId, hub: hub, directory: directory)
376     -
377     -            // Build chat history similar to respond() to prime the cache effectively
378     -            var chat: [MLXLMCommon.Chat.Message] = []
379     -
380     -            // Add system instructions if present
381     -            if let instructions, !instructions.isEmpty {
382     -                chat.append(.init(role: .system, content: instructions))
    371 +            do {
    372 +                _ = try await loadContext(modelId: modelId, hub: hub, directory: directory)
    373 +            } catch {
    374 +                // Ignore errors during prewarm
383 375              }
384     -
385     -            // Add prompt prefix or minimal user message
386     -            let promptText = promptPrefix?.description ?? "."
387     -            chat.append(.init(role: .user, content: promptText))
388     -
389     -            // Convert tools to MLX format
390     -            let toolSpecs: [ToolSpec]? =
391     -                tools.isEmpty
392     -                ? nil
393     -                : tools.map { convertToolToMLXSpec($0) }
394     -
395     -            let userInput = MLXLMCommon.UserInput(
396     -                chat: chat,
397     -                processing: .init(resize: .init(width: 512, height: 512)),
398     -                tools: toolSpecs
399     -            )
400     -
401     -            // Prepare input - triggers tokenization and processor initialization
402     -            _ = try await context.processor.prepare(input: userInput)
403 376          }
404 377      }
405 378  }
0 commit comments