
Commit 3e313a6

refactor: extract platform-generic logic from run_ollama_model() handler

Signed-off-by: Nick Mitchell <[email protected]>
Parent: 6a5b1de
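In outline, the change narrows what a model backend must implement: run_model() takes over resolving the input messages (from block.input or the accumulated state), building the trace, and packaging the PdlResult, while run_ollama_model() is reduced to producing a response string plus token usage. A minimal sketch of that extracted contract, using the type names from the diff below; the trait framing is mine for illustration only, since the commit itself uses inherent async methods on Interpreter rather than a trait:

// Sketch only: the backend contract this commit carves out of
// run_ollama_model(). Not an actual trait in the real code.
trait ModelBackend {
    // Input messages arrive already resolved by run_model(); the handler
    // returns only the response text and, if the backend reports it,
    // token usage. Trace and PdlResult packaging stay in run_model().
    async fn run(
        &mut self,
        pdl_model: String,
        input_messages: Vec<ChatMessage>,
    ) -> Result<(String, Option<PdlUsage>), PdlError>;
}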

1 file changed: +63 −52 lines

pdl-live-react/src-tauri/src/pdl/interpreter.rs

Lines changed: 63 additions & 52 deletions
@@ -813,7 +813,8 @@ impl<'a> Interpreter<'a> {
         block: &ModelBlock,
         metadata: &Metadata,
         state: &mut State,
-    ) -> BodyInterpretation {
+        input_messages: Vec<ChatMessage>,
+    ) -> Result<(String, Option<PdlUsage>), PdlError> {
         let mut ollama = Ollama::default();
         let model = if pdl_model.starts_with("ollama/") {
             &pdl_model[7..]
@@ -827,19 +828,6 @@ impl<'a> Interpreter<'a> {
             eprintln!("Model tools {:?} {:?}", metadata.description, tools);
         }
 
-        let mut trace = block.clone();
-
-        // The input messages to the model is either:
-        // a) block.input, if given
-        // b) the current state's accumulated messages
-        let input_messages = match &block.input {
-            Some(input) => {
-                // TODO ignoring result and trace
-                let (_result, messages, _trace) = self.run_quiet(&*input, state).await?;
-                messages
-            }
-            None => state.messages.clone(),
-        };
         let (prompt, history_slice): (&ChatMessage, &[ChatMessage]) =
             match input_messages.split_last() {
                 Some(x) => x,
@@ -853,10 +841,6 @@ impl<'a> Interpreter<'a> {
             );
         }
 
-        //if state.emit {
-        //println!("{}", pretty_print(&input_messages));
-        //}
-
         let req = ChatMessageRequest::new(model.into(), vec![prompt.clone()])
             .options(options)
             .tools(tools);
@@ -919,6 +903,45 @@ impl<'a> Interpreter<'a> {
             }
         }
 
+        let usage = if let Some(res) = last_res {
+            if let Some(usage) = res.final_data {
+                Some(PdlUsage {
+                    prompt_tokens: usage.prompt_eval_count,
+                    prompt_nanos: Some(usage.prompt_eval_duration),
+                    completion_tokens: usage.eval_count,
+                    completion_nanos: Some(usage.eval_duration),
+                })
+            } else {
+                None
+            }
+        } else {
+            None
+        };
+
+        Ok((response_string, usage))
+    }
+
+    /// Run a PdlBlock::Model
+    async fn run_model(
+        &mut self,
+        block: &ModelBlock,
+        metadata: &Metadata,
+        state: &mut State,
+    ) -> BodyInterpretation {
+        // The input messages to the model is either:
+        // a) block.input, if given
+        // b) the current state's accumulated messages
+        let input_messages = match &block.input {
+            Some(input) => {
+                // TODO ignoring result and trace
+                let (_result, messages, _trace) = self.run_quiet(&*input, state).await?;
+                messages
+            }
+            None => state.messages.clone(),
+        };
+
+        let mut trace = block.clone();
+
         // TODO, does this belong in run_advanced(), and does
         // trace.context belong in Metadata rather than ModelBlock
         trace.context = Some(
@@ -948,42 +971,30 @@ impl<'a> Interpreter<'a> {
                 .collect(),
         );
 
-        if let Some(res) = last_res {
-            if let Some(usage) = res.final_data {
-                trace.pdl_usage = Some(PdlUsage {
-                    prompt_tokens: usage.prompt_eval_count,
-                    prompt_nanos: Some(usage.prompt_eval_duration),
-                    completion_tokens: usage.eval_count,
-                    completion_nanos: Some(usage.eval_duration),
-                });
-            }
-            let output_messages = vec![ChatMessage::assistant(response_string.clone())];
-            Ok((
-                PdlResult::String(response_string),
-                output_messages,
-                Model(trace),
-            ))
-        } else {
-            // nothing came out of the model
-            Ok(("".into(), vec![], Model(trace)))
-        }
-        // dbg!(history);
-    }
+        let (response_string, usage) =
+            if let PdlResult::String(s) = self.eval_string_to_string(&block.model, state)? {
+                if s.starts_with("ollama/") || s.starts_with("ollama_chat/") {
+                    self.run_ollama_model(s, block, metadata, state, input_messages)
+                        .await
+                /*} else if s.starts_with("openai/") {
+                    return self.run_openai_model(s, block, metadata, state, input_messages).await;*/
+                } else {
+                    Err(Box::from(format!("Unsupported model {:?}", block.model)))
+                }
+            } else {
+                Err(Box::from(format!(
+                    "Model expression evaluated to non-string {:?}",
+                    block.model
+                )))
+            }?;
 
-    /// Run a PdlBlock::Model
-    async fn run_model(
-        &mut self,
-        block: &ModelBlock,
-        metadata: &Metadata,
-        state: &mut State,
-    ) -> BodyInterpretation {
-        if let PdlResult::String(s) = self.eval_string_to_string(&block.model, state)? {
-            if s.starts_with("ollama/") || s.starts_with("ollama_chat/") {
-                return self.run_ollama_model(s, block, metadata, state).await;
-            }
-        }
+        trace.pdl_usage = usage;
 
-        Err(Box::from(format!("Unsupported model {:?}", block.model)))
+        Ok((
+            PdlResult::String(response_string.clone()),
+            vec![ChatMessage::assistant(response_string)],
+            Model(trace),
+        ))
     }
 
     /// Run a PdlBlock::Data
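The payoff shows in the commented-out openai branch above: adding a second backend now means writing one handler with the same shape and filling in that stubbed dispatch arm. A hedged sketch of such a handler; run_openai_model() does not exist in this commit, and the body below is illustrative only:

    /// Hypothetical second backend, mirroring run_ollama_model()'s
    /// post-refactor signature.
    async fn run_openai_model(
        &mut self,
        pdl_model: String,
        block: &ModelBlock,
        metadata: &Metadata,
        state: &mut State,
        input_messages: Vec<ChatMessage>,
    ) -> Result<(String, Option<PdlUsage>), PdlError> {
        // Send input_messages to the OpenAI chat API, then map its token
        // counts into PdlUsage the way the Ollama handler maps final_data.
        todo!()
    }

The parameter list deliberately mirrors run_ollama_model()'s, so the commented-out arm in run_model()'s dispatch could call it exactly as written once uncommented.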
