22 changes: 20 additions & 2 deletions src/llm/mod.rs
@@ -1,5 +1,7 @@
 use std::borrow::Cow;
 
+use anyhow::Result;
+use async_trait::async_trait;
 use schemars::schema::SchemaObject;
 use serde::{Deserialize, Serialize};
 
@@ -32,5 +34,21 @@ pub struct LlmGenerateResponse {
     pub text: String,
 }
 
-mod client;
-pub use client::LlmClient;
+#[async_trait]
+pub trait LlmGenerationClient: Send + Sync {
+    async fn generate<'req>(
+        &self,
+        request: LlmGenerateRequest<'req>,
+    ) -> Result<LlmGenerateResponse>;
+}
+
+mod ollama;
+
+pub async fn new_llm_generation_client(spec: LlmSpec) -> Result<Box<dyn LlmGenerationClient>> {
+    let client = match spec.api_type {
+        LlmApiType::Ollama => {
+            Box::new(ollama::Client::new(spec).await?) as Box<dyn LlmGenerationClient>
+        }
+    };
+    Ok(client)
+}
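
Note for reviewers: the factory above returns a boxed trait object, so call sites depend only on the `LlmGenerationClient` trait, and supporting another backend means adding one `LlmApiType` arm plus one trait impl. Below is a minimal, self-contained sketch of the same pattern; the request/response structs are stubs and `MockClient` is a hypothetical backend for illustration, not part of this crate.

use anyhow::Result;
use async_trait::async_trait;

// Stub types standing in for the crate's real request/response structs.
struct LlmGenerateRequest<'req> {
    user_prompt: &'req str,
}

struct LlmGenerateResponse {
    text: String,
}

#[async_trait]
trait LlmGenerationClient: Send + Sync {
    async fn generate<'req>(
        &self,
        request: LlmGenerateRequest<'req>,
    ) -> Result<LlmGenerateResponse>;
}

// Hypothetical extra backend: implementing the trait is all that is
// needed; the factory's match would simply gain another arm.
struct MockClient;

#[async_trait]
impl LlmGenerationClient for MockClient {
    async fn generate<'req>(
        &self,
        request: LlmGenerateRequest<'req>,
    ) -> Result<LlmGenerateResponse> {
        Ok(LlmGenerateResponse {
            text: format!("echo: {}", request.user_prompt),
        })
    }
}

#[tokio::main]
async fn main() -> Result<()> {
    // Callers hold the same type the factory returns.
    let client: Box<dyn LlmGenerationClient> = Box::new(MockClient);
    let response = client
        .generate(LlmGenerateRequest { user_prompt: "ping" })
        .await?;
    println!("{}", response.text);
    Ok(())
}

The `Send + Sync` supertrait bound is what lets the boxed client be stored in executors that run on a multi-threaded async runtime.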
13 changes: 9 additions & 4 deletions src/llm/client.rs → src/llm/ollama.rs
@@ -1,8 +1,10 @@
+use super::LlmGenerationClient;
 use anyhow::Result;
+use async_trait::async_trait;
 use schemars::schema::SchemaObject;
 use serde::{Deserialize, Serialize};
 
-pub struct LlmClient {
+pub struct Client {
     generate_url: String,
     model: String,
     reqwest_client: reqwest::Client,
@@ -30,7 +32,7 @@ struct OllamaResponse {
 
 const OLLAMA_DEFAULT_ADDRESS: &str = "http://localhost:11434";
 
-impl LlmClient {
+impl Client {
     pub async fn new(spec: super::LlmSpec) -> Result<Self> {
         let address = match &spec.address {
            Some(addr) => addr.trim_end_matches('/'),
@@ -42,10 +44,13 @@ impl LlmClient {
             reqwest_client: reqwest::Client::new(),
         })
     }
+}
 
-    pub async fn generate<'a>(
+#[async_trait]
+impl LlmGenerationClient for Client {
+    async fn generate<'req>(
         &self,
-        request: super::LlmGenerateRequest<'a>,
+        request: super::LlmGenerateRequest<'req>,
     ) -> Result<super::LlmGenerateResponse> {
         let req = OllamaRequest {
             model: &self.model,
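
For context on what the renamed `ollama::Client` talks to: Ollama's `/api/generate` endpoint accepts a JSON body with `model`, `prompt`, and `stream`, and replies with a `response` field. The sketch below shows that round trip with `reqwest`; the field set follows Ollama's public API, while the crate's own `OllamaRequest` definition is collapsed in this diff and may differ. The model name is whatever you have pulled locally.

use anyhow::Result;
use serde::{Deserialize, Serialize};

#[derive(Serialize)]
struct OllamaRequest<'a> {
    model: &'a str,
    prompt: &'a str,
    stream: bool,
}

#[derive(Deserialize)]
struct OllamaResponse {
    response: String,
}

#[tokio::main]
async fn main() -> Result<()> {
    // Same default address as OLLAMA_DEFAULT_ADDRESS above.
    let url = "http://localhost:11434/api/generate";
    let res: OllamaResponse = reqwest::Client::new()
        .post(url)
        .json(&OllamaRequest {
            model: "llama3.2", // any locally pulled model
            prompt: "Why is the sky blue?",
            stream: false, // one complete JSON object instead of a token stream
        })
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("{}", res.response);
    Ok(())
}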
8 changes: 5 additions & 3 deletions src/ops/functions/extract_by_llm.rs
@@ -5,7 +5,9 @@ use schemars::schema::SchemaObject;
 use serde::Serialize;
 
 use crate::base::json_schema::ToJsonSchema;
-use crate::llm::{LlmClient, LlmGenerateRequest, LlmSpec, OutputFormat};
+use crate::llm::{
+    new_llm_generation_client, LlmGenerateRequest, LlmGenerationClient, LlmSpec, OutputFormat,
+};
 use crate::ops::sdk::*;
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -16,7 +18,7 @@ pub struct Spec {
 }
 
 struct Executor {
-    client: LlmClient,
+    client: Box<dyn LlmGenerationClient>,
     output_json_schema: SchemaObject,
     output_type: EnrichedValueType,
     system_prompt: String,
@@ -41,7 +43,7 @@ Output only the JSON without any additional messages or explanations."
 impl Executor {
     async fn new(spec: Spec) -> Result<Self> {
         Ok(Self {
-            client: LlmClient::new(spec.llm_spec).await?,
+            client: new_llm_generation_client(spec.llm_spec).await?,
             output_json_schema: spec.output_type.to_json_schema(),
             output_type: spec.output_type,
             system_prompt: get_system_prompt(&spec.instruction),
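
The executor pairs a `schemars` schema (`output_json_schema`) with a system prompt ending in "Output only the JSON without any additional messages or explanations." The `get_system_prompt` helper is not shown in this diff, so the sketch below is only a hypothetical illustration of how a schema might be woven into such a prompt, using `schemars` and `serde_json` APIs that do exist; the crate's actual prompt construction may differ.

use schemars::{schema_for, JsonSchema};
use serde::Deserialize;

#[derive(Deserialize, JsonSchema)]
struct Person {
    name: String,
    age: u32,
}

fn main() -> anyhow::Result<()> {
    // Derive a JSON schema for the desired output type and embed it in
    // the system prompt, so the model knows the exact shape to emit.
    let schema = schema_for!(Person);
    let system_prompt = format!(
        "Extract the requested fields from the given text.\n\
         Output only the JSON without any additional messages or explanations.\n\
         JSON schema:\n{}",
        serde_json::to_string_pretty(&schema)?
    );
    println!("{system_prompt}");
    Ok(())
}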