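// Example: WasmEdge WASI-NN with the llama.cpp (GGML) backend, using the
// `json-schema` option so the model's output is constrained to valid JSON.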
use serde_json::json;
use serde_json::Value;
use std::env;
use std::io;
use wasmedge_wasi_nn::{
    self, BackendError, Error, ExecutionTarget, GraphBuilder, GraphEncoding, GraphExecutionContext,
    TensorType,
};

fn read_input() -> String {
    loop {
        let mut answer = String::new();
        io::stdin()
            .read_line(&mut answer)
            .expect("Failed to read line");
        if !answer.is_empty() && answer != "\n" && answer != "\r\n" {
            return answer.trim().to_string();
        }
    }
}

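// Build the graph options from environment variables: `enable_log`,
// `n_gpu_layers`, `n_predict`, and `json_schema` map to the corresponding
// `enable-log`, `n-gpu-layers`, `n-predict`, and `json-schema` options.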
fn get_options_from_env() -> Value {
    let mut options = json!({});
    if let Ok(val) = env::var("enable_log") {
        options["enable-log"] = serde_json::from_str(val.as_str())
            .expect("invalid value for enable-log option (true/false)")
    } else {
        options["enable-log"] = serde_json::from_str("false").unwrap()
    }
    if let Ok(val) = env::var("n_gpu_layers") {
        options["n-gpu-layers"] = serde_json::from_str(val.as_str())
            .expect("invalid n-gpu-layers value (unsigned integer)")
    } else {
        options["n-gpu-layers"] = serde_json::from_str("0").unwrap()
    }
    if let Ok(val) = env::var("n_predict") {
        options["n-predict"] =
            serde_json::from_str(val.as_str()).expect("invalid n-predict value (unsigned integer)")
    }
    if let Ok(val) = env::var("json_schema") {
        options["json-schema"] =
            serde_json::from_str(val.as_str()).expect("invalid json-schema value (JSON)")
    }

    options
}

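// Input tensor index 0 carries the prompt as raw UTF-8 bytes; index 1 accepts a
// metadata update (see set_metadata_to_context below).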
fn set_data_to_context(context: &mut GraphExecutionContext, data: Vec<u8>) -> Result<(), Error> {
    context.set_input(0, TensorType::U8, &[1], &data)
}

#[allow(dead_code)]
fn set_metadata_to_context(
    context: &mut GraphExecutionContext,
    data: Vec<u8>,
) -> Result<(), Error> {
    context.set_input(1, TensorType::U8, &[1], &data)
}

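// Output tensor index 0 holds the generated text; index 1 holds a JSON metadata
// blob (token counts and llama.cpp build information).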
fn get_data_from_context(context: &GraphExecutionContext, index: usize) -> String {
    // Reserve room for 4096 tokens with an average token length of 6 bytes.
    const MAX_OUTPUT_BUFFER_SIZE: usize = 4096 * 6;
    let mut output_buffer = vec![0u8; MAX_OUTPUT_BUFFER_SIZE];
    let mut output_size = context
        .get_output(index, &mut output_buffer)
        .expect("Failed to get output");
    output_size = std::cmp::min(MAX_OUTPUT_BUFFER_SIZE, output_size);

    String::from_utf8_lossy(&output_buffer[..output_size]).to_string()
}

fn get_output_from_context(context: &GraphExecutionContext) -> String {
    get_data_from_context(context, 0)
}

fn get_metadata_from_context(context: &GraphExecutionContext) -> Value {
    serde_json::from_str(&get_data_from_context(context, 1)).expect("Failed to parse metadata")
}

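// The schema below describes "Product" items (productId, productName, price)
// and requires at least five of them via "minItems".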
const JSON_SCHEMA: &str = r#"
{
    "items": {
        "title": "Product",
        "description": "A product from the catalog",
        "type": "object",
        "properties": {
            "productId": {
                "description": "The unique identifier for a product",
                "type": "integer"
            },
            "productName": {
                "description": "Name of the product",
                "type": "string"
            },
            "price": {
                "description": "The price of the product",
                "type": "number",
                "exclusiveMinimum": 0
            }
        },
        "required": [
            "productId",
            "productName",
            "price"
        ]
    },
    "minItems": 5
}
"#;

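// Entry point: build the GGML graph with the options above plus the JSON-schema
// grammar, then either answer a single prompt passed on the command line
// (non-interactive / CI mode) or enter an interactive loop.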
fn main() {
    let args: Vec<String> = env::args().collect();
    let model_name: &str = &args[1];

    // Set options for the graph. Check our README for more details:
    // https://github.com/second-state/WasmEdge-WASINN-examples/tree/master/wasmedge-ggml#parameters
    let mut options = get_options_from_env();

    // Add grammar for JSON output.
    // Check [here](https://github.com/ggerganov/llama.cpp/tree/master/grammars) for more details.
    options["json-schema"] = JSON_SCHEMA.into();

    // Make the output more consistent.
    options["temp"] = json!(0.1);

    // Create graph and initialize context.
    let graph = GraphBuilder::new(GraphEncoding::Ggml, ExecutionTarget::AUTO)
        .config(serde_json::to_string(&options).expect("Failed to serialize options"))
        .build_from_cache(model_name)
        .expect("Failed to build graph");
    let mut context = graph
        .init_execution_context()
        .expect("Failed to init context");

    // If there is a third argument, use it as the prompt and enter non-interactive mode.
    // This is mainly for the CI workflow.
    if args.len() >= 3 {
        let prompt = &args[2];
        // Set the prompt.
        println!("Prompt:\n{}", prompt);
        let tensor_data = prompt.as_bytes().to_vec();
        context
            .set_input(0, TensorType::U8, &[1], &tensor_data)
            .expect("Failed to set input");
        println!("Response:");

        // Get the number of input tokens and the llama.cpp version information.
        let input_metadata = get_metadata_from_context(&context);
        println!("[INFO] llama_commit: {}", input_metadata["llama_commit"]);
        println!(
            "[INFO] llama_build_number: {}",
            input_metadata["llama_build_number"]
        );
        println!(
            "[INFO] Number of input tokens: {}",
            input_metadata["input_tokens"]
        );

        // Get the output.
        context.compute().expect("Failed to compute");
        let output = get_output_from_context(&context);
        println!("{}", output.trim());

        // Retrieve the output metadata.
        let metadata = get_metadata_from_context(&context);
        println!(
            "[INFO] Number of input tokens: {}",
            metadata["input_tokens"]
        );
        println!(
            "[INFO] Number of output tokens: {}",
            metadata["output_tokens"]
        );
        std::process::exit(0);
    }

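    // Interactive mode: read a prompt from stdin, run inference, and print the
    // model's (JSON-constrained) response, repeating until the process exits.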
    loop {
        println!("USER:");
        let input = read_input();

        // Set prompt to the input tensor.
        set_data_to_context(&mut context, input.as_bytes().to_vec()).expect("Failed to set input");

        // Execute the inference.
        match context.compute() {
            Ok(_) => (),
            Err(Error::BackendError(BackendError::ContextFull)) => {
                println!("\n[INFO] Context full, we'll reset the context and continue.");
            }
            Err(Error::BackendError(BackendError::PromptTooLong)) => {
                println!("\n[INFO] Prompt too long, we'll reset the context and continue.");
            }
            Err(err) => {
                println!("\n[ERROR] {}", err);
            }
        }

        // Retrieve the output.
        let output = get_output_from_context(&context);
        println!("ASSISTANT:\n{}", output.trim());
    }
}