diff --git a/src/content/docs/workers-ai/fine-tunes/loras.mdx b/src/content/docs/workers-ai/fine-tunes/loras.mdx
index c6441bb6010ba77..094d93771fc37a4 100644
--- a/src/content/docs/workers-ai/fine-tunes/loras.mdx
+++ b/src/content/docs/workers-ai/fine-tunes/loras.mdx
@@ -171,7 +171,7 @@ To make inference requests and apply the LoRA adapter, you will need your model
 const response = await env.AI.run(
 	"@cf/mistralai/mistral-7b-instruct-v0.2-lora", //the model supporting LoRAs
 	{
-		messages: [{"role": "user", "content": "Hello world"],
+		messages: [{"role": "user", "content": "Hello world"}],
 		raw: true, //skip applying the default chat template
 		lora: "00000000-0000-0000-0000-000000000", //the finetune id OR name
 	}