diff --git a/modus/quickstart.mdx b/modus/quickstart.mdx
index 5acaf6e2..2756e5a5 100644
--- a/modus/quickstart.mdx
+++ b/modus/quickstart.mdx
@@ -84,13 +84,39 @@ learn how to use the basic components of a Modus app and how to run it locally.
     ```
-
+
+    Modus also supports AI models. You can define new models in your `modus.json` file. Let's add a new Meta Llama model:
+
+    ```json
+    "models": {
+      "text-generator": {
+        "sourceModel": "meta-llama/Llama-3.2-3B-Instruct",
+        "provider": "hugging-face",
+        "connection": "hypermode"
+      }
+    },
+    ```
+
+
+
+    Next, install the Hyp CLI. This allows you to access hosted models on the Hypermode platform.
+
+    ```sh
+    npm install -g @hypermode/hyp-cli
+    ```
+
+    You can now use the `hyp login` command to log in to the Hyp CLI.
+    This links your project to the Hypermode platform, allowing you to leverage the model in your Modus app.
+
+
+
     Functions are the building blocks of your app. Let's add a function that fetches a random quote from
-    the ZenQuotes connection you just created.
+    the ZenQuotes connection and uses AI to generate a summary for the quote.

-    To add a function, create a new file in the root directory with the following code:
+    Create a new file in the root directory with the following code:

     ```go quotes.go
     package main
@@ -98,17 +124,23 @@ learn how to use the basic components of a Modus app and how to run it locally.

     import (
       "errors"
       "fmt"
+      "strings"

       "github.com/hypermodeinc/modus/sdk/go/pkg/http"
+      "github.com/hypermodeinc/modus/sdk/go/pkg/models"
+      "github.com/hypermodeinc/modus/sdk/go/pkg/models/openai"
     )

     type Quote struct {
-      Quote  string `json:"q"`
-      Author string `json:"a"`
+      Quote   string `json:"q"`
+      Author  string `json:"a"`
+      Summary string `json:"summary,omitempty"`
     }

-    // this function makes a request to an API that returns data in JSON format, and
-    // returns an object representing the data
+    const modelName = "text-generator"
+
+    // this function makes a request to an API that returns data in JSON format,
+    // and returns a single quote with an AI-generated summary
     func GetRandomQuote() (*Quote, error) {
       request := http.NewRequest("https://zenquotes.io/api/random")

@@ -117,25 +149,72 @@ learn how to use the basic components of a Modus app and how to run it locally.
         return nil, err
       }
       if !response.Ok() {
-        return nil, fmt.Errorf("Failed to fetch quote. Received: %d %s", response.Status, response.StatusText)
+        return nil, fmt.Errorf("failed to fetch quote. Received: %d %s", response.Status, response.StatusText)
       }

-      // the API returns an array of quotes, but we only want the first one
+      // the API returns an array of quotes, but we only need the first one
       var quotes []Quote
       response.JSON(&quotes)
       if len(quotes) == 0 {
         return nil, errors.New("expected at least one quote in the response, but none were found")
       }
-      return &quotes[0], nil
+
+      // Get the first (and only) quote
+      quote := quotes[0]
+
+      // Generate an AI summary for the quote
+      summary, err := summarizeQuote(quote.Quote, quote.Author)
+      if err != nil {
+        fmt.Printf("Warning: failed to summarize quote by %s: %v\n", quote.Author, err)
+        quote.Summary = "Summary unavailable"
+      } else {
+        quote.Summary = summary
+      }
+
+      return &quote, nil
+    }
+
+    // summarizeQuote uses the AI model to generate a concise summary of the quote
+    func summarizeQuote(quote, author string) (string, error) {
+      model, err := models.GetModel[openai.ChatModel](modelName)
+      if err != nil {
+        return "", err
+      }
+
+      instruction := "Provide a brief, insightful summary that captures the essence and meaning of the quote in 1-2 sentences."
+      prompt := fmt.Sprintf("Quote: \"%s\" - %s", quote, author)
+
+      input, err := model.CreateInput(
+        openai.NewSystemMessage(instruction),
+        openai.NewUserMessage(prompt),
+      )
+      if err != nil {
+        return "", err
+      }
+
+      // Set the temperature for consistent but creative responses
+      input.Temperature = 0.7
+
+      output, err := model.Invoke(input)
+      if err != nil {
+        return "", err
+      }
+
+      return strings.TrimSpace(output.Choices[0].Message.Content), nil
     }
     ```
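Editor's note: `summarizeQuote` above follows the same three-step pattern every Modus model call uses: fetch the model, build the chat input, invoke it. If several functions need the model, that pattern extracts cleanly into a shared helper. A minimal sketch, assuming it lives in the same `quotes.go` with the imports and `modelName` constant shown above; the `generateText` name is ours, not part of the quickstart:

```go
// generateText wraps the model-invocation pattern from summarizeQuote so it
// can be reused for any instruction/prompt pair. It uses only the SDK calls
// already shown in the tutorial code above.
func generateText(instruction, prompt string) (string, error) {
	model, err := models.GetModel[openai.ChatModel](modelName)
	if err != nil {
		return "", err
	}

	input, err := model.CreateInput(
		openai.NewSystemMessage(instruction),
		openai.NewUserMessage(prompt),
	)
	if err != nil {
		return "", err
	}
	input.Temperature = 0.7

	output, err := model.Invoke(input)
	if err != nil {
		return "", err
	}

	// Guard against an empty choice list before indexing into it,
	// a check the tutorial code skips for brevity.
	if len(output.Choices) == 0 {
		return "", errors.New("model returned no choices")
	}
	return strings.TrimSpace(output.Choices[0].Message.Content), nil
}
```

With a helper like this, `summarizeQuote` reduces to building its instruction and prompt strings and returning `generateText(instruction, prompt)`.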
-    To add a function, create a new file in the `assembly` directory with the following code:
+    Create a new file in the `assembly` directory with the following code:

     ```ts quotes.ts
-    import { http } from "@hypermode/modus-sdk-as";
+    import { http, models } from "@hypermode/modus-sdk-as";
+    import {
+      OpenAIChatModel,
+      SystemMessage,
+      UserMessage,
+    } from "@hypermode/modus-sdk-as/models/openai/chat";

     @json
     class Quote {
@@ -144,10 +223,14 @@ learn how to use the basic components of a Modus app and how to run it locally.

       @alias("a")
       author!: string;
+
+      summary?: string;
     }

-    // this function makes a request to an API that returns data in JSON format, and
-    // returns an object representing the data
+    const modelName: string = "text-generator";
+
+    // this function makes a request to an API that returns data in JSON format,
+    // and returns a single quote with an AI-generated summary
     export function getRandomQuote(): Quote {
       const request = new http.Request("https://zenquotes.io/api/random");
@@ -158,8 +241,43 @@ learn how to use the basic components of a Modus app and how to run it locally.
         );
       }

-      // the API returns an array of quotes, but we only want the first one
-      return response.json<Quote[]>()[0];
+      // the API returns an array of quotes, but we only need the first one
+      const quotes = response.json<Quote[]>();
+      if (quotes.length === 0) {
+        throw new Error("Expected at least one quote in the response, but none were found");
+      }
+
+      // Get the first (and only) quote
+      const quote = quotes[0];
+
+      // Generate an AI summary for the quote
+      try {
+        quote.summary = summarizeQuote(quote.quote, quote.author);
+      } catch (error) {
+        console.log(`Warning: failed to summarize quote by ${quote.author}: ${error}`);
+        quote.summary = "Summary unavailable";
+      }
+
+      return quote;
+    }
+
+    // summarizeQuote uses the AI model to generate a concise summary of the quote
+    function summarizeQuote(quote: string, author: string): string {
+      const model = models.getModel<OpenAIChatModel>(modelName);
+
+      const instruction = "Provide a brief, insightful summary that captures the essence and meaning of the quote in 1-2 sentences.";
+      const prompt = `Quote: "${quote}" - ${author}`;
+
+      const input = model.createInput([
+        new SystemMessage(instruction),
+        new UserMessage(prompt),
+      ]);
+
+      // Set the temperature for consistent but creative responses
+      input.temperature = 0.7;
+
+      const output = model.invoke(input);
+      return output.choices[0].message.content.trim();
     }
     ```
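Editor's note: besides the API Explorer described below, the function is reachable as a plain GraphQL query once `modus dev` is running. A minimal sketch of a standalone test client, assuming the runtime serves GraphQL at `http://localhost:8686/graphql` (inferred from the explorer URL in this PR; this program runs on your machine, not inside the Modus app):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Query the randomQuote function exposed by the local Modus runtime.
	query := `{"query": "{ randomQuote { quote author summary } }"}`
	resp, err := http.Post(
		"http://localhost:8686/graphql", // assumed default local endpoint
		"application/json",
		strings.NewReader(query),
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Print the raw GraphQL response, including the AI-generated summary.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```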
@@ -172,32 +290,26 @@ learn how to use the basic components of a Modus app and how to run it locally.
-    After adding your function, you can use the API Explorer interface to test the `GetRandomQuote` function.
-
-    Modus also supports AI models. You can define new models in your `modus.json` file. Let's add a new meta-llama model:
-
-    ```json
-    "models": {
-      "text-generator": {
-        "sourceModel": "meta-llama/Llama-3.2-3B-Instruct",
-        "provider": "hugging-face",
-        "connection": "hypermode"
-      }
-    },
-    ```
-
-
-    Next, install the Hyp CLI. This allows you to access hosted models on the Hypermode platform.
+
+    Now that you've integrated the AI model, let's test it! After adding your function, restart your development server:

     ```sh
-    npm install -g @hypermode/hyp-cli
+    modus dev
     ```

-    You can now use the `hyp login` command to log in to the Hyp CLI.
-    This links your project to the Hypermode platform, allowing you to leverage the model in your modus app.
+    Navigate to the API Explorer at `http://localhost:8686/explorer` and you'll see your `randomQuote` function available to test.
+
+    When you call the function, you'll notice that the quote includes three fields:
+
+    - `quote`: The original quote text
+    - `author`: The author's name
+    - `summary`: An AI-generated summary that captures the essence of the quote
+
+    The AI model analyzes the quote and provides insightful context about its meaning, making your app more engaging and informative for users.
+
+    Try calling the function multiple times to see how the AI generates different summaries for various quotes!
@@ -213,6 +325,12 @@ learn how to use the basic components of a Modus app and how to run it locally.

     ![local model tracing](../images/observe-functions/local-inference-history.png)

+    You can now see detailed information about each AI model call, including:
+
+    - Input prompts sent to the model
+    - Generated responses
+    - Performance metrics like response time
+    - Token usage and costs
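Editor's note: since every model call is recorded in the local inference history, it is also a convenient place to experiment with parameters such as `input.Temperature`. A hedged sketch of one way to populate it with comparable runs, reusing only the SDK calls from the quickstart code; the `CompareTemperatures` function and its temperature values are our invention, not part of this PR:

```go
// CompareTemperatures summarizes the same quote at several temperatures so
// that each invocation shows up as a separate entry in the local inference
// history. It assumes the same quotes.go imports and modelName constant.
func CompareTemperatures(quote, author string) ([]string, error) {
	model, err := models.GetModel[openai.ChatModel](modelName)
	if err != nil {
		return nil, err
	}

	summaries := []string{}
	for _, temp := range []float64{0.2, 0.7, 1.2} {
		input, err := model.CreateInput(
			openai.NewSystemMessage("Summarize the quote in one sentence."),
			openai.NewUserMessage(fmt.Sprintf("Quote: %q - %s", quote, author)),
		)
		if err != nil {
			return nil, err
		}
		input.Temperature = temp

		output, err := model.Invoke(input)
		if err != nil {
			return nil, err
		}
		summaries = append(summaries, strings.TrimSpace(output.Choices[0].Message.Content))
	}
	return summaries, nil
}
```

Calling this once and then opening the inference history view shown above lets you compare, side by side, how the prompt and temperature affect each generated summary.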