Skip to content
Merged
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
188 changes: 153 additions & 35 deletions modus/quickstart.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -84,31 +84,63 @@
```

</Step>
<Step title="Add a function">
<Step title="Add a model">
Modus also supports AI models. You can define new models in your `modus.json` file. Let's add a new meta-llama model:

```json
"models": {
"text-generator": {
"sourceModel": "meta-llama/Llama-3.2-3B-Instruct",
"provider": "hugging-face",
"connection": "hypermode"
}
},
```

</Step>
<Step title="Install the Hyp CLI and log in">
Next, install the Hyp CLI. This allows you to access hosted models on the Hypermode platform.

```sh
npm install -g @hypermode/hyp-cli
```

You can now use the `hyp login` command to log in to the Hyp CLI.
This links your project to the Hypermode platform, allowing you to leverage the model in your Modus app.

</Step>

<Step title="Add a function with AI integration">
Functions are the building blocks of your app. Let's add a function that fetches a random quote from
the ZenQuotes connection you just created.
the ZenQuotes connection and uses AI to generate a summary for the quote.

<Tabs>
<Tab title="Go">
To add a function, create a new file in the root directory with the following code:
Create a new file in the root directory with the following code:

```go quotes.go
package main

import (
"errors"
"fmt"
"strings"

"github.com/hypermodeinc/modus/sdk/go/pkg/http"
"github.com/hypermodeinc/modus/sdk/go/pkg/models"
"github.com/hypermodeinc/modus/sdk/go/pkg/models/openai"
)

type Quote struct {
Quote string `json:"q"`
Author string `json:"a"`
Quote string `json:"q"`
Author string `json:"a"`
Summary string `json:"summary,omitempty"`
}

// modelName identifies the model declared under "models" in modus.json.
const modelName = "text-generator"

// this function makes a request to an API that returns data in JSON format,
// and returns a single quote with AI-generated summary
func GetRandomQuote() (*Quote, error) {
request := http.NewRequest("https://zenquotes.io/api/random")

Expand All @@ -117,25 +149,72 @@
return nil, err
}
if !response.Ok() {
return nil, fmt.Errorf("Failed to fetch quote. Received: %d %s", response.Status, response.StatusText)
return nil, fmt.Errorf("failed to fetch quote. Received: %d %s", response.Status, response.StatusText)
}

// the API returns an array of quotes, but we only want the first one
// the API returns an array of quotes, but we only need the first one
var quotes []Quote
response.JSON(&quotes)
if len(quotes) == 0 {
return nil, errors.New("expected at least one quote in the response, but none were found")
}
return &quotes[0], nil

// Get the first (and only) quote
quote := quotes[0]

// Generate AI summary for the quote
summary, err := summarizeQuote(quote.Quote, quote.Author)
if err != nil {
fmt.Printf("Warning: failed to summarize quote by %s: %v\n", quote.Author, err)
quote.Summary = "Summary unavailable"
} else {
quote.Summary = summary
}

return &quote, nil
}

// summarizeQuote uses the AI model to generate a concise summary of the quote
func summarizeQuote(quote, author string) (string, error) {
model, err := models.GetModel[openai.ChatModel](modelName)
if err != nil {
return "", err
}

instruction := "You are a helpful assistant that summarizes quotes. Provide a brief, insightful summary that captures the essence and meaning of the quote in 1-2 sentences."

Check notice on line 184 in modus/quickstart.mdx

View check run for this annotation

Trunk.io / Trunk Check

markdownlint(MD013)

[new] Line length
prompt := fmt.Sprintf("Quote: \"%s\" - %s", quote, author)

input, err := model.CreateInput(
openai.NewSystemMessage(instruction),
openai.NewUserMessage(prompt),
)
if err != nil {
return "", err
}

// Set temperature for consistent but creative responses
input.Temperature = 0.7

output, err := model.Invoke(input)
if err != nil {
return "", err
}

return strings.TrimSpace(output.Choices[0].Message.Content), nil
}
```
</Tab>

<Tab title="AssemblyScript">
To add a function, create a new file in the `assembly` directory with the following code:
Create a new file in the `assembly` directory with the following code:

```ts quotes.ts
import { http } from "@hypermode/modus-sdk-as";
import { http, models } from "@hypermode/modus-sdk-as";
import {
OpenAIChatModel,
SystemMessage,
UserMessage,
} from "@hypermode/modus-sdk-as/models/openai/chat";

@json
class Quote {
Expand All @@ -144,10 +223,14 @@

@alias("a")
author!: string;

summary?: string;
}

// modelName identifies the model declared under "models" in modus.json.
const modelName: string = "text-generator";

// this function makes a request to an API that returns data in JSON format,
// and returns a single quote with AI-generated summary
export function getRandomQuote(): Quote {
const request = new http.Request("https://zenquotes.io/api/random");

Expand All @@ -158,8 +241,43 @@
);
}

// the API returns an array of quotes, but we only want the first one
return response.json<Quote[]>()[0];
// the API returns an array of quotes, but we only need the first one
const quotes = response.json<Quote[]>();
if (quotes.length === 0) {
throw new Error("Expected at least one quote in the response, but none were found");
}

// Get the first (and only) quote
const quote = quotes[0];

// Generate AI summary for the quote
try {
quote.summary = summarizeQuote(quote.quote, quote.author);
} catch (error) {
console.log(`Warning: failed to summarize quote by ${quote.author}: ${error}`);
quote.summary = "Summary unavailable";
}

return quote;
}

// summarizeQuote uses the AI model to generate a concise summary of the quote
function summarizeQuote(quote: string, author: string): string {
const model = models.getModel<OpenAIChatModel>(modelName);

const instruction = "You are a helpful assistant that summarizes quotes. Provide a brief, insightful summary that captures the essence and meaning of the quote in 1-2 sentences.";

Check notice on line 268 in modus/quickstart.mdx

View check run for this annotation

Trunk.io / Trunk Check

markdownlint(MD013)

[new] Line length
const prompt = `Quote: "${quote}" - ${author}`;

const input = model.createInput([
new SystemMessage(instruction),
new UserMessage(prompt),
]);

// Set temperature for consistent but creative responses
input.temperature = 0.7;

const output = model.invoke(input);
return output.choices[0].message.content.trim();
}
```

Expand All @@ -172,32 +290,26 @@

</Tab>
</Tabs>
After adding your function, you can use the API Explorer interface to test the `GetRandomQuote` function.

</Step>
<Step title="Add a model">
Modus also supports AI models. You can define new models in your `modus.json` file. Let's add a new meta-llama model:

```json
"models": {
"text-generator": {
"sourceModel": "meta-llama/Llama-3.2-3B-Instruct",
"provider": "hugging-face",
"connection": "hypermode"
}
},
```

</Step>
<Step title="Install the Hyp CLI and log in">
Next, install the Hyp CLI. This allows you to access hosted models on the Hypermode platform.
<Step title="Make your first AI call">
Now that you've integrated the AI model, let's test it! After adding your function, restart your development server:

```sh
npm install -g @hypermode/hyp-cli
modus dev
```

You can now use the `hyp login` command to log in to the Hyp CLI.
This links your project to the Hypermode platform, allowing you to leverage the model in your modus app.
Navigate to the API Explorer at `http://localhost:8686/explorer` and you'll see your `randomQuote` function available to test.

When you call the function, you'll notice that the quote includes three fields:
- `quote`: The original quote text
- `author`: The author's name
- `summary`: An AI-generated summary that captures the essence of the quote

The AI model analyzes the quote and provides insightful context about its meaning, making your app more engaging and informative for users.

Try calling the function multiple times to see how the AI generates different summaries for various quotes!

</Step>

Expand All @@ -213,6 +325,12 @@

![local model tracing](../images/observe-functions/local-inference-history.png)

You can now see detailed information about each AI model call, including:
- Input prompts sent to the model
- Generated responses
- Performance metrics like response time
- Token usage and costs

</Step>
</Steps>

Expand Down