Skip to content

Commit fdd2e21

Browse files
committed
chore: enhance OpenAI API integration with detailed documentation and update CI/CD workflow for NPM publishing
1 parent 1723cee commit fdd2e21

File tree

4 files changed

+120
-0
lines changed

4 files changed

+120
-0
lines changed

.github/scripts/publish-npm.sh

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
#!/bin/bash
# Publish a cargo component as an NPM package under the @wassemble scope.
#
# Usage: publish-npm.sh <component-name>
#   <component-name> — crate directory name under crates/ (may contain hyphens)
#
# Requires: cargo-component, wit-bindgen, jco (via npx), and npm with
# NODE_AUTH_TOKEN set so `npm publish --provenance` can authenticate.
#
# Fail fast: without -e a broken build would fall through and still run
# `npm publish`, shipping a stale or missing wasm artifact. -u catches a
# forgotten argument; pipefail surfaces failures inside pipelines.
set -euo pipefail

component="$1"

# Build the WebAssembly component in release mode.
cargo component build -p "$component" --release

# Cargo replaces hyphens with underscores in built artifact names.
safe_component="$(echo "$component" | tr '-' '_')"
path="target/wasm32-wasip1/release/$safe_component.wasm"

# Extract the crate version from Cargo.toml so the NPM package version
# matches the published crate. head -n1 guards against any additional
# `version = ` lines (e.g. in dependency tables) matching the grep.
version="$(grep '^version = ' "crates/$component/Cargo.toml" | head -n1 | cut -d'"' -f2)"

# Rebuild the NPM package directory from a clean slate.
rm -rf "npm/$component"
mkdir -p "npm/$component"
# Subshell keeps the cwd change local instead of relying on `cd ../..`.
(cd "npm/$component" && npm init --init-version "$version" --scope=@wassemble -y)

# Generate a README from the WIT world and transpile the wasm for JS consumers.
wit-bindgen markdown "crates/$component/wit/world.wit" --html-in-md
mv "$component.md" "npm/$component/README.md"
npx jco transpile "$path" -o "npm/$component"

# Publish with provenance attestation (requires id-token in CI).
cd "npm/$component"
npm publish --access public --provenance

.github/workflows/cd.yml

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ permissions:
1212
contents: read
1313
packages: write
1414
id-token: write
15+
actions: read
1516

1617
jobs:
1718
detect-changes:
@@ -32,6 +33,7 @@ jobs:
3233
runs-on: ubuntu-latest
3334
env:
3435
GH_TOKEN: ${{ github.token }}
36+
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
3537
strategy:
3638
matrix:
3739
component: ${{ fromJson(needs.detect-changes.outputs.matrix).component }}
@@ -55,3 +57,12 @@ jobs:
5557
# Publish
5658
- run: |
5759
cargo component publish -p ${{ matrix.component }} --config config.toml
60+
61+
# NPM package
62+
- uses: actions/setup-node@v4
63+
with:
64+
node-version: '20.x'
65+
registry-url: 'https://registry.npmjs.org'
66+
- run: .github/scripts/publish-npm.sh ${{ matrix.component }}
67+
env:
68+
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

crates/openai/src/bindings.rs

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,13 @@
11
// Generated by `wit-bindgen` 0.41.0. DO NOT EDIT!
22
// Options used:
33
// * runtime_path: "wit_bindgen_rt"
4+
/// Represents a single message in a chat conversation
5+
/// Used for both user input and assistant responses
46
#[derive(Clone)]
57
pub struct ChatMessage {
8+
/// The role of the message sender: "user", "assistant", or "system"
69
pub role: _rt::String,
10+
/// The content/body of the message
711
pub content: _rt::String,
812
}
913
impl ::core::fmt::Debug for ChatMessage {
@@ -14,12 +18,21 @@ impl ::core::fmt::Debug for ChatMessage {
1418
.finish()
1519
}
1620
}
21+
/// Configuration for a chat completion request to OpenAI
22+
/// Contains the conversation history and generation parameters
1723
#[derive(Clone)]
1824
pub struct ChatCompletion {
25+
/// Unique identifier for this completion request
1926
pub id: _rt::String,
27+
/// The OpenAI model to use (e.g., "gpt-3.5-turbo", "gpt-4")
2028
pub model: _rt::String,
29+
/// List of messages in the conversation history
2130
pub messages: _rt::Vec<ChatMessage>,
31+
/// Controls randomness in the response (0.0 = deterministic, 2.0 = very random)
32+
/// Optional parameter - if not provided, uses model default
2233
pub temperature: Option<f64>,
34+
/// Maximum number of tokens to generate in the response
35+
/// Optional parameter - if not provided, uses model default
2336
pub max_tokens: Option<u32>,
2437
}
2538
impl ::core::fmt::Debug for ChatCompletion {
@@ -33,11 +46,17 @@ impl ::core::fmt::Debug for ChatCompletion {
3346
.finish()
3447
}
3548
}
49+
/// Response from OpenAI's chat completion API
50+
/// Contains the generated text and metadata about the completion
3651
#[derive(Clone)]
3752
pub struct ChatResponse {
53+
/// Unique identifier for this completion response
3854
pub id: _rt::String,
55+
/// The model that was used to generate the response
3956
pub model: _rt::String,
57+
/// The generated text content from the assistant
4058
pub content: _rt::String,
59+
/// Reason why the generation stopped: "stop", "length", "content_filter", etc.
4160
pub finish_reason: _rt::String,
4261
}
4362
impl ::core::fmt::Debug for ChatResponse {
@@ -50,9 +69,13 @@ impl ::core::fmt::Debug for ChatResponse {
5069
.finish()
5170
}
5271
}
72+
/// Configuration for an embedding request to OpenAI
73+
/// Used to convert text into numerical vector representations
5374
#[derive(Clone)]
5475
pub struct Embedding {
76+
/// The OpenAI embedding model to use (e.g., "text-embedding-ada-002")
5577
pub model: _rt::String,
78+
/// The text input to convert into an embedding vector
5679
pub input: _rt::String,
5780
}
5881
impl ::core::fmt::Debug for Embedding {
@@ -63,9 +86,14 @@ impl ::core::fmt::Debug for Embedding {
6386
.finish()
6487
}
6588
}
89+
/// Response from OpenAI's embedding API
90+
/// Contains the numerical vector representation of the input text
6691
#[derive(Clone)]
6792
pub struct EmbeddingResponse {
93+
/// The model that was used to generate the embedding
6894
pub model: _rt::String,
95+
/// The numerical vector representation of the input text
96+
/// Each number represents a dimension in the embedding space
6997
pub embedding: _rt::Vec<f64>,
7098
}
7199
impl ::core::fmt::Debug for EmbeddingResponse {
@@ -302,7 +330,23 @@ pub unsafe fn __post_return_create_embedding<T: Guest>(arg0: *mut u8) {
302330
_rt::cabi_dealloc(base4, len4 * 8, 8);
303331
}
304332
pub trait Guest {
333+
/// Creates a chat completion using OpenAI's API
334+
/// Sends a conversation to OpenAI and returns the assistant's response
335+
///
336+
/// Parameters:
337+
/// - api-key: Your OpenAI API key for authentication
338+
/// - completion: The chat completion configuration and conversation
339+
///
340+
/// Returns: The generated response from the assistant
305341
fn create_chat_completion(api_key: _rt::String, completion: ChatCompletion) -> ChatResponse;
342+
/// Creates an embedding vector using OpenAI's API
343+
/// Converts text into a numerical representation for semantic analysis
344+
///
345+
/// Parameters:
346+
/// - api-key: Your OpenAI API key for authentication
347+
/// - embedding: The embedding configuration and input text
348+
///
349+
/// Returns: The numerical vector representation of the input text
306350
fn create_embedding(api_key: _rt::String, embedding: Embedding) -> EmbeddingResponse;
307351
}
308352
#[doc(hidden)]

crates/openai/wit/world.wit

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,36 +1,82 @@
package wassemble:openai@0.1.0;

/// World exposing OpenAI chat-completion and embedding operations.
world openai {
    /// One message within a chat conversation. The same record shape is
    /// used whether the message came from the user or from the assistant.
    record chat-message {
        /// Sender of the message: "user", "assistant", or "system"
        role: string,
        /// Text body of the message
        content: string,
    }

    /// Request payload for a chat completion: the conversation so far
    /// plus the generation parameters to apply.
    record chat-completion {
        /// Caller-chosen unique identifier for this request
        id: string,
        /// OpenAI model name to use (e.g. "gpt-3.5-turbo", "gpt-4")
        model: string,
        /// Conversation history, oldest message first
        messages: list<chat-message>,
        /// Sampling temperature: 0.0 is deterministic, 2.0 is very random.
        /// Omit to use the model's default.
        temperature: option<f64>,
        /// Upper bound on tokens generated in the reply.
        /// Omit to use the model's default.
        max-tokens: option<u32>,
    }

    /// Result of a chat completion: the generated text plus metadata
    /// reported by the API.
    record chat-response {
        /// Identifier assigned to this completion
        id: string,
        /// Model that actually produced the response
        model: string,
        /// Assistant-generated text
        content: string,
        /// Why generation ended: "stop", "length", "content_filter", etc.
        finish-reason: string,
    }

    /// Request payload for turning a piece of text into a numerical
    /// vector representation.
    record embedding {
        /// OpenAI embedding model to use (e.g. "text-embedding-ada-002")
        model: string,
        /// Text to embed
        input: string,
    }

    /// Result of an embedding request: the vector for the input text.
    record embedding-response {
        /// Model that produced the embedding
        model: string,
        /// Embedding vector; each element is one dimension of the
        /// embedding space
        embedding: list<f64>,
    }

    /// Send a conversation to OpenAI and return the assistant's reply.
    ///
    /// Parameters:
    /// - api-key: OpenAI API key used for authentication
    /// - completion: conversation history and generation parameters
    ///
    /// Returns the generated chat-response.
    export create-chat-completion: func(api-key: string, completion: chat-completion) -> chat-response;

    /// Convert text into an embedding vector via OpenAI's API,
    /// suitable for semantic search and similarity analysis.
    ///
    /// Parameters:
    /// - api-key: OpenAI API key used for authentication
    /// - embedding: model selection and input text
    ///
    /// Returns the embedding-response holding the vector.
    export create-embedding: func(api-key: string, embedding: embedding) -> embedding-response;
}

0 commit comments

Comments
 (0)