Skip to content

Commit 80a7a46

Browse files
committed
go v2 remove EoL models
1 parent 9daa6e3 commit 80a7a46

File tree

3 files changed

+3
-195
lines changed

3 files changed

+3
-195
lines changed

gov2/bedrock-runtime/actions/invoke_model.go

Lines changed: 0 additions & 121 deletions
Original file line numberDiff line numberDiff line change
@@ -80,63 +80,6 @@ func (wrapper InvokeModelWrapper) InvokeClaude(ctx context.Context, prompt strin
8080

8181
// snippet-end:[gov2.bedrock-runtime.InvokeClaude]
8282

83-
// snippet-start:[gov2.bedrock-runtime.InvokeJurassic2]
84-
85-
// Each model provider has their own individual request and response formats.
86-
// For the format, ranges, and default values for AI21 Labs Jurassic-2, refer to:
87-
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-jurassic2.html
88-
89-
type Jurassic2Request struct {
90-
Prompt string `json:"prompt"`
91-
MaxTokens int `json:"maxTokens,omitempty"`
92-
Temperature float64 `json:"temperature,omitempty"`
93-
}
94-
95-
type Jurassic2Response struct {
96-
Completions []Completion `json:"completions"`
97-
}
98-
type Completion struct {
99-
Data Data `json:"data"`
100-
}
101-
type Data struct {
102-
Text string `json:"text"`
103-
}
104-
105-
// Invokes AI21 Labs Jurassic-2 on Amazon Bedrock to run an inference using the input
106-
// provided in the request body.
107-
func (wrapper InvokeModelWrapper) InvokeJurassic2(ctx context.Context, prompt string) (string, error) {
108-
modelId := "ai21.j2-mid-v1"
109-
110-
body, err := json.Marshal(Jurassic2Request{
111-
Prompt: prompt,
112-
MaxTokens: 200,
113-
Temperature: 0.5,
114-
})
115-
116-
if err != nil {
117-
log.Fatal("failed to marshal", err)
118-
}
119-
120-
output, err := wrapper.BedrockRuntimeClient.InvokeModel(ctx, &bedrockruntime.InvokeModelInput{
121-
ModelId: aws.String(modelId),
122-
ContentType: aws.String("application/json"),
123-
Body: body,
124-
})
125-
126-
if err != nil {
127-
ProcessError(err, modelId)
128-
}
129-
130-
var response Jurassic2Response
131-
if err := json.Unmarshal(output.Body, &response); err != nil {
132-
log.Fatal("failed to unmarshal", err)
133-
}
134-
135-
return response.Completions[0].Data.Text, nil
136-
}
137-
138-
// snippet-end:[gov2.bedrock-runtime.InvokeJurassic2]
139-
14083
// snippet-start:[gov2.bedrock-runtime.InvokeTitanImage]
14184

14285
type TitanImageRequest struct {
@@ -207,70 +150,6 @@ func (wrapper InvokeModelWrapper) InvokeTitanImage(ctx context.Context, prompt s
207150

208151
// snippet-end:[gov2.bedrock-runtime.InvokeTitanImage]
209152

210-
// snippet-start:[gov2.bedrock-runtime.InvokeTitanText]
211-
212-
// Each model provider has their own individual request and response formats.
213-
// For the format, ranges, and default values for Amazon Titan Text, refer to:
214-
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-titan-text.html
215-
type TitanTextRequest struct {
216-
InputText string `json:"inputText"`
217-
TextGenerationConfig TextGenerationConfig `json:"textGenerationConfig"`
218-
}
219-
220-
type TextGenerationConfig struct {
221-
Temperature float64 `json:"temperature"`
222-
TopP float64 `json:"topP"`
223-
MaxTokenCount int `json:"maxTokenCount"`
224-
StopSequences []string `json:"stopSequences,omitempty"`
225-
}
226-
227-
type TitanTextResponse struct {
228-
InputTextTokenCount int `json:"inputTextTokenCount"`
229-
Results []Result `json:"results"`
230-
}
231-
232-
type Result struct {
233-
TokenCount int `json:"tokenCount"`
234-
OutputText string `json:"outputText"`
235-
CompletionReason string `json:"completionReason"`
236-
}
237-
238-
func (wrapper InvokeModelWrapper) InvokeTitanText(ctx context.Context, prompt string) (string, error) {
239-
modelId := "amazon.titan-text-express-v1"
240-
241-
body, err := json.Marshal(TitanTextRequest{
242-
InputText: prompt,
243-
TextGenerationConfig: TextGenerationConfig{
244-
Temperature: 0,
245-
TopP: 1,
246-
MaxTokenCount: 4096,
247-
},
248-
})
249-
250-
if err != nil {
251-
log.Fatal("failed to marshal", err)
252-
}
253-
254-
output, err := wrapper.BedrockRuntimeClient.InvokeModel(ctx, &bedrockruntime.InvokeModelInput{
255-
ModelId: aws.String(modelId),
256-
ContentType: aws.String("application/json"),
257-
Body: body,
258-
})
259-
260-
if err != nil {
261-
ProcessError(err, modelId)
262-
}
263-
264-
var response TitanTextResponse
265-
if err := json.Unmarshal(output.Body, &response); err != nil {
266-
log.Fatal("failed to unmarshal", err)
267-
}
268-
269-
return response.Results[0].OutputText, nil
270-
}
271-
272-
// snippet-end:[gov2.bedrock-runtime.InvokeTitanText]
273-
274153
func ProcessError(err error, modelId string) {
275154
errMsg := err.Error()
276155
if strings.Contains(errMsg, "no such host") {

gov2/bedrock-runtime/actions/invoke_model_test.go

Lines changed: 0 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,7 @@ import (
1818
)
1919

2020
const CLAUDE_MODEL_ID = "anthropic.claude-v2"
21-
const JURASSIC2_MODEL_ID = "ai21.j2-mid-v1"
2221
const TITAN_IMAGE_MODEL_ID = "amazon.titan-image-generator-v1"
23-
const TITAN_TEXT_EXPRESS_MODEL_ID = "amazon.titan-text-express-v1"
2422

2523
const prompt = "A test prompt"
2624

@@ -41,25 +39,13 @@ func CallInvokeModelActions(sdkConfig aws.Config) {
4139
}
4240
log.Println(claudeCompletion)
4341

44-
jurassic2Completion, err := wrapper.InvokeJurassic2(ctx, prompt)
45-
if err != nil {
46-
panic(err)
47-
}
48-
log.Println(jurassic2Completion)
49-
5042
seed := int64(0)
5143
titanImageCompletion, err := wrapper.InvokeTitanImage(ctx, prompt, seed)
5244
if err != nil {
5345
panic(err)
5446
}
5547
log.Println(titanImageCompletion)
5648

57-
titanTextCompletion, err := wrapper.InvokeTitanText(ctx, prompt)
58-
if err != nil {
59-
panic(err)
60-
}
61-
log.Println(titanTextCompletion)
62-
6349
log.Printf("Thanks for watching!")
6450
}
6551

@@ -73,9 +59,7 @@ type InvokeModelActionsTest struct{}
7359
func (scenTest *InvokeModelActionsTest) SetupDataAndStubs() []testtools.Stub {
7460
var stubList []testtools.Stub
7561
stubList = append(stubList, stubInvokeModel(CLAUDE_MODEL_ID))
76-
stubList = append(stubList, stubInvokeModel(JURASSIC2_MODEL_ID))
7762
stubList = append(stubList, stubInvokeModel(TITAN_IMAGE_MODEL_ID))
78-
stubList = append(stubList, stubInvokeModel(TITAN_TEXT_EXPRESS_MODEL_ID))
7963

8064
return stubList
8165
}
@@ -102,18 +86,6 @@ func stubInvokeModel(modelId string) testtools.Stub {
10286
Completion: "A fake response",
10387
})
10488

105-
case JURASSIC2_MODEL_ID:
106-
request, _ = json.Marshal(Jurassic2Request{
107-
Prompt: prompt,
108-
MaxTokens: 200,
109-
Temperature: 0.5,
110-
})
111-
response, _ = json.Marshal(Jurassic2Response{
112-
Completions: []Completion{
113-
{Data: Data{Text: "A fake response"}},
114-
},
115-
})
116-
11789
case TITAN_IMAGE_MODEL_ID:
11890
request, _ = json.Marshal(TitanImageRequest{
11991
TaskType: "TEXT_IMAGE",
@@ -133,23 +105,6 @@ func stubInvokeModel(modelId string) testtools.Stub {
133105
Images: []string{"FakeBase64String=="},
134106
})
135107

136-
case TITAN_TEXT_EXPRESS_MODEL_ID:
137-
request, _ = json.Marshal(TitanTextRequest{
138-
InputText: prompt,
139-
TextGenerationConfig: TextGenerationConfig{
140-
Temperature: 0,
141-
TopP: 1,
142-
MaxTokenCount: 4096,
143-
},
144-
})
145-
response, _ = json.Marshal(TitanTextResponse{
146-
Results: []Result{
147-
{
148-
OutputText: "A fake response",
149-
},
150-
},
151-
})
152-
153108
default:
154109
return testtools.Stub{}
155110
}

gov2/bedrock-runtime/scenarios/scenario_invoke_models.go

Lines changed: 3 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -25,11 +25,9 @@ import (
2525
// to invoke various foundation models for text and image generation
2626
//
2727
// 1. Generate text with Anthropic Claude 2
28-
// 2. Generate text with AI21 Labs Jurassic-2
29-
// 3. Generate text with Meta Llama 2 Chat
30-
// 4. Generate text and asynchronously process the response stream with Anthropic Claude 2
31-
// 5. Generate an image with the Amazon Titan image generation model
32-
// 6. Generate text with Amazon Titan Text G1 Express model
28+
// 2. Generate text with Meta Llama 2 Chat
29+
// 3. Generate text and asynchronously process the response stream with Anthropic Claude 2
30+
// 4. Generate an image with the Amazon Titan image generation model
3331
type InvokeModelsScenario struct {
3432
sdkConfig aws.Config
3533
invokeModelWrapper actions.InvokeModelWrapper
@@ -70,10 +68,6 @@ func (scenario InvokeModelsScenario) Run(ctx context.Context) {
7068
log.Printf("Invoking Claude with prompt: %v\n", text2textPrompt)
7169
scenario.InvokeClaude(ctx, text2textPrompt)
7270

73-
log.Println(strings.Repeat("-", 77))
74-
log.Printf("Invoking Jurassic-2 with prompt: %v\n", text2textPrompt)
75-
scenario.InvokeJurassic2(ctx, text2textPrompt)
76-
7771
log.Println(strings.Repeat("=", 77))
7872
log.Printf("Now, let's invoke Claude with the asynchronous client and process the response stream:\n\n")
7973

@@ -91,10 +85,6 @@ func (scenario InvokeModelsScenario) Run(ctx context.Context) {
9185
log.Printf("Invoking Amazon Titan with prompt: %v\n", text2ImagePrompt)
9286
scenario.InvokeTitanImage(ctx, text2ImagePrompt, seed)
9387

94-
log.Println(strings.Repeat("-", 77))
95-
log.Printf("Invoking Titan Text Express with prompt: %v\n", text2textPrompt)
96-
scenario.InvokeTitanText(ctx, text2textPrompt)
97-
9888
log.Println(strings.Repeat("=", 77))
9989
log.Println("Thanks for watching!")
10090
log.Println(strings.Repeat("=", 77))
@@ -108,14 +98,6 @@ func (scenario InvokeModelsScenario) InvokeClaude(ctx context.Context, prompt st
10898
log.Printf("\nClaude : %v\n", strings.TrimSpace(completion))
10999
}
110100

111-
func (scenario InvokeModelsScenario) InvokeJurassic2(ctx context.Context, prompt string) {
112-
completion, err := scenario.invokeModelWrapper.InvokeJurassic2(ctx, prompt)
113-
if err != nil {
114-
panic(err)
115-
}
116-
log.Printf("\nJurassic-2 : %v\n", strings.TrimSpace(completion))
117-
}
118-
119101
func (scenario InvokeModelsScenario) InvokeWithResponseStream(ctx context.Context, prompt string) {
120102
log.Println("\nClaude with response stream:")
121103
_, err := scenario.responseStreamWrapper.InvokeModelWithResponseStream(ctx, prompt)
@@ -134,14 +116,6 @@ func (scenario InvokeModelsScenario) InvokeTitanImage(ctx context.Context, promp
134116
fmt.Printf("The generated image has been saved to %s\n", imagePath)
135117
}
136118

137-
func (scenario InvokeModelsScenario) InvokeTitanText(ctx context.Context, prompt string) {
138-
completion, err := scenario.invokeModelWrapper.InvokeTitanText(ctx, prompt)
139-
if err != nil {
140-
panic(err)
141-
}
142-
log.Printf("\nTitan Text Express : %v\n\n", strings.TrimSpace(completion))
143-
}
144-
145119
// snippet-end:[gov2.bedrock-runtime.Scenario_InvokeModels]
146120

147121
func saveImage(base64ImageData string, modelId string) string {

0 commit comments

Comments (0)