Skip to content

Commit 24027a3

Browse files
committed
Add ConverseStream
1 parent b4a7f1e commit 24027a3

File tree

6 files changed

+127
-49
lines changed

6 files changed

+127
-49
lines changed

kotlin/services/bedrock-runtime/src/main/kotlin/com/example/bedrockruntime/models/amazon/nova/text/Converse.kt

Lines changed: 9 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -18,56 +18,38 @@ import aws.sdk.kotlin.services.bedrockruntime.model.Message
1818
* - Create a message
1919
* - Configure and send a request
2020
* - Process the response
21-
*
22-
* @throws RuntimeException if the model invocation fails
2321
*/
2422
suspend fun main() {
2523
converse().also { println(it) }
2624
}
2725

2826
suspend fun converse(): String {
29-
// Step 1: Create the Amazon Bedrock runtime client
30-
// The runtime client handles the communication with AI models on Amazon Bedrock
27+
// Create and configure the Bedrock runtime client
3128
BedrockRuntimeClient { region = "us-east-1" }.use { client ->
3229

33-
// Step 2: Specify which model to use
34-
// Available Amazon Nova models and their characteristics:
35-
// - Amazon Nova Micro: Text-only model optimized for lowest latency and cost
36-
// - Amazon Nova Lite: Fast, low-cost multimodal model for image, video, and text
37-
// - Amazon Nova Pro: Advanced multimodal model balancing accuracy, speed, and cost
38-
//
3930
// For the latest available models, see:
4031
// https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
4132
val modelId = "amazon.nova-lite-v1:0"
4233

43-
// Step 3: Create the message
44-
// The message includes the text prompt and specifies that it comes from the user
45-
val inputText = "Describe the purpose of a 'hello world' program in one line."
34+
// Create the message with the user's prompt
35+
val prompt = "Describe the purpose of a 'hello world' program in one line."
4636
val message = Message {
4737
role = ConversationRole.User
48-
content = listOf(ContentBlock.Text(inputText))
38+
content = listOf(ContentBlock.Text(prompt))
4939
}
5040

51-
// Step 4: Configure the request
52-
// Optional parameters to control the model's response:
53-
// - maxTokens: maximum number of tokens to generate
54-
// - temperature: randomness (max: 1.0, default: 0.7)
55-
// OR
56-
// - topP: diversity of word choice (max: 1.0, default: 0.9)
57-
// Note: Use either temperature OR topP, but not both
41+
// Configure the request with optional model parameters
5842
val request = ConverseRequest {
5943
this.modelId = modelId
6044
messages = listOf(message)
6145
inferenceConfig {
62-
maxTokens = 500 // The maximum response length
63-
temperature = 0.5F // Using temperature for randomness control
64-
// topP = 0.8F // Alternative: use topP instead of temperature
46+
maxTokens = 500 // Maximum response length
47+
temperature = 0.5F // Lower values: more focused output
48+
// topP = 0.8F // Alternative to temperature
6549
}
6650
}
6751

68-
// Step 5: Send and process the request
69-
// - Send the request to the model
70-
// - Extract and return the generated text
52+
// Send the request and process the model's response
7153
runCatching {
7254
val response = client.converse(request)
7355
return response.output!!.asMessage().content.first().asText()
Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,79 @@
1+
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2+
// SPDX-License-Identifier: Apache-2.0
3+
4+
package com.example.bedrockruntime.models.amazon.nova.text
5+
6+
// snippet-start:[bedrock-runtime.kotlin.ConverseStream_AmazonNovaText]
7+
8+
import aws.sdk.kotlin.services.bedrockruntime.BedrockRuntimeClient
9+
import aws.sdk.kotlin.services.bedrockruntime.model.*
10+
11+
/**
12+
* This example demonstrates how to use the Amazon Nova foundation models
13+
* to generate streaming text responses.
14+
* It shows how to:
15+
* - Set up the Amazon Bedrock runtime client
16+
* - Create a message with a prompt
17+
* - Configure a streaming request with parameters
18+
* - Process the response stream in real time
19+
*/
20+
suspend fun main() {
    // converseStream prints each chunk as it arrives; the complete
    // response it returns is intentionally ignored here since the
    // text has already been streamed to stdout.
    converseStream()
}
23+
24+
/**
 * Sends a prompt to Amazon Nova via the ConverseStream API, prints each text
 * chunk to stdout as it arrives, and returns the complete generated text.
 *
 * @return the full response text accumulated from the stream
 * @throws RuntimeException if the model invocation fails (cancellation is
 *         rethrown as-is so coroutine cancellation keeps working)
 */
suspend fun converseStream(): String {
    // A buffer to collect the complete response
    val completeResponseBuffer = StringBuilder()

    // Create and configure the Bedrock runtime client
    BedrockRuntimeClient { region = "us-east-1" }.use { client ->

        // For the latest available models, see:
        // https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
        val modelId = "amazon.nova-lite-v1:0"

        // Create the message with the user's prompt
        val prompt = "Describe the purpose of a 'hello world' program in a paragraph."
        val message = Message {
            role = ConversationRole.User
            content = listOf(ContentBlock.Text(prompt))
        }

        // Configure the request with optional model parameters
        val request = ConverseStreamRequest {
            this.modelId = modelId
            messages = listOf(message)
            inferenceConfig {
                maxTokens = 500 // Maximum response length
                temperature = 0.5F // Lower values: more focused output
                // topP = 0.8F // Alternative to temperature
            }
        }

        // Process the streaming response
        runCatching {
            client.converseStream(request) { response ->
                response.stream?.collect { chunk ->
                    when (chunk) {
                        is ConverseStreamOutput.ContentBlockDelta -> {
                            // Process each text chunk as it arrives
                            chunk.value.delta?.asText()?.let { text ->
                                print(text)
                                System.out.flush() // Ensure immediate output
                                completeResponseBuffer.append(text)
                            }
                        }
                        else -> {} // Other output block types can be handled as needed
                    }
                }
            }
        }.onFailure { error ->
            // runCatching catches Throwable, including CancellationException.
            // Rethrow cancellation unwrapped so structured concurrency is not
            // broken by wrapping it in a RuntimeException.
            if (error is kotlin.coroutines.cancellation.CancellationException) throw error
            error.message?.let { e -> System.err.println("ERROR: Can't invoke '$modelId'. Reason: $e") }
            throw RuntimeException("Failed to generate text with model $modelId: $error", error)
        }
    }

    return completeResponseBuffer.toString()
}
78+
79+
// snippet-end:[bedrock-runtime.kotlin.ConverseStream_AmazonNovaText]

kotlin/services/bedrock-runtime/src/main/kotlin/com/example/bedrockruntime/models/amazon/titan/text/InvokeModel.kt

Lines changed: 3 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,6 @@ import kotlinx.serialization.json.Json
1717
* - Create a request payload
1818
* - Configure and send a request
1919
* - Process the response
20-
*
21-
* @throws RuntimeException if the model invocation fails
2220
*/
2321
suspend fun main() {
2422
invokeModel().also { println(it) }
@@ -28,28 +26,14 @@ suspend fun main() {
2826
private val json = Json { ignoreUnknownKeys = true }
2927

3028
suspend fun invokeModel(): String {
31-
// Step 1: Create the Amazon Bedrock runtime client
32-
// The runtime client handles the communication with AI models on Amazon Bedrock
29+
// Create and configure the Bedrock runtime client
3330
BedrockRuntimeClient { region = "us-east-1" }.use { client ->
3431

35-
// Step 2: Specify which model to use
36-
// Available Amazon Titan models and their characteristics:
37-
// - Titan Text Lite: Fast, cost-effective text generation
38-
// - Titan Text Express: Balanced performance and cost
39-
// - Titan Text Large: Advanced capabilities for complex tasks
40-
//
4132
// For the latest available models, see:
4233
// https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
4334
val modelId = "amazon.titan-text-lite-v1"
4435

45-
// Step 3: Create the request payload
46-
// Optional parameters to control the model's response:
47-
// - maxTokenCount: maximum number of tokens to generate
48-
// - temperature: randomness (max: 1.0, default: 0.7)
49-
// OR
50-
// - topP: diversity of word choice (max: 1.0, default: 0.9)
51-
// Note: Use either temperature OR topP, but not both
52-
//
36+
// Create the request payload with optional configuration parameters
5337
// For detailed parameter descriptions, see:
5438
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-titan-text.html
5539
val prompt = "Describe the purpose of a 'hello world' program in one line."
@@ -63,10 +47,7 @@ suspend fun invokeModel(): String {
6347
}
6448
""".trimIndent()
6549

66-
// Step 4: Send and process the request
67-
// - Send the request to the model
68-
// - Parse the JSON response
69-
// - Extract and return the generated text
50+
// Send the request and process the model's response
7051
runCatching {
7152
// Send the request to the model
7253
val response = client.invokeModel(InvokeModelRequest {

kotlin/services/bedrock-runtime/src/test/kotlin/models/TestConverse.kt

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,15 @@ import com.example.bedrockruntime.models.amazon.nova.text.converse
77

88
import java.util.stream.Stream
99

10+
/**
11+
* Test class for text generation on Amazon Bedrock using the Converse API.
12+
*/
1013
class TestConverse : AbstractModelTest() {
14+
/**
15+
* Provides test configurations for Amazon Bedrock text generation models.
16+
* Creates test cases that validate each model's ability to generate
17+
* and return text responses.
18+
*/
1119
override fun modelProvider(): Stream<ModelTest> {
1220
return listOf(
1321
ModelTest("Amazon Nova", ::converse)
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
package models
2+
3+
import com.example.bedrockruntime.models.amazon.nova.text.converseStream
4+
import java.util.stream.Stream
5+
6+
/**
7+
* Test class for streaming text generation on Amazon Bedrock using the ConverseStream API.
8+
*/
9+
class TestConverseStream : AbstractModelTest() {
10+
/**
11+
* Provides test configurations for Amazon Bedrock models that support streaming.
12+
* Creates test cases that validate each model's ability to generate
13+
* and return streaming text responses.
14+
*/
15+
override fun modelProvider(): Stream<ModelTest> {
16+
return listOf(
17+
ModelTest("Amazon Nova", ::converseStream)
18+
).stream()
19+
}
20+
}

kotlin/services/bedrock-runtime/src/test/kotlin/models/TestInvokeModel.kt

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,15 @@ package models
33
import com.example.bedrockruntime.models.amazon.titan.text.invokeModel
44
import java.util.stream.Stream
55

6+
/**
7+
* Test class for generative AI models on Amazon Bedrock using the InvokeModel API.
8+
*/
69
class TestInvokeModel : AbstractModelTest() {
10+
/**
11+
* Provides test configurations for generative AI models on Amazon Bedrock.
12+
* Creates test cases that validate each model's ability to generate
13+
* and return text or byte[] responses.
14+
*/
715
override fun modelProvider(): Stream<ModelTest> {
816
return listOf(
917
ModelTest("Amazon Titan Text", ::invokeModel)

0 commit comments

Comments (0)