
Commit b102317

fix: aws bedrock client usage
1 parent edecd32 commit b102317

File tree

1 file changed: +6 -6 lines changed

tutorials/generator_all_providers.py

Lines changed: 6 additions & 6 deletions
@@ -26,10 +26,10 @@ def use_all_providers():
     )
     # need to run ollama pull llama3.2:1b first to use this model
 
-    # aws_bedrock_llm = adal.Generator(
-    #     model_client=adal.BedrockAPIClient(),
-    #     model_kwargs={"modelId": "amazon.mistral.instruct-7b"},
-    # )
+    aws_bedrock_llm = adal.Generator(
+        model_client=adal.BedrockAPIClient(),
+        model_kwargs={"model": "mistral.mistral-7b-instruct-v0:2"},
+    )
 
     prompt_kwargs = {"input_str": "What is the meaning of life in one sentence?"}
 
@@ -38,14 +38,14 @@ def use_all_providers():
     anthropic_response = anthropic_llm(prompt_kwargs)
     google_gen_ai_response = google_gen_ai_llm(prompt_kwargs)
     ollama_response = ollama_llm(prompt_kwargs)
-    # aws_bedrock_llm_response = aws_bedrock_llm(prompt_kwargs)
+    aws_bedrock_llm_response = aws_bedrock_llm(prompt_kwargs)
 
     print(f"OpenAI: {openai_response}\n")
     print(f"Groq: {groq_response}\n")
     print(f"Anthropic: {anthropic_response}\n")
     print(f"Google GenAI: {google_gen_ai_response}\n")
     print(f"Ollama: {ollama_response}\n")
-    # print(f"AWS Bedrock: {aws_bedrock_llm_response}\n")
+    print(f"AWS Bedrock: {aws_bedrock_llm_response}\n")
 
 
 if __name__ == "__main__":
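
For reference, the corrected Bedrock call can be exercised on its own. The sketch below is a reconstruction, not part of the commit: it assumes the tutorial's adal alias comes from "import adalflow as adal" and that AWS credentials are already configured in the environment, and the helper name ask_bedrock is hypothetical; only the Generator / BedrockAPIClient arguments and the prompt_kwargs shape are taken directly from the diff.

# Standalone sketch of the fixed Bedrock usage.
# Assumptions: the "adal" alias is `import adalflow as adal`, AWS credentials
# are already configured (e.g. via environment variables), and ask_bedrock is
# a hypothetical helper name.
import adalflow as adal


def ask_bedrock():
    # The fix in this commit: pass the model name under "model" rather than
    # "modelId", using the Bedrock identifier "mistral.mistral-7b-instruct-v0:2".
    aws_bedrock_llm = adal.Generator(
        model_client=adal.BedrockAPIClient(),
        model_kwargs={"model": "mistral.mistral-7b-instruct-v0:2"},
    )
    prompt_kwargs = {"input_str": "What is the meaning of life in one sentence?"}
    response = aws_bedrock_llm(prompt_kwargs)
    print(f"AWS Bedrock: {response}\n")


if __name__ == "__main__":
    ask_bedrock()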
