Showing 3 changed files with 305 additions and 0 deletions.
@@ -0,0 +1,96 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Shows how to send a document as part of a message to Anthropic Claude 3 Sonnet (on demand).
Source: https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html
"""

import logging

import boto3
from botocore.exceptions import ClientError

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def generate_message(bedrock_client, model_id, input_text, input_document):
    """
    Sends a message to a model.
    Args:
        bedrock_client: The Boto3 Bedrock runtime client.
        model_id (str): The model ID to use.
        input_text (str): The input message.
        input_document (str): The path to the input document.
    Returns:
        response (JSON): The conversation that the model generated.
    """

    logger.info("Generating message with model %s", model_id)

    # Read the document as bytes so it can be attached to the message.
    with open(input_document, "rb") as f:
        document_bytes = f.read()

    # Message to send.
    message = {
        "role": "user",
        "content": [
            {"text": input_text},
            {
                "document": {
                    "name": "MyDocument",
                    "format": "pdf",
                    "source": {"bytes": document_bytes},
                }
            },
        ],
    }

    messages = [message]

    # Send the message.
    response = bedrock_client.converse(modelId=model_id, messages=messages)

    return response


def main():
    """
    Entrypoint for Anthropic Claude 3 Sonnet example.
    """

    logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")

    model_id = "anthropic.claude-3-sonnet-20240229-v1:0"
    input_text = "What's in this document?"
    input_document = "path/to/document.pdf"

    try:
        bedrock_client = boto3.client(service_name="bedrock-runtime")

        response = generate_message(bedrock_client, model_id, input_text, input_document)

        output_message = response["output"]["message"]

        print(f"Role: {output_message['role']}")

        for content in output_message["content"]:
            print(f"Text: {content['text']}")

        token_usage = response["usage"]
        print(f"Input tokens: {token_usage['inputTokens']}")
        print(f"Output tokens: {token_usage['outputTokens']}")
        print(f"Total tokens: {token_usage['totalTokens']}")
        print(f"Stop reason: {response['stopReason']}")

    except ClientError as err:
        message = err.response["Error"]["Message"]
        logger.error("A client error occurred: %s", message)
        print(f"A client error occurred: {message}")

    else:
        print(f"Finished generating text with model {model_id}.")


if __name__ == "__main__":
    main()
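As an aside, the file above only uses the synchronous converse call. A minimal streaming sketch follows; it is not part of this commit, and it assumes the bedrock_client, model_id, and messages built above plus the ConverseStream event names described in the same user guide page.

# Minimal streaming sketch (assumption: ConverseStream event shapes as documented
# in the Bedrock user guide; not part of this commit).
def stream_message(bedrock_client, model_id, messages):
    """Streams the model response and prints text deltas as they arrive."""
    response = bedrock_client.converse_stream(modelId=model_id, messages=messages)
    for event in response["stream"]:
        if "contentBlockDelta" in event:
            # Partial model text arrives in contentBlockDelta events.
            print(event["contentBlockDelta"]["delta"]["text"], end="")
        elif "metadata" in event:
            # The trailing metadata event carries token usage.
            print()
            print("Total tokens:", event["metadata"]["usage"]["totalTokens"])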
@@ -0,0 +1,90 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Shows how to send an image with the Converse API to Anthropic Claude 3 Sonnet (on demand).
Source: https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html
"""

import logging

import boto3
from botocore.exceptions import ClientError

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def generate_conversation(bedrock_client, model_id, input_text, input_image):
    """
    Sends a message to a model.
    Args:
        bedrock_client: The Boto3 Bedrock runtime client.
        model_id (str): The model ID to use.
        input_text (str): The input message.
        input_image (str): The path to the input image.
    Returns:
        response (JSON): The conversation that the model generated.
    """

    logger.info("Generating message with model %s", model_id)

    # Read the image as bytes.
    with open(input_image, "rb") as f:
        image = f.read()

    # Message to send.
    message = {
        "role": "user",
        "content": [{"text": input_text}, {"image": {"format": "png", "source": {"bytes": image}}}],
    }

    messages = [message]

    # Send the message.
    response = bedrock_client.converse(modelId=model_id, messages=messages)

    return response


def main():
    """
    Entrypoint for Anthropic Claude 3 Sonnet example.
    """

    logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")

    model_id = "anthropic.claude-3-sonnet-20240229-v1:0"
    input_text = "What's in this image?"
    input_image = "path/to/image.png"

    try:
        bedrock_client = boto3.client(service_name="bedrock-runtime")

        response = generate_conversation(bedrock_client, model_id, input_text, input_image)

        output_message = response["output"]["message"]

        print(f"Role: {output_message['role']}")

        for content in output_message["content"]:
            print(f"Text: {content['text']}")

        token_usage = response["usage"]
        print(f"Input tokens: {token_usage['inputTokens']}")
        print(f"Output tokens: {token_usage['outputTokens']}")
        print(f"Total tokens: {token_usage['totalTokens']}")
        print(f"Stop reason: {response['stopReason']}")

    except ClientError as err:
        message = err.response["Error"]["Message"]
        logger.error("A client error occurred: %s", message)
        print(f"A client error occurred: {message}")

    else:
        print(f"Finished generating text with model {model_id}.")


if __name__ == "__main__":
    main()
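One limitation of the script above is the hard-coded "png" format. A possible refinement, shown as a hypothetical helper that is not part of this commit, is to derive the format from the file extension, assuming the Converse image block accepts png, jpeg, gif, and webp.

import os


def image_format_from_path(path):
    """Hypothetical helper: map a file extension to a Converse image format.

    Assumes the accepted format values are png, jpeg, gif, and webp.
    """
    formats = {"png": "png", "jpg": "jpeg", "jpeg": "jpeg", "gif": "gif", "webp": "webp"}
    ext = os.path.splitext(path)[1].lower().lstrip(".")
    if ext not in formats:
        raise ValueError(f"Unsupported image type: {ext}")
    return formats[ext]

The image content block would then use image_format_from_path(input_image) in place of the fixed "png".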
@@ -0,0 +1,119 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Shows how to use the Converse API with Anthropic Claude 3 Sonnet (on demand).
Source: https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html
"""

import logging

import boto3
from botocore.exceptions import ClientError

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def generate_conversation(bedrock_client, model_id, system_prompts, messages):
    """
    Sends messages to a model.
    Args:
        bedrock_client: The Boto3 Bedrock runtime client.
        model_id (str): The model ID to use.
        system_prompts (JSON): The system prompts for the model to use.
        messages (JSON): The messages to send to the model.
    Returns:
        response (JSON): The conversation that the model generated.
    """

    logger.info("Generating message with model %s", model_id)

    # Inference parameters to use.
    temperature = 0.5
    top_k = 200

    # Base inference parameters to use.
    inference_config = {"temperature": temperature}
    # Additional inference parameters to use.
    additional_model_fields = {"top_k": top_k}

    # Send the message.
    response = bedrock_client.converse(
        modelId=model_id,
        messages=messages,
        system=system_prompts,
        inferenceConfig=inference_config,
        additionalModelRequestFields=additional_model_fields,
    )

    # Log token usage.
    token_usage = response["usage"]
    logger.info("Input tokens: %s", token_usage["inputTokens"])
    logger.info("Output tokens: %s", token_usage["outputTokens"])
    logger.info("Total tokens: %s", token_usage["totalTokens"])
    logger.info("Stop reason: %s", response["stopReason"])

    return response


def main():
    """
    Entrypoint for Anthropic Claude 3 Sonnet example.
    """

    logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")

    model_id = "anthropic.claude-3-sonnet-20240229-v1:0"

    # Set up the system prompts and messages to send to the model.
    system_prompts = [
        {
            "text": "You are an app that creates playlists for a radio station that plays rock and pop music. "
            "Only return song names and the artist."
        }
    ]
    message_1 = {"role": "user", "content": [{"text": "Create a list of 3 pop songs."}]}
    message_2 = {
        "role": "user",
        "content": [{"text": "Make sure the songs are by artists from the United Kingdom."}],
    }
    messages = []

    try:
        bedrock_client = boto3.client(service_name="bedrock-runtime")

        # Start the conversation with the 1st message.
        messages.append(message_1)
        response = generate_conversation(bedrock_client, model_id, system_prompts, messages)

        # Add the response message to the conversation.
        output_message = response["output"]["message"]
        messages.append(output_message)

        # Continue the conversation with the 2nd message.
        messages.append(message_2)
        response = generate_conversation(bedrock_client, model_id, system_prompts, messages)

        output_message = response["output"]["message"]
        messages.append(output_message)

        # Show the complete conversation.
        for message in messages:
            print(f"Role: {message['role']}")
            for content in message["content"]:
                print(f"Text: {content['text']}")
            print()

    except ClientError as err:
        message = err.response["Error"]["Message"]
        logger.error("A client error occurred: %s", message)
        print(f"A client error occurred: {message}")

    else:
        print(f"Finished generating text with model {model_id}.")


if __name__ == "__main__":
    main()
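For reference, after both turns the messages list that this example passes back to the Converse call alternates user and assistant roles, which is the conversation-history shape the script relies on. The sketch below shows that shape with the assistant text as placeholders; it is illustrative only, not output from the model.

# Shape of `messages` after the two turns (assistant text is a placeholder).
messages = [
    {"role": "user", "content": [{"text": "Create a list of 3 pop songs."}]},
    {"role": "assistant", "content": [{"text": "<first playlist from the model>"}]},
    {"role": "user", "content": [{"text": "Make sure the songs are by artists from the United Kingdom."}]},
    {"role": "assistant", "content": [{"text": "<revised playlist from the model>"}]},
]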