Anthropic Claude

In this notebook, we demonstrate how to use the Anthropic Claude model for AgentChat.

Requirements

To use Anthropic Claude with AutoGen, you first need to install the pyautogen and anthropic packages.

To try out the function-call feature of the Claude model, you need to install anthropic>=0.23.1.

# !pip install pyautogen
!pip install "anthropic>=0.23.1"
import inspect
import json
from typing import Any, Dict, List, Union

from anthropic import Anthropic
from anthropic import __version__ as anthropic_version
from anthropic.types import Completion, Message
from openai.types.chat.chat_completion import ChatCompletionMessage
from typing_extensions import Annotated

import autogen
from autogen import AssistantAgent, UserProxyAgent

TOOL_ENABLED = anthropic_version >= "0.23.1"
if TOOL_ENABLED:
    from anthropic.types.beta.tools import ToolsBetaMessage
else:
    ToolsBetaMessage = object
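
Note that comparing raw version strings lexicographically can misorder releases (for example, "0.3.0" compares greater than "0.23.1"). A more robust check, assuming the packaging package is available in your environment, is a sketch like this:

from packaging.version import Version

# Parse the versions so that e.g. 0.3.0 < 0.23.1 compares correctly.
TOOL_ENABLED = Version(anthropic_version) >= Version("0.23.1")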

Create Anthropic Model Client following ModelClient Protocol

We will implement our Anthropic client to adhere to the ModelClient protocol and response structure defined in client.py and shown below.

class ModelClient(Protocol):
    """
    A client class must implement the following methods:
    - create must return a response object that implements the ModelClientResponseProtocol
    - cost must return the cost of the response
    - get_usage must return a dict with the following keys:
        - prompt_tokens
        - completion_tokens
        - total_tokens
        - cost
        - model

    This class is used to create a client that can be used by OpenAIWrapper.
    The response returned from create must adhere to the ModelClientResponseProtocol but can be extended however needed.
    The message_retrieval method must be implemented to return a list of str or a list of messages from the response.
    """

    RESPONSE_USAGE_KEYS = ["prompt_tokens", "completion_tokens", "total_tokens", "cost", "model"]

    class ModelClientResponseProtocol(Protocol):
        class Choice(Protocol):
            class Message(Protocol):
                content: Optional[str]

            message: Message

        choices: List[Choice]
        model: str

    def create(self, params) -> ModelClientResponseProtocol:
        ...

    def message_retrieval(
        self, response: ModelClientResponseProtocol
    ) -> Union[List[str], List[ModelClient.ModelClientResponseProtocol.Choice.Message]]:
        """
        Retrieve and return a list of strings or a list of Choice.Message from the response.

        NOTE: if a list of Choice.Message is returned, it currently needs to contain the fields of OpenAI's ChatCompletion Message object,
        since that is expected for function or tool calling in the rest of the codebase at the moment, unless a custom agent is being used.
        """
        ...

    def cost(self, response: ModelClientResponseProtocol) -> float:
        ...

    @staticmethod
    def get_usage(response: ModelClientResponseProtocol) -> Dict:
        """Return usage summary of the response using RESPONSE_USAGE_KEYS."""
        ...
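
To make the protocol concrete, here is a minimal sketch of a client that satisfies it. The EchoModelClient name and its echo behavior are illustrative only, not part of AutoGen:

from types import SimpleNamespace


class EchoModelClient:
    """Illustrative stub client: echoes the last user message back."""

    def __init__(self, config: Dict[str, Any], **kwargs):
        self.model = config["model"]

    def create(self, params: Dict[str, Any]):
        # Build a response object shaped like ModelClientResponseProtocol.
        last = params["messages"][-1]["content"]
        message = SimpleNamespace(content=f"echo: {last}", function_call=None)
        choice = SimpleNamespace(message=message)
        return SimpleNamespace(choices=[choice], model=self.model, cost=0.0)

    def message_retrieval(self, response) -> List[str]:
        return [choice.message.content for choice in response.choices]

    def cost(self, response) -> float:
        return 0.0

    @staticmethod
    def get_usage(response) -> Dict:
        # Every key in RESPONSE_USAGE_KEYS must be present.
        return {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0, "cost": 0.0, "model": response.model}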

Implementation of AnthropicClient

You can find the introduction to the Claude-3-Opus model here.

Since Anthropic provides a Python SDK with a structure similar to OpenAI's, we will follow the implementation of autogen.oai.client.OpenAIClient.

class AnthropicClient:
    def __init__(self, config: Dict[str, Any]):
        self._config = config
        self.model = config["model"]
        anthropic_kwargs = set(inspect.getfullargspec(Anthropic.__init__).kwonlyargs)
        filter_dict = {k: v for k, v in config.items() if k in anthropic_kwargs}
        self._client = Anthropic(**filter_dict)

        self._last_tooluse_status = {}

    def message_retrieval(
        self, response: Union[Message, ToolsBetaMessage]
    ) -> Union[List[str], List[ChatCompletionMessage]]:
        """Retrieve the messages from the response."""
        messages = response.content
        if len(messages) == 0:
            return [None]
        res = []
        if TOOL_ENABLED:
            for choice in messages:
                if choice.type == "tool_use":
                    res.insert(0, self.response_to_openai_message(choice))
                    self._last_tooluse_status["tool_use"] = choice.model_dump()
                else:
                    res.append(choice.text)
                    self._last_tooluse_status["think"] = choice.text

            return res

        else:
            return [  # type: ignore [return-value]
                choice.text if choice.message.function_call is not None else choice.message.content  # type: ignore [union-attr]
                for choice in messages
            ]

    def create(self, params: Dict[str, Any]) -> Completion:
        """Create a completion for a given config.

        Args:
            params: The params for the completion.

        Returns:
            The completion.
        """
        if "tools" in params:
            converted_functions = self.convert_tools_to_functions(params["tools"])
            params["functions"] = params.get("functions", []) + converted_functions

        raw_contents = params["messages"]
        processed_messages = []
        for message in raw_contents:
            if message["role"] == "system":
                params["system"] = message["content"]
            elif message["role"] == "function":
                processed_messages.append(self.return_function_call_result(message["content"]))
            elif "function_call" in message:
                processed_messages.append(self.restore_last_tooluse_status())
            elif message["content"] == "":
                # Anthropic rejects empty message content, so replace it with a hint
                # that nudges the model to end the conversation.
                message["content"] = "I'm done. Please send TERMINATE"
                processed_messages.append(message)
            else:
                processed_messages.append(message)

        params["messages"] = processed_messages

        if TOOL_ENABLED and "functions" in params:
            completions: Completion = self._client.beta.tools.messages
        else:
            completions: Completion = self._client.messages  # type: ignore [attr-defined]

        # Streaming is not yet supported.
        params = params.copy()
        params["stream"] = False
        # Remove the custom-client key before forwarding params to the SDK.
        params.pop("model_client_cls", None)
        params["max_tokens"] = params.get("max_tokens", 4096)
        if "functions" in params:
            tools_configs = params.pop("functions")
            tools_configs = [self.openai_func_to_anthropic(tool) for tool in tools_configs]
            params["tools"] = tools_configs
        response = completions.create(**params)

        return response

    def cost(self, response: Completion) -> float:
        """Calculate the cost of the response."""
        total = 0.0
        tokens = {
            "input": response.usage.input_tokens if response.usage is not None else 0,
            "output": response.usage.output_tokens if response.usage is not None else 0,
        }
        # Hard-coded Claude 3 Opus pricing in USD per million tokens; adjust for other models.
        price_per_million = {
            "input": 15,
            "output": 75,
        }
        for key, value in tokens.items():
            total += value * price_per_million[key] / 1_000_000

        return total

    def response_to_openai_message(self, response) -> ChatCompletionMessage:
        """Convert an Anthropic tool_use content block into an OpenAI-style function-call message."""
        dict_response = response.model_dump()
        return ChatCompletionMessage(
            content=None,
            role="assistant",
            function_call={"name": dict_response["name"], "arguments": json.dumps(dict_response["input"])},
        )

    def restore_last_tooluse_status(self) -> Dict:
        cached_content = []
        if "think" in self._last_tooluse_status:
            cached_content.append({"type": "text", "text": self._last_tooluse_status["think"]})
        cached_content.append(self._last_tooluse_status["tool_use"])
        res = {"role": "assistant", "content": cached_content}
        return res

    def return_function_call_result(self, result: str) -> Dict:
        return {
            "role": "user",
            "content": [
                {
                    "type": "tool_result",
                    "tool_use_id": self._last_tooluse_status["tool_use"]["id"],
                    "content": result,
                }
            ],
        }

    @staticmethod
    def openai_func_to_anthropic(openai_func: dict) -> dict:
        res = openai_func.copy()
        res["input_schema"] = res.pop("parameters")
        return res

    @staticmethod
    def get_usage(response: Completion) -> Dict:
        return {
            "prompt_tokens": response.usage.input_tokens if response.usage is not None else 0,
            "completion_tokens": response.usage.output_tokens if response.usage is not None else 0,
            "total_tokens": (
                response.usage.input_tokens + response.usage.output_tokens if response.usage is not None else 0
            ),
            "cost": response.cost if hasattr(response, "cost") else 0,
            "model": response.model,
        }

    @staticmethod
    def convert_tools_to_functions(tools: List) -> List:
        functions = []
        for tool in tools:
            if tool.get("type") == "function" and "function" in tool:
                functions.append(tool["function"])

        return functions
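
As a quick sanity check of the tool-schema conversion, the snippet below runs openai_func_to_anthropic on an OpenAI-style function definition. The schema shown here is illustrative:

openai_style = {
    "name": "get_weather",
    "description": "Get the current weather in a given location.",
    "parameters": {
        "type": "object",
        "properties": {"location": {"type": "string"}},
        "required": ["location"],
    },
}

anthropic_style = AnthropicClient.openai_func_to_anthropic(openai_style)
# Anthropic expects the JSON schema under "input_schema" rather than "parameters".
assert "input_schema" in anthropic_style and "parameters" not in anthropic_style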

Set the config for the Anthropic API

You can add any parameters needed for custom model loading to the same configuration list.

It is important to add the model_client_cls field and set it to a string that corresponds to the class name: "AnthropicClient".

import os

config_list_claude = [
    {
        # Choose your model name.
        "model": "claude-3-sonnet-20240229",
        # You need to provide your API key here.
        "api_key": os.getenv("ANTHROPIC_API_KEY"),
        "base_url": "https://api.anthropic.com",
        "api_type": "anthropic",
        "model_client_cls": "AnthropicClient",
    }
]
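
Alternatively, if you keep your endpoints in an OAI_CONFIG_LIST file, the same list can be loaded and filtered by class name. This sketch assumes such a file exists and contains an entry like the one above:

config_list_claude = autogen.config_list_from_json(
    "OAI_CONFIG_LIST",
    # Keep only entries that use our custom client class.
    filter_dict={"model_client_cls": ["AnthropicClient"]},
)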

Construct Agents

Construct a simple conversation between a UserProxyAgent and an AssistantAgent backed by the Claude-3 model.

assistant = AssistantAgent(
    "assistant",
    llm_config={
        "config_list": config_list_claude,
    },
)

user_proxy = UserProxyAgent(
    "user_proxy",
    human_input_mode="NEVER",
    code_execution_config={
        "work_dir": "coding",
        "use_docker": False,
    },
    is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
    max_consecutive_auto_reply=1,
)
[autogen.oai.client: 04-08 22:15:59] {419} INFO - Detected custom model client in config: AnthropicClient, model client can not be used until register_model_client is called.

Function Calling with the Latest Anthropic API

Anthropic just announced that tool use is now in public beta in the Anthropic API. To use this feature, please install anthropic>=0.23.1.

@user_proxy.register_for_execution()
@assistant.register_for_llm(name="get_weather", description="Get the current weather in a given location.")
def get_weather(location: Annotated[str, "The city and state, e.g. Toronto, ON."]) -> str:
    return "Absolutely cloudy and rainy"
[autogen.oai.client: 04-08 22:15:59] {419} INFO - Detected custom model client in config: AnthropicClient, model client can not be used until register_model_client is called.
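
If you want to verify what the assistant will advertise to the model, you can inspect the registered tool schema. This assumes the default tool-style registration in AutoGen, which attaches the schema to the agent's llm_config:

# Inspect the OpenAI-style tool schema attached by register_for_llm.
print(assistant.llm_config["tools"])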

Register the custom client class to the assistant agent

assistant.register_model_client(model_client_cls=AnthropicClient)
user_proxy.initiate_chat(
assistant,
message="What's the weather in Toronto?",
)
user_proxy (to assistant):

What's the weather in Toronto?

--------------------------------------------------------------------------------
assistant (to user_proxy):

***** Suggested function call: get_weather *****
Arguments:
{"location": "Toronto, ON"}
************************************************

--------------------------------------------------------------------------------

>>>>>>>> EXECUTING FUNCTION get_weather...
user_proxy (to assistant):

***** Response from calling function (get_weather) *****
Absolutely cloudy and rainy
********************************************************

--------------------------------------------------------------------------------
assistant (to user_proxy):

The tool returned that the current weather in Toronto, ON is absolutely cloudy and rainy.

--------------------------------------------------------------------------------
ChatResult(chat_id=None, chat_history=[{'content': "What's the weather in Toronto?", 'role': 'assistant'}, {'function_call': {'arguments': '{"location": "Toronto, ON"}', 'name': 'get_weather'}, 'content': None, 'role': 'assistant'}, {'content': 'Absolutely cloudy and rainy', 'name': 'get_weather', 'role': 'function'}, {'content': 'The tool returned that the current weather in Toronto, ON is absolutely cloudy and rainy.', 'role': 'user'}], summary='The tool returned that the current weather in Toronto, ON is absolutely cloudy and rainy.', cost=({'total_cost': 0.030494999999999998, 'claude-3-sonnet-20240229': {'cost': 0.030494999999999998, 'prompt_tokens': 1533, 'completion_tokens': 100, 'total_tokens': 1633}}, {'total_cost': 0}), human_input=[])