autogen_ext.models.anthropic#

class AnthropicChatCompletionClient(**kwargs: Unpack)[source]#

Bases: BaseAnthropicChatCompletionClient, Component[AnthropicClientConfigurationConfigModel]

Chat completion client for Anthropic’s Claude models.

Parameters:
  • model (str) – The Claude model to use (e.g., “claude-3-sonnet-20240229”, “claude-3-opus-20240229”)

  • api_key (str, optional) – Anthropic API key. Required if the ANTHROPIC_API_KEY environment variable is not set.

  • base_url (str, optional) – Override the default API endpoint.

  • max_tokens (int, optional) – Maximum tokens in the response. Default is 4096.

  • temperature (float, optional) – Controls randomness. Lower is more deterministic. Default is 1.0.

  • top_p (float, optional) – Controls diversity via nucleus sampling. Default is 1.0.

  • top_k (int, optional) – Controls diversity via top-k sampling. Default is -1 (disabled).

  • model_info (ModelInfo, optional) – The capabilities of the model. Required if using a custom model.

To use this client, you must install the Anthropic extension:

pip install "autogen-ext[anthropic]"

Example:

import asyncio
from autogen_ext.models.anthropic import AnthropicChatCompletionClient
from autogen_core.models import UserMessage


async def main():
    anthropic_client = AnthropicChatCompletionClient(
        model="claude-3-sonnet-20240229",
        api_key="your-api-key",  # Optional if ANTHROPIC_API_KEY is set in environment
    )

    result = await anthropic_client.create([UserMessage(content="What is the capital of France?", source="user")])  # type: ignore
    print(result)


if __name__ == "__main__":
    asyncio.run(main())
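
If you use a model name that is not recognized, or want to pin the sampling parameters described above, you can pass model_info and the sampling settings explicitly. A minimal sketch; the model name and capability values below are illustrative assumptions, not defaults:

from autogen_core.models import ModelInfo
from autogen_ext.models.anthropic import AnthropicChatCompletionClient

custom_client = AnthropicChatCompletionClient(
    model="claude-3-5-sonnet-latest",  # illustrative model name; substitute your own
    max_tokens=2048,
    temperature=0.2,
    top_p=0.9,
    model_info=ModelInfo(
        vision=True,
        function_calling=True,
        json_output=True,
        family="claude-3.5-sonnet",
    ),
)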

To load the client from a configuration:

from autogen_core.models import ChatCompletionClient

config = {
    "provider": "AnthropicChatCompletionClient",
    "config": {"model": "claude-3-sonnet-20240229"},
}

client = ChatCompletionClient.load_component(config)
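
Going the other way, the client can be serialized back to a configuration. A minimal sketch, assuming the generic dump_component() method provided by the component machinery:

from autogen_ext.models.anthropic import AnthropicChatCompletionClient

anthropic_client = AnthropicChatCompletionClient(model="claude-3-sonnet-20240229")
# dump_component() is inherited from the component machinery and returns a
# component model mirroring the dictionary shown above.
component_config = anthropic_client.dump_component()
print(component_config.model_dump_json())
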
classmethod _from_config(config: AnthropicClientConfigurationConfigModel) → Self[source]#

Create a new instance of the component from a configuration object.

Parameters:

config (T) – The configuration object.

Returns:

Self – The new instance of the component.

_to_config() → AnthropicClientConfigurationConfigModel[source]#

Dump the configuration that would be required to create a new instance of a component matching the configuration of this instance.

Returns:

T – The configuration of the component.

component_config_schema#

alias of AnthropicClientConfigurationConfigModel

component_provider_override: ClassVar[str | None] = 'autogen_ext.models.anthropic.AnthropicChatCompletionClient'#

Override the provider string for the component. This should be used to prevent internal module names from being part of the provider string.

component_type: ClassVar[ComponentType] = 'model'#

The logical type of the component.

class AnthropicClientConfiguration[source]#

Bases: BaseAnthropicClientConfiguration

api_key: str#
base_url: str | None#
default_headers: Dict[str, str] | None#
max_retries: int | None#
max_tokens: int | None#
metadata: Dict[str, str] | None#
model: str#
model_capabilities: ModelCapabilities#
model_info: ModelInfo#
response_format: ResponseFormat | None#
stop_sequences: List[str] | None#
temperature: float | None#
timeout: float | None#
tool_choice: Literal['auto', 'any', 'none'] | Dict[str, Any] | None#
tools: List[Dict[str, Any]] | None#
top_k: int | None#
top_p: float | None#
pydantic model AnthropicClientConfigurationConfigModel[source]#

Bases: BaseAnthropicClientConfigurationConfigModel

Show JSON schema
{
   "title": "AnthropicClientConfigurationConfigModel",
   "type": "object",
   "properties": {
      "model": {
         "title": "Model",
         "type": "string"
      },
      "max_tokens": {
         "anyOf": [
            {
               "type": "integer"
            },
            {
               "type": "null"
            }
         ],
         "default": 4096,
         "title": "Max Tokens"
      },
      "temperature": {
         "anyOf": [
            {
               "type": "number"
            },
            {
               "type": "null"
            }
         ],
         "default": 1.0,
         "title": "Temperature"
      },
      "top_p": {
         "anyOf": [
            {
               "type": "number"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Top P"
      },
      "top_k": {
         "anyOf": [
            {
               "type": "integer"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Top K"
      },
      "stop_sequences": {
         "anyOf": [
            {
               "items": {
                  "type": "string"
               },
               "type": "array"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Stop Sequences"
      },
      "response_format": {
         "anyOf": [
            {
               "$ref": "#/$defs/ResponseFormat"
            },
            {
               "type": "null"
            }
         ],
         "default": null
      },
      "metadata": {
         "anyOf": [
            {
               "additionalProperties": {
                  "type": "string"
               },
               "type": "object"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Metadata"
      },
      "api_key": {
         "anyOf": [
            {
               "type": "string"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Api Key"
      },
      "base_url": {
         "anyOf": [
            {
               "type": "string"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Base Url"
      },
      "model_capabilities": {
         "anyOf": [
            {
               "$ref": "#/$defs/ModelCapabilities"
            },
            {
               "type": "null"
            }
         ],
         "default": null
      },
      "model_info": {
         "anyOf": [
            {
               "$ref": "#/$defs/ModelInfo"
            },
            {
               "type": "null"
            }
         ],
         "default": null
      },
      "timeout": {
         "anyOf": [
            {
               "type": "number"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Timeout"
      },
      "max_retries": {
         "anyOf": [
            {
               "type": "integer"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Max Retries"
      },
      "default_headers": {
         "anyOf": [
            {
               "additionalProperties": {
                  "type": "string"
               },
               "type": "object"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Default Headers"
      },
      "tools": {
         "anyOf": [
            {
               "items": {
                  "type": "object"
               },
               "type": "array"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Tools"
      },
      "tool_choice": {
         "anyOf": [
            {
               "enum": [
                  "auto",
                  "any",
                  "none"
               ],
               "type": "string"
            },
            {
               "type": "object"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Tool Choice"
      }
   },
   "$defs": {
      "ModelCapabilities": {
         "deprecated": true,
         "properties": {
            "vision": {
               "title": "Vision",
               "type": "boolean"
            },
            "function_calling": {
               "title": "Function Calling",
               "type": "boolean"
            },
            "json_output": {
               "title": "Json Output",
               "type": "boolean"
            }
         },
         "required": [
            "vision",
            "function_calling",
            "json_output"
         ],
         "title": "ModelCapabilities",
         "type": "object"
      },
      "ModelInfo": {
         "description": "ModelInfo is a dictionary that contains information about a model's properties.\nIt is expected to be used in the model_info property of a model client.\n\nWe are expecting this to grow over time as we add more features.",
         "properties": {
            "vision": {
               "title": "Vision",
               "type": "boolean"
            },
            "function_calling": {
               "title": "Function Calling",
               "type": "boolean"
            },
            "json_output": {
               "title": "Json Output",
               "type": "boolean"
            },
            "family": {
               "anyOf": [
                  {
                     "enum": [
                        "gpt-4o",
                        "o1",
                        "o3",
                        "gpt-4",
                        "gpt-35",
                        "r1",
                        "gemini-1.5-flash",
                        "gemini-1.5-pro",
                        "gemini-2.0-flash",
                        "claude-3-haiku",
                        "claude-3-sonnet",
                        "claude-3-opus",
                        "claude-3.5-haiku",
                        "claude-3.5-sonnet",
                        "unknown"
                     ],
                     "type": "string"
                  },
                  {
                     "type": "string"
                  }
               ],
               "title": "Family"
            }
         },
         "required": [
            "vision",
            "function_calling",
            "json_output",
            "family"
         ],
         "title": "ModelInfo",
         "type": "object"
      },
      "ResponseFormat": {
         "properties": {
            "type": {
               "enum": [
                  "text",
                  "json_object"
               ],
               "title": "Type",
               "type": "string"
            }
         },
         "required": [
            "type"
         ],
         "title": "ResponseFormat",
         "type": "object"
      }
   },
   "required": [
      "model"
   ]
}

Fields:
  • tool_choice (Literal['auto', 'any', 'none'] | Dict[str, Any] | None)

  • tools (List[Dict[str, Any]] | None)

field tool_choice: Literal['auto', 'any', 'none'] | Dict[str, Any] | None = None#
field tools: List[Dict[str, Any]] | None = None#
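
Since this config model also carries the tool-related fields, a declarative configuration may pin tool_choice alongside the sampling parameters. A hedged sketch, reusing the load_component pattern shown earlier:

from autogen_core.models import ChatCompletionClient

config = {
    "provider": "AnthropicChatCompletionClient",
    "config": {
        "model": "claude-3-sonnet-20240229",
        "temperature": 0.2,
        "tool_choice": "auto",  # "auto", "any", "none", or a dict, per the schema above
    },
}
client = ChatCompletionClient.load_component(config)
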
class BaseAnthropicChatCompletionClient(client: AsyncAnthropic, *, create_args: Dict[str, Any], model_info: ModelInfo | None = None)[source]#

Bases: ChatCompletionClient

actual_usage() → RequestUsage[source]#
property capabilities: ModelCapabilities#
async close() → None[source]#
count_tokens(messages: Sequence[Annotated[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]], *, tools: Sequence[Tool | ToolSchema] = []) → int[source]#

Estimate the number of tokens used by messages and tools.

Note: This is an estimation based on common tokenization patterns and may not perfectly match Anthropic’s exact token counting for Claude models.
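
A short usage sketch, reusing the anthropic_client from the example at the top of this page (the message types come from autogen_core.models):

from autogen_core.models import SystemMessage, UserMessage

messages = [
    SystemMessage(content="You are a helpful assistant."),
    UserMessage(content="What is the capital of France?", source="user"),
]
# Approximate token footprint of the prompt, per the note above.
print(anthropic_client.count_tokens(messages))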

async create(messages: Sequence[Annotated[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]], *, tools: Sequence[Tool | ToolSchema] = [], json_output: bool | None = None, extra_create_args: Mapping[str, Any] = {}, cancellation_token: CancellationToken | None = None) → CreateResult[source]#
async create_stream(messages: Sequence[Annotated[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]], *, tools: Sequence[Tool | ToolSchema] = [], json_output: bool | None = None, extra_create_args: Mapping[str, Any] = {}, cancellation_token: CancellationToken | None = None, max_consecutive_empty_chunk_tolerance: int = 0) → AsyncGenerator[str | CreateResult, None][source]#

Creates an AsyncGenerator that yields a stream of completions based on the provided messages and tools.
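
A minimal consumption sketch; per the signature above, intermediate items are text chunks and the final item is a CreateResult:

from autogen_core.models import CreateResult, UserMessage
from autogen_ext.models.anthropic import BaseAnthropicChatCompletionClient


async def stream_example(client: BaseAnthropicChatCompletionClient) -> None:
    async for item in client.create_stream(
        [UserMessage(content="Write a haiku about Paris.", source="user")]
    ):
        if isinstance(item, CreateResult):
            print("\nFinal result:", item.content)
        else:
            print(item, end="", flush=True)  # streamed text chunk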

property model_info: ModelInfo#
remaining_tokens(messages: Sequence[Annotated[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]], *, tools: Sequence[Tool | ToolSchema] = []) → int[source]#

Calculate the remaining tokens based on the model’s token limit.
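
Continuing the count_tokens sketch above with the same messages:

# Estimated tokens still available for a response under the model's limit.
print(anthropic_client.remaining_tokens(messages))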

total_usage() → RequestUsage[source]#
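
actual_usage() and total_usage() both return a RequestUsage. A small sketch of inspecting cumulative usage after a few calls; the field names follow autogen_core.models.RequestUsage:

usage = anthropic_client.total_usage()
print("prompt tokens:", usage.prompt_tokens)
print("completion tokens:", usage.completion_tokens)
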
pydantic model CreateArgumentsConfigModel[source]#

Bases: BaseModel

Show JSON schema
{
   "title": "CreateArgumentsConfigModel",
   "type": "object",
   "properties": {
      "model": {
         "title": "Model",
         "type": "string"
      },
      "max_tokens": {
         "anyOf": [
            {
               "type": "integer"
            },
            {
               "type": "null"
            }
         ],
         "default": 4096,
         "title": "Max Tokens"
      },
      "temperature": {
         "anyOf": [
            {
               "type": "number"
            },
            {
               "type": "null"
            }
         ],
         "default": 1.0,
         "title": "Temperature"
      },
      "top_p": {
         "anyOf": [
            {
               "type": "number"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Top P"
      },
      "top_k": {
         "anyOf": [
            {
               "type": "integer"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Top K"
      },
      "stop_sequences": {
         "anyOf": [
            {
               "items": {
                  "type": "string"
               },
               "type": "array"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Stop Sequences"
      },
      "response_format": {
         "anyOf": [
            {
               "$ref": "#/$defs/ResponseFormat"
            },
            {
               "type": "null"
            }
         ],
         "default": null
      },
      "metadata": {
         "anyOf": [
            {
               "additionalProperties": {
                  "type": "string"
               },
               "type": "object"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "title": "Metadata"
      }
   },
   "$defs": {
      "ResponseFormat": {
         "properties": {
            "type": {
               "enum": [
                  "text",
                  "json_object"
               ],
               "title": "Type",
               "type": "string"
            }
         },
         "required": [
            "type"
         ],
         "title": "ResponseFormat",
         "type": "object"
      }
   },
   "required": [
      "model"
   ]
}

Fields:
  • max_tokens (int | None)

  • metadata (Dict[str, str] | None)

  • model (str)

  • response_format (autogen_ext.models.anthropic.config.ResponseFormat | None)

  • stop_sequences (List[str] | None)

  • temperature (float | None)

  • top_k (int | None)

  • top_p (float | None)

field max_tokens: int | None = 4096#
field metadata: Dict[str, str] | None = None#
field model: str [Required]#
field response_format: ResponseFormat | None = None#
field stop_sequences: List[str] | None = None#
field temperature: float | None = 1.0#
field top_k: int | None = None#
field top_p: float | None = None#
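
A small validation sketch, assuming the model is importable from this module:

from autogen_ext.models.anthropic import CreateArgumentsConfigModel

args = CreateArgumentsConfigModel(model="claude-3-sonnet-20240229", temperature=0.2)
# Drop fields left as None; defaults such as max_tokens=4096 are kept.
print(args.model_dump(exclude_none=True))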