autogen_core.components.models#

class autogen_core.components.models.AssistantMessage(content: str | List[autogen_core.components._types.FunctionCall], source: str)[source]#

Bases: object

content: str | List[FunctionCall]#
source: str#
class autogen_core.components.models.ChatCompletionClient(*args, **kwargs)[source]#

Bases: Protocol

actual_usage() → RequestUsage[source]#
property capabilities: ModelCapabilities#
count_tokens(messages: Sequence[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage], tools: Sequence[Tool | ToolSchema] = []) → int[source]#
async create(messages: Sequence[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage], tools: Sequence[Tool | ToolSchema] = [], json_output: bool | None = None, extra_create_args: Mapping[str, Any] = {}, cancellation_token: CancellationToken | None = None) → CreateResult[source]#
create_stream(messages: Sequence[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage], tools: Sequence[Tool | ToolSchema] = [], json_output: bool | None = None, extra_create_args: Mapping[str, Any] = {}, cancellation_token: CancellationToken | None = None) → AsyncGenerator[str | CreateResult, None][source]#
remaining_tokens(messages: Sequence[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage], tools: Sequence[Tool | ToolSchema] = []) → int[source]#
total_usage() → RequestUsage[source]#
class autogen_core.components.models.ChatCompletionTokenLogprob(token: str, logprob: float, top_logprobs: List[autogen_core.components.models._types.TopLogprob] | None = None, bytes: List[int] | None = None)[source]#

Bases: object

bytes: List[int] | None = None#
logprob: float#
token: str#
top_logprobs: List[TopLogprob] | None = None#
class autogen_core.components.models.CreateResult(finish_reason: Literal['stop', 'length', 'function_calls', 'content_filter'], content: str | List[autogen_core.components._types.FunctionCall], usage: autogen_core.components.models._types.RequestUsage, cached: bool, logprobs: List[autogen_core.components.models._types.ChatCompletionTokenLogprob] | None = None)[source]#

Bases: object

cached: bool#
content: str | List[FunctionCall]#
finish_reason: Literal['stop', 'length', 'function_calls', 'content_filter']#
logprobs: List[ChatCompletionTokenLogprob] | None = None#
usage: RequestUsage#
class autogen_core.components.models.FunctionExecutionResult(content: str, call_id: str)[source]#

Bases: object

call_id: str#
content: str#
class autogen_core.components.models.FunctionExecutionResultMessage(content: List[autogen_core.components.models._types.FunctionExecutionResult])[source]#

Bases: object

content: List[FunctionExecutionResult]#
class autogen_core.components.models.ModelCapabilities[source]#

Bases: TypedDict

function_calling: Required[bool]#
json_output: Required[bool]#
vision: Required[bool]#
class autogen_core.components.models.RequestUsage(prompt_tokens: int, completion_tokens: int)[source]#

Bases: object

completion_tokens: int#
prompt_tokens: int#
class autogen_core.components.models.SystemMessage(content: str)[source]#

Bases: object

content: str#
class autogen_core.components.models.TopLogprob(logprob: float, bytes: List[int] | None = None)[source]#

Bases: object

bytes: List[int] | None = None#
logprob: float#
class autogen_core.components.models.UserMessage(content: str | List[str | autogen_core.components._image.Image], source: str)[source]#

Bases: object

content: str | List[str | Image]#
source: str#