autogen_core.components.models#
- class autogen_core.components.models.AssistantMessage(content: str | List[autogen_core.components._types.FunctionCall], source: str)[source]#
Bases: object
- content: str | List[FunctionCall]#
- class autogen_core.components.models.ChatCompletionClient(*args, **kwargs)[source]#
Bases: Protocol
- actual_usage() → RequestUsage [source]#
- property capabilities: ModelCapabilities#
- count_tokens(messages: Sequence[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage], tools: Sequence[Tool | ToolSchema] = []) → int [source]#
- async create(messages: Sequence[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage], tools: Sequence[Tool | ToolSchema] = [], json_output: bool | None = None, extra_create_args: Mapping[str, Any] = {}, cancellation_token: CancellationToken | None = None) → CreateResult [source]#
- create_stream(messages: Sequence[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage], tools: Sequence[Tool | ToolSchema] = [], json_output: bool | None = None, extra_create_args: Mapping[str, Any] = {}, cancellation_token: CancellationToken | None = None) → AsyncGenerator[str | CreateResult, None] [source]#
- remaining_tokens(messages: Sequence[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage], tools: Sequence[Tool | ToolSchema] = []) → int [source]#
- total_usage() → RequestUsage [source]#
- class autogen_core.components.models.ChatCompletionTokenLogprob(token: str, logprob: float, top_logprobs: List[autogen_core.components.models._types.TopLogprob] | None = None, bytes: List[int] | None = None)[source]#
Bases: object
- top_logprobs: List[TopLogprob] | None = None#
- class autogen_core.components.models.CreateResult(finish_reason: Literal['stop', 'length', 'function_calls', 'content_filter'], content: str | List[autogen_core.components._types.FunctionCall], usage: autogen_core.components.models._types.RequestUsage, cached: bool, logprobs: List[autogen_core.components.models._types.ChatCompletionTokenLogprob] | None = None)[source]#
Bases: object
- content: str | List[FunctionCall]#
- logprobs: List[ChatCompletionTokenLogprob] | None = None#
- usage: RequestUsage#
- class autogen_core.components.models.FunctionExecutionResult(content: str, call_id: str)[source]#
Bases: object
- class autogen_core.components.models.FunctionExecutionResultMessage(content: List[autogen_core.components.models._types.FunctionExecutionResult])[source]#
Bases: object
- content: List[FunctionExecutionResult]#
- class autogen_core.components.models.RequestUsage(prompt_tokens: int, completion_tokens: int)[source]#
Bases: object