Source code for autogen_agentchat.agents._assistant_agent
import asyncio
import json
import logging
import warnings
from typing import (
    Any,
    AsyncGenerator,
    Awaitable,
    Callable,
    Dict,
    List,
    Mapping,
    Optional,
    Sequence,
    Tuple,
    Union,
)

from autogen_core import CancellationToken, Component, ComponentModel, FunctionCall
from autogen_core.memory import Memory
from autogen_core.model_context import (
    ChatCompletionContext,
    UnboundedChatCompletionContext,
)
from autogen_core.models import (
    AssistantMessage,
    ChatCompletionClient,
    CreateResult,
    FunctionExecutionResult,
    FunctionExecutionResultMessage,
    LLMMessage,
    ModelFamily,
    SystemMessage,
    UserMessage,
)
from autogen_core.tools import BaseTool, FunctionTool
from pydantic import BaseModel
from typing_extensions import Self

from .. import EVENT_LOGGER_NAME
from ..base import Handoff as HandoffBase
from ..base import Response
from ..messages import (
    AgentEvent,
    ChatMessage,
    HandoffMessage,
    MemoryQueryEvent,
    ModelClientStreamingChunkEvent,
    TextMessage,
    ThoughtEvent,
    ToolCallExecutionEvent,
    ToolCallRequestEvent,
    ToolCallSummaryMessage,
)
from ..state import AssistantAgentState
from ..utils import remove_images
from ._base_chat_agent import BaseChatAgent

event_logger = logging.getLogger(EVENT_LOGGER_NAME)


class AssistantAgentConfig(BaseModel):
    """The declarative configuration for the assistant agent."""

    name: str
    model_client: ComponentModel
    tools: List[ComponentModel] | None
    handoffs: List[HandoffBase | str] | None = None
    model_context: ComponentModel | None = None
    memory: List[ComponentModel] | None = None
    description: str
    system_message: str | None = None
    model_client_stream: bool = False
    reflect_on_tool_use: bool
    tool_call_summary_format: str
class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
    """An agent that provides assistance with tool use.

    The :meth:`on_messages` method returns a :class:`~autogen_agentchat.base.Response`
    in which :attr:`~autogen_agentchat.base.Response.chat_message` is the final response message.

    The :meth:`on_messages_stream` method creates an async generator that produces
    the inner messages as they are created, and the :class:`~autogen_agentchat.base.Response`
    object as the last item before closing the generator.

    .. attention::

        The caller must only pass the new messages to the agent on each call
        to the :meth:`on_messages` or :meth:`on_messages_stream` method.
        The agent maintains its state between calls to these methods.
        Do not pass the entire conversation history to the agent on each call.

    .. warning::

        The assistant agent is not thread-safe or coroutine-safe.
        It should not be shared between multiple tasks or coroutines, and it
        should not call its methods concurrently.

    The following diagram shows how the assistant agent works:

    .. image:: ../../images/assistant-agent.svg

    Tool call behavior:

    * If the model returns no tool call, the response is immediately returned as a
      :class:`~autogen_agentchat.messages.TextMessage` in
      :attr:`~autogen_agentchat.base.Response.chat_message`.
    * When the model returns tool calls, they will be executed right away:

      - When `reflect_on_tool_use` is `False` (default), the tool call results are returned as a
        :class:`~autogen_agentchat.messages.ToolCallSummaryMessage` in
        :attr:`~autogen_agentchat.base.Response.chat_message`.
        `tool_call_summary_format` can be used to customize the tool call summary.
      - When `reflect_on_tool_use` is `True`, another model inference is made using the tool calls
        and results, and the text response is returned as a
        :class:`~autogen_agentchat.messages.TextMessage` in
        :attr:`~autogen_agentchat.base.Response.chat_message`.

    * If the model returns multiple tool calls, they will be executed concurrently.
      To disable parallel tool calls, configure the model client. For example,
      set `parallel_tool_calls=False` for
      :class:`~autogen_ext.models.openai.OpenAIChatCompletionClient` and
      :class:`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`.

    .. tip::

        By default, the tool call results are returned as the response when tool calls are made,
        so pay attention to the formatting of the tools' return values, especially if another
        agent expects them in a specific format. Use `tool_call_summary_format` to customize the
        tool call summary if needed, as in the sketch that follows.
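    For instance, the following minimal sketch customizes the summary so each result is rendered
    as ``tool_name(arguments): result`` (the ``get_weather`` tool here is a hypothetical
    stand-in, not part of this module):

    .. code-block:: python

        from autogen_agentchat.agents import AssistantAgent
        from autogen_ext.models.openai import OpenAIChatCompletionClient


        def get_weather(city: str) -> str:
            \"\"\"Return a canned weather report for the given city.\"\"\"
            return f"It is sunny in {city}."


        model_client = OpenAIChatCompletionClient(model="gpt-4o")
        agent = AssistantAgent(
            name="assistant",
            model_client=model_client,
            tools=[get_weather],
            # Render each tool result as "tool_name(arguments): result"
            # instead of the default "{result}".
            tool_call_summary_format="{tool_name}({arguments}): {result}",
        )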
    Hand off behavior:

    * If a handoff is triggered, a :class:`~autogen_agentchat.messages.HandoffMessage` will be
      returned in :attr:`~autogen_agentchat.base.Response.chat_message`.
    * If there are tool calls, they will also be executed right away before returning the handoff.
    * The tool calls and results are passed to the target agent through
      :attr:`~autogen_agentchat.messages.HandoffMessage.context`.
      An illustrative handoff sketch appears in the Examples section below.

    .. note::

        If multiple handoffs are detected, only the first handoff is executed.
        To avoid this, disable parallel tool calls in the model client configuration.

    Limit context size sent to the model:

    You can limit the number of messages sent to the model by setting the `model_context`
    parameter to a :class:`~autogen_core.model_context.BufferedChatCompletionContext`.
    This will limit the number of recent messages sent to the model and can be useful
    when the model has a limit on the number of tokens it can process.
    You can also create your own model context by subclassing
    :class:`~autogen_core.model_context.ChatCompletionContext`.

    Streaming mode:

    The assistant agent can be used in streaming mode by setting `model_client_stream=True`.
    In this mode, the :meth:`on_messages_stream` and :meth:`BaseChatAgent.run_stream` methods
    will also yield :class:`~autogen_agentchat.messages.ModelClientStreamingChunkEvent`
    messages as the model client produces chunks of the response.
    The chunk messages will not be included in the final response's inner messages.

    Args:
        name (str): The name of the agent.
        model_client (ChatCompletionClient): The model client to use for inference.
        tools (List[BaseTool[Any, Any] | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None, optional):
            The tools to register with the agent.
        handoffs (List[HandoffBase | str] | None, optional): The handoff configurations for the agent,
            allowing it to transfer to other agents by responding with a :class:`HandoffMessage`.
            The transfer is only executed when the team is in :class:`~autogen_agentchat.teams.Swarm`.
            If a handoff is a string, it should represent the target agent's name.
        model_context (ChatCompletionContext | None, optional): The model context for storing and
            retrieving :class:`~autogen_core.models.LLMMessage`. It can be preloaded with initial
            messages. The initial messages will be cleared when the agent is reset.
        description (str, optional): The description of the agent.
        system_message (str, optional): The system message for the model. If provided, it will be
            prepended to the messages in the model context when making an inference.
            Set to `None` to disable.
        model_client_stream (bool, optional): If `True`, the model client will be used in streaming mode.
            :meth:`on_messages_stream` and :meth:`BaseChatAgent.run_stream` methods will also yield
            :class:`~autogen_agentchat.messages.ModelClientStreamingChunkEvent`
            messages as the model client produces chunks of the response. Defaults to `False`.
        reflect_on_tool_use (bool, optional): If `True`, the agent will make another model inference
            using the tool call and result to generate a response.
            If `False`, the tool call result will be returned as the response. Defaults to `False`.
        tool_call_summary_format (str, optional): The format string used to create the tool call summary
            for every tool call result. Defaults to "{result}".
            When `reflect_on_tool_use` is `False`, a concatenation of all the tool call summaries,
            separated by a newline character ('\\n'), will be returned as the response.
            Available variables: `{tool_name}`, `{arguments}`, `{result}`.
            For example, `"{tool_name}: {result}"` will create a summary like `"tool_name: result"`.
        memory (Sequence[Memory] | None, optional): The memory store to use for the agent.
            Defaults to `None`.

    Raises:
        ValueError: If tool names are not unique.
        ValueError: If handoff names are not unique.
        ValueError: If handoff names are not unique from tool names.
    Examples:

        **Example 1: basic agent**

        The following example demonstrates how to create an assistant agent with
        a model client and generate a response to a simple task.

        .. code-block:: python

            import asyncio
            from autogen_core import CancellationToken
            from autogen_ext.models.openai import OpenAIChatCompletionClient
            from autogen_agentchat.agents import AssistantAgent
            from autogen_agentchat.messages import TextMessage


            async def main() -> None:
                model_client = OpenAIChatCompletionClient(
                    model="gpt-4o",
                    # api_key = "your_openai_api_key"
                )
                agent = AssistantAgent(name="assistant", model_client=model_client)

                response = await agent.on_messages(
                    [TextMessage(content="What is the capital of France?", source="user")], CancellationToken()
                )
                print(response)


            asyncio.run(main())

        **Example 2: model client token streaming**

        This example demonstrates how to create an assistant agent with
        a model client and generate a token stream by setting `model_client_stream=True`.

        .. code-block:: python

            import asyncio
            from autogen_ext.models.openai import OpenAIChatCompletionClient
            from autogen_agentchat.agents import AssistantAgent
            from autogen_agentchat.messages import TextMessage
            from autogen_core import CancellationToken


            async def main() -> None:
                model_client = OpenAIChatCompletionClient(
                    model="gpt-4o",
                    # api_key = "your_openai_api_key"
                )
                agent = AssistantAgent(
                    name="assistant",
                    model_client=model_client,
                    model_client_stream=True,
                )

                stream = agent.on_messages_stream(
                    [TextMessage(content="Name two cities in North America.", source="user")], CancellationToken()
                )
                async for message in stream:
                    print(message)


            asyncio.run(main())
        .. code-block:: text

            source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' North' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' New' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' York' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' City' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' the' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' United' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' States' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' Toronto' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' Canada' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content=' TERMIN' type='ModelClientStreamingChunkEvent'
            source='assistant' models_usage=None content='ATE' type='ModelClientStreamingChunkEvent'
            Response(chat_message=TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in North America are New York City in the United States and Toronto in Canada. TERMINATE', type='TextMessage'), inner_messages=[])

        **Example 3: agent with tools**

        The following example demonstrates how to create an assistant agent with
        a model client and a tool, generate a stream of messages for a task, and
        print the messages to the console using :class:`~autogen_agentchat.ui.Console`.

        The tool is a simple function that returns the current time.
        Under the hood, the function is wrapped in a :class:`~autogen_core.tools.FunctionTool`
        and used with the agent's model client. The docstring of the function
        is used as the tool description, the function name is used as the tool name,
        and the function signature, including the type hints, is used as the tool arguments.

        .. code-block:: python

            import asyncio
            from autogen_ext.models.openai import OpenAIChatCompletionClient
            from autogen_agentchat.agents import AssistantAgent
            from autogen_agentchat.messages import TextMessage
            from autogen_agentchat.ui import Console
            from autogen_core import CancellationToken


            async def get_current_time() -> str:
                return "The current time is 12:00 PM."


            async def main() -> None:
                model_client = OpenAIChatCompletionClient(
                    model="gpt-4o",
                    # api_key = "your_openai_api_key"
                )
                agent = AssistantAgent(name="assistant", model_client=model_client, tools=[get_current_time])

                await Console(
                    agent.on_messages_stream(
                        [TextMessage(content="What is the current time?", source="user")], CancellationToken()
                    )
                )


            asyncio.run(main())

        **Example 4: agent with structured output and tool**

        The following example demonstrates how to create an assistant agent with
        a model client configured to use structured output and a tool.
        Note that you need to use :class:`~autogen_core.tools.FunctionTool` to create the tool,
        and `strict=True` is required for structured output mode.
        Because the model is configured to use structured output, the
        reflection response will be a JSON-formatted string.

        .. code-block:: python

            import asyncio
            from typing import Literal

            from autogen_agentchat.agents import AssistantAgent
            from autogen_agentchat.messages import TextMessage
            from autogen_agentchat.ui import Console
            from autogen_core import CancellationToken
            from autogen_core.tools import FunctionTool
            from autogen_ext.models.openai import OpenAIChatCompletionClient
            from pydantic import BaseModel


            # Define the structured output format.
            class AgentResponse(BaseModel):
                thoughts: str
                response: Literal["happy", "sad", "neutral"]


            # Define the function to be called as a tool.
            def sentiment_analysis(text: str) -> str:
                \"\"\"Given a text, return the sentiment.\"\"\"
                return "happy" if "happy" in text else "sad" if "sad" in text else "neutral"


            # Create a FunctionTool instance with `strict=True`,
            # which is required for structured output mode.
            tool = FunctionTool(sentiment_analysis, description="Sentiment Analysis", strict=True)

            # Create an OpenAIChatCompletionClient instance that uses the structured output format.
            model_client = OpenAIChatCompletionClient(
                model="gpt-4o-mini",
                response_format=AgentResponse,  # type: ignore
            )

            # Create an AssistantAgent instance that uses the tool and model client.
            agent = AssistantAgent(
                name="assistant",
                model_client=model_client,
                tools=[tool],
                system_message="Use the tool to analyze sentiment.",
                reflect_on_tool_use=True,  # Use reflection to have the agent generate a formatted response.
            )


            async def main() -> None:
                stream = agent.on_messages_stream([TextMessage(content="I am happy today!", source="user")], CancellationToken())
                await Console(stream)


            asyncio.run(main())
        .. code-block:: text

            ---------- assistant ----------
            [FunctionCall(id='call_tIZjAVyKEDuijbBwLY6RHV2p', arguments='{"text":"I am happy today!"}', name='sentiment_analysis')]
            ---------- assistant ----------
            [FunctionExecutionResult(content='happy', call_id='call_tIZjAVyKEDuijbBwLY6RHV2p', is_error=False)]
            ---------- assistant ----------
            {"thoughts":"The user expresses a clear positive emotion by stating they are happy today, suggesting an upbeat mood.","response":"happy"}

        **Example 5: agent with bounded model context**

        The following example shows how to use a
        :class:`~autogen_core.model_context.BufferedChatCompletionContext`
        that only keeps the last 2 messages (1 user + 1 assistant).
        A bounded model context is useful when the model has a limit on the
        number of tokens it can process.

        .. code-block:: python

            import asyncio

            from autogen_agentchat.agents import AssistantAgent
            from autogen_agentchat.messages import TextMessage
            from autogen_core import CancellationToken
            from autogen_core.model_context import BufferedChatCompletionContext
            from autogen_ext.models.openai import OpenAIChatCompletionClient


            async def main() -> None:
                # Create a model client.
                model_client = OpenAIChatCompletionClient(
                    model="gpt-4o-mini",
                    # api_key = "your_openai_api_key"
                )

                # Create a model context that only keeps the last 2 messages (1 user + 1 assistant).
                model_context = BufferedChatCompletionContext(buffer_size=2)

                # Create an AssistantAgent instance with the model client and context.
                agent = AssistantAgent(
                    name="assistant",
                    model_client=model_client,
                    model_context=model_context,
                    system_message="You are a helpful assistant.",
                )

                response = await agent.on_messages(
                    [TextMessage(content="Name two cities in North America.", source="user")], CancellationToken()
                )
                print(response.chat_message.content)  # type: ignore

                response = await agent.on_messages(
                    [TextMessage(content="My favorite color is blue.", source="user")], CancellationToken()
                )
                print(response.chat_message.content)  # type: ignore

                response = await agent.on_messages(
                    [TextMessage(content="Did I ask you any question?", source="user")], CancellationToken()
                )
                print(response.chat_message.content)  # type: ignore


            asyncio.run(main())

        .. code-block:: text

            Two cities in North America are New York City and Toronto.
            That's great! Blue is often associated with calmness and serenity. Do you have a specific shade of blue that you like, or any particular reason why it's your favorite?
            No, you didn't ask a question. I apologize for any misunderstanding. If you have something specific you'd like to discuss or ask, feel free to let me know!

        **Example 6: agent with memory**

        The following example shows how to use a list-based memory with the assistant agent.
        The memory is preloaded with some initial content.
        Under the hood, the memory is used to update the model context before making an inference,
        using the :meth:`~autogen_core.memory.Memory.update_context` method.
        .. code-block:: python

            import asyncio

            from autogen_agentchat.agents import AssistantAgent
            from autogen_agentchat.messages import TextMessage
            from autogen_core import CancellationToken
            from autogen_core.memory import ListMemory, MemoryContent
            from autogen_ext.models.openai import OpenAIChatCompletionClient


            async def main() -> None:
                # Create a model client.
                model_client = OpenAIChatCompletionClient(
                    model="gpt-4o-mini",
                    # api_key = "your_openai_api_key"
                )

                # Create a list-based memory with some initial content.
                memory = ListMemory()
                await memory.add(MemoryContent(content="User likes pizza.", mime_type="text/plain"))
                await memory.add(MemoryContent(content="User dislikes cheese.", mime_type="text/plain"))

                # Create an AssistantAgent instance with the model client and memory.
                agent = AssistantAgent(
                    name="assistant",
                    model_client=model_client,
                    memory=[memory],
                    system_message="You are a helpful assistant.",
                )

                response = await agent.on_messages(
                    [TextMessage(content="One idea for a dinner.", source="user")], CancellationToken()
                )
                print(response.chat_message.content)  # type: ignore


            asyncio.run(main())

        .. code-block:: text

            How about making a delicious pizza without cheese? You can create a flavorful veggie pizza with a variety of toppings. Here's a quick idea:

            **Veggie Tomato Sauce Pizza**
            - Start with a pizza crust (store-bought or homemade).
            - Spread a layer of marinara or tomato sauce evenly over the crust.
            - Top with your favorite vegetables like bell peppers, mushrooms, onions, olives, and spinach.
            - Add some protein if you'd like, such as grilled chicken or pepperoni (ensure it's cheese-free).
            - Sprinkle with herbs like oregano and basil, and maybe a drizzle of olive oil.
            - Bake according to the crust instructions until the edges are golden and the veggies are cooked.

            Serve it with a side salad or some garlic bread to complete the meal! Enjoy your dinner!

        **Example 7: agent with `o1-mini`**

        The following example shows how to use the `o1-mini` model with the assistant agent.

        .. code-block:: python

            import asyncio
            from autogen_core import CancellationToken
            from autogen_ext.models.openai import OpenAIChatCompletionClient
            from autogen_agentchat.agents import AssistantAgent
            from autogen_agentchat.messages import TextMessage


            async def main() -> None:
                model_client = OpenAIChatCompletionClient(
                    model="o1-mini",
                    # api_key = "your_openai_api_key"
                )
                # The system message is not supported by the o1 series model.
                agent = AssistantAgent(name="assistant", model_client=model_client, system_message=None)

                response = await agent.on_messages(
                    [TextMessage(content="What is the capital of France?", source="user")], CancellationToken()
                )
                print(response)


            asyncio.run(main())

        .. note::

            The `o1-preview` and `o1-mini` models do not support system messages or function calling,
            so `system_message` should be set to `None`, and `tools` and `handoffs` should not be set.
            See `o1 beta limitations <https://platform.openai.com/docs/guides/reasoning#beta-limitations>`_
            for more details.

        **Example 8: agent using a reasoning model with a custom model context**

        The following example shows how to use a reasoning model (DeepSeek R1) with the assistant agent.
        The model context is used to filter out the thought field from the assistant message.

        .. code-block:: python

            import asyncio
            from typing import List

            from autogen_agentchat.agents import AssistantAgent
            from autogen_core.model_context import UnboundedChatCompletionContext
            from autogen_core.models import AssistantMessage, LLMMessage, ModelFamily
            from autogen_ext.models.ollama import OllamaChatCompletionClient


            class ReasoningModelContext(UnboundedChatCompletionContext):
                \"\"\"A model context for reasoning models.\"\"\"

                async def get_messages(self) -> List[LLMMessage]:
                    messages = await super().get_messages()
                    # Filter out thought field from AssistantMessage.
                    messages_out: List[LLMMessage] = []
                    for message in messages:
                        if isinstance(message, AssistantMessage):
                            message.thought = None
                        messages_out.append(message)
                    return messages_out


            # Create an instance of the model client for DeepSeek R1 hosted locally on Ollama.
            model_client = OllamaChatCompletionClient(
                model="deepseek-r1:8b",
                model_info={
                    "vision": False,
                    "function_calling": False,
                    "json_output": False,
                    "family": ModelFamily.R1,
                },
            )

            agent = AssistantAgent(
                "reasoning_agent",
                model_client=model_client,
                model_context=ReasoningModelContext(),  # Use the custom model context.
            )


            async def run_reasoning_agent() -> None:
                result = await agent.run(task="What is the capital of France?")
                print(result)


            asyncio.run(run_reasoning_agent())
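        **Additional sketch: handoffs within a Swarm team**

        The following is a minimal, illustrative sketch of the handoff behavior described above.
        The agent names, system messages, and task are hypothetical placeholders, and the handoff
        is only executed when the agents participate in a
        :class:`~autogen_agentchat.teams.Swarm` team.

        .. code-block:: python

            import asyncio

            from autogen_agentchat.agents import AssistantAgent
            from autogen_agentchat.conditions import HandoffTermination, TextMentionTermination
            from autogen_agentchat.teams import Swarm
            from autogen_ext.models.openai import OpenAIChatCompletionClient


            async def main() -> None:
                model_client = OpenAIChatCompletionClient(
                    model="gpt-4o",
                    # api_key = "your_openai_api_key"
                )
                travel_agent = AssistantAgent(
                    name="travel_agent",
                    model_client=model_client,
                    handoffs=["flights_agent", "user"],
                    system_message="Plan trips. Hand off to flights_agent for flight searches. "
                    "Hand off to user when you need more information. Reply TERMINATE when done.",
                )
                flights_agent = AssistantAgent(
                    name="flights_agent",
                    model_client=model_client,
                    handoffs=["travel_agent"],
                    system_message="Find flights, then hand off back to travel_agent.",
                )
                # Stop when a handoff to the (hypothetical) "user" target is produced,
                # or when the conversation mentions TERMINATE.
                team = Swarm(
                    [travel_agent, flights_agent],
                    termination_condition=HandoffTermination(target="user") | TextMentionTermination("TERMINATE"),
                )
                result = await team.run(task="Book a flight from Seattle to Paris next week.")
                print(result)


            asyncio.run(main())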
messages_out: List[LLMMessage] = [] for message in messages: if isinstance(message, AssistantMessage): message.thought = None messages_out.append(message) return messages_out # Create an instance of the model client for DeepSeek R1 hosted locally on Ollama. model_client = OllamaChatCompletionClient( model="deepseek-r1:8b", model_info={ "vision": False, "function_calling": False, "json_output": False, "family": ModelFamily.R1, }, ) agent = AssistantAgent( "reasoning_agent", model_client=model_client, model_context=ReasoningModelContext(), # Use the custom model context. ) async def run_reasoning_agent() -> None: result = await agent.run(task="What is the capital of France?") print(result) asyncio.run(run_reasoning_agent()) """component_config_schema=AssistantAgentConfigcomponent_provider_override="autogen_agentchat.agents.AssistantAgent"def__init__(self,name:str,model_client:ChatCompletionClient,*,tools:List[BaseTool[Any,Any]|Callable[...,Any]|Callable[...,Awaitable[Any]]]|None=None,handoffs:List[HandoffBase|str]|None=None,model_context:ChatCompletionContext|None=None,description:str="An agent that provides assistance with ability to use tools.",system_message:(str|None)="You are a helpful AI assistant. Solve tasks using your tools. Reply with TERMINATE when the task has been completed.",model_client_stream:bool=False,reflect_on_tool_use:bool=False,tool_call_summary_format:str="{result}",memory:Sequence[Memory]|None=None,):super().__init__(name=name,description=description)ifreflect_on_tool_useandModelFamily.is_claude(model_client.model_info["family"]):warnings.warn("Claude models may not work with reflection on tool use because Claude requires that any requests including a previous tool use or tool result must include the original tools definition.""Consider setting reflect_on_tool_use to False. ""As an alternative, consider calling the agent in a loop until it stops producing tool calls. 
""See [Single-Agent Team](https://microsoft.github.io/autogen/stable/user-guide/agentchat-user-guide/tutorial/teams.html#single-agent-team) ""for more details.",UserWarning,stacklevel=2,)self._model_client=model_clientself._model_client_stream=model_client_streamself._memory=NoneifmemoryisnotNone:ifisinstance(memory,list):self._memory=memoryelse:raiseTypeError(f"Expected Memory, List[Memory], or None, got {type(memory)}")self._system_messages:List[SystemMessage]=[]ifsystem_messageisNone:self._system_messages=[]else:self._system_messages=[SystemMessage(content=system_message)]self._tools:List[BaseTool[Any,Any]]=[]iftoolsisnotNone:ifmodel_client.model_info["function_calling"]isFalse:raiseValueError("The model does not support function calling.")fortoolintools:ifisinstance(tool,BaseTool):self._tools.append(tool)elifcallable(tool):ifhasattr(tool,"__doc__")andtool.__doc__isnotNone:description=tool.__doc__else:description=""self._tools.append(FunctionTool(tool,description=description))else:raiseValueError(f"Unsupported tool type: {type(tool)}")# Check if tool names are unique.tool_names=[tool.namefortoolinself._tools]iflen(tool_names)!=len(set(tool_names)):raiseValueError(f"Tool names must be unique: {tool_names}")# Handoff tools.self._handoff_tools:List[BaseTool[Any,Any]]=[]self._handoffs:Dict[str,HandoffBase]={}ifhandoffsisnotNone:ifmodel_client.model_info["function_calling"]isFalse:raiseValueError("The model does not support function calling, which is needed for handoffs.")forhandoffinhandoffs:ifisinstance(handoff,str):handoff=HandoffBase(target=handoff)ifisinstance(handoff,HandoffBase):self._handoff_tools.append(handoff.handoff_tool)self._handoffs[handoff.name]=handoffelse:raiseValueError(f"Unsupported handoff type: {type(handoff)}")# Check if handoff tool names are unique.handoff_tool_names=[tool.namefortoolinself._handoff_tools]iflen(handoff_tool_names)!=len(set(handoff_tool_names)):raiseValueError(f"Handoff names must be unique: {handoff_tool_names}")# Check if handoff tool names not in tool names.ifany(nameintool_namesfornameinhandoff_tool_names):raiseValueError(f"Handoff names must be unique from tool names. "f"Handoff names: {handoff_tool_names}; tool names: {tool_names}")ifmodel_contextisnotNone:self._model_context=model_contextelse:self._model_context=UnboundedChatCompletionContext()self._reflect_on_tool_use=reflect_on_tool_useself._tool_call_summary_format=tool_call_summary_formatself._is_running=False@propertydefproduced_message_types(self)->Sequence[type[ChatMessage]]:message_types:List[type[ChatMessage]]=[TextMessage]ifself._handoffs:message_types.append(HandoffMessage)ifself._tools:message_types.append(ToolCallSummaryMessage)returntuple(message_types)
    async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:
        async for message in self.on_messages_stream(messages, cancellation_token):
            if isinstance(message, Response):
                return message
        raise AssertionError("The stream should have returned the final result.")
    async def on_messages_stream(
        self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken
    ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
        """
        Process the incoming messages with the assistant agent and yield events/responses as they happen.
        """

        # Gather all relevant state here
        agent_name = self.name
        model_context = self._model_context
        memory = self._memory
        system_messages = self._system_messages
        tools = self._tools
        handoff_tools = self._handoff_tools
        handoffs = self._handoffs
        model_client = self._model_client
        model_client_stream = self._model_client_stream
        reflect_on_tool_use = self._reflect_on_tool_use
        tool_call_summary_format = self._tool_call_summary_format

        # STEP 1: Add new user/handoff messages to the model context
        await self._add_messages_to_context(
            model_context=model_context,
            messages=messages,
        )

        # STEP 2: Update model context with any relevant memory
        inner_messages: List[AgentEvent | ChatMessage] = []
        for event_msg in await self._update_model_context_with_memory(
            memory=memory,
            model_context=model_context,
            agent_name=agent_name,
        ):
            inner_messages.append(event_msg)
            yield event_msg

        # STEP 3: Run the first inference
        model_result = None
        async for inference_output in self._call_llm(
            model_client=model_client,
            model_client_stream=model_client_stream,
            system_messages=system_messages,
            model_context=model_context,
            tools=tools,
            handoff_tools=handoff_tools,
            agent_name=agent_name,
            cancellation_token=cancellation_token,
        ):
            if isinstance(inference_output, CreateResult):
                model_result = inference_output
            else:
                # Streaming chunk event
                yield inference_output

        assert model_result is not None, "No model result was produced."

        # --- NEW: If the model produced a hidden "thought," yield it as an event ---
        if model_result.thought:
            thought_event = ThoughtEvent(content=model_result.thought, source=agent_name)
            yield thought_event
            inner_messages.append(thought_event)

        # Add the assistant message to the model context (including thought if present)
        await model_context.add_message(
            AssistantMessage(
                content=model_result.content,
                source=agent_name,
                thought=getattr(model_result, "thought", None),
            )
        )

        # STEP 4: Process the model output
        async for output_event in self._process_model_result(
            model_result=model_result,
            inner_messages=inner_messages,
            cancellation_token=cancellation_token,
            agent_name=agent_name,
            model_context=model_context,
            tools=tools,
            handoff_tools=handoff_tools,
            handoffs=handoffs,
            model_client=model_client,
            model_client_stream=model_client_stream,
            reflect_on_tool_use=reflect_on_tool_use,
            tool_call_summary_format=tool_call_summary_format,
        ):
            yield output_event
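    # Consumer sketch (illustrative, not part of this class): draining the stream
    # while separating intermediate events from the final Response. Assumes `agent`
    # is an AssistantAgent and `msgs` is a list of ChatMessage.
    #
    #     async for item in agent.on_messages_stream(msgs, CancellationToken()):
    #         if isinstance(item, Response):
    #             print("final:", item.chat_message.content)
    #         else:
    #             print("event:", item)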
    @staticmethod
    async def _add_messages_to_context(
        model_context: ChatCompletionContext,
        messages: Sequence[ChatMessage],
    ) -> None:
        """
        Add incoming user (and possibly handoff) messages to the model context.
        """
        for msg in messages:
            if isinstance(msg, HandoffMessage):
                # Add handoff context to the model context.
                for context_msg in msg.context:
                    await model_context.add_message(context_msg)
            await model_context.add_message(UserMessage(content=msg.content, source=msg.source))

    @staticmethod
    async def _update_model_context_with_memory(
        memory: Optional[Sequence[Memory]],
        model_context: ChatCompletionContext,
        agent_name: str,
    ) -> List[MemoryQueryEvent]:
        """
        If memory modules are present, update the model context and return the events produced.
        """
        events: List[MemoryQueryEvent] = []
        if memory:
            for mem in memory:
                update_context_result = await mem.update_context(model_context)
                if update_context_result and len(update_context_result.memories.results) > 0:
                    memory_query_event_msg = MemoryQueryEvent(
                        content=update_context_result.memories.results,
                        source=agent_name,
                    )
                    events.append(memory_query_event_msg)
        return events

    @classmethod
    async def _call_llm(
        cls,
        model_client: ChatCompletionClient,
        model_client_stream: bool,
        system_messages: List[SystemMessage],
        model_context: ChatCompletionContext,
        tools: List[BaseTool[Any, Any]],
        handoff_tools: List[BaseTool[Any, Any]],
        agent_name: str,
        cancellation_token: CancellationToken,
    ) -> AsyncGenerator[Union[CreateResult, ModelClientStreamingChunkEvent], None]:
        """
        Perform a model inference and yield either streaming chunk events or the final CreateResult.
        """
        all_messages = await model_context.get_messages()
        llm_messages = cls._get_compatible_context(model_client=model_client, messages=system_messages + all_messages)

        all_tools = tools + handoff_tools
        if model_client_stream:
            model_result: Optional[CreateResult] = None
            async for chunk in model_client.create_stream(
                llm_messages, tools=all_tools, cancellation_token=cancellation_token
            ):
                if isinstance(chunk, CreateResult):
                    model_result = chunk
                elif isinstance(chunk, str):
                    yield ModelClientStreamingChunkEvent(content=chunk, source=agent_name)
                else:
                    raise RuntimeError(f"Invalid chunk type: {type(chunk)}")
            if model_result is None:
                raise RuntimeError("No final model result in streaming mode.")
            yield model_result
        else:
            model_result = await model_client.create(
                llm_messages, tools=all_tools, cancellation_token=cancellation_token
            )
            yield model_result

    @classmethod
    async def _process_model_result(
        cls,
        model_result: CreateResult,
        inner_messages: List[AgentEvent | ChatMessage],
        cancellation_token: CancellationToken,
        agent_name: str,
        model_context: ChatCompletionContext,
        tools: List[BaseTool[Any, Any]],
        handoff_tools: List[BaseTool[Any, Any]],
        handoffs: Dict[str, HandoffBase],
        model_client: ChatCompletionClient,
        model_client_stream: bool,
        reflect_on_tool_use: bool,
        tool_call_summary_format: str,
    ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
        """
        Handle final or partial responses from model_result, including tool calls,
        handoffs, and reflection if needed.
        """
        # If direct text response (string)
        if isinstance(model_result.content, str):
            yield Response(
                chat_message=TextMessage(
                    content=model_result.content,
                    source=agent_name,
                    models_usage=model_result.usage,
                ),
                inner_messages=inner_messages,
            )
            return

        # Otherwise, we have function calls
        assert isinstance(model_result.content, list) and all(
            isinstance(item, FunctionCall) for item in model_result.content
        )

        # STEP 4A: Yield ToolCallRequestEvent
        tool_call_msg = ToolCallRequestEvent(
            content=model_result.content,
            source=agent_name,
            models_usage=model_result.usage,
        )
        event_logger.debug(tool_call_msg)
        inner_messages.append(tool_call_msg)
        yield tool_call_msg

        # STEP 4B: Execute tool calls
        executed_calls_and_results = await asyncio.gather(
            *[
                cls._execute_tool_call(
                    tool_call=call,
                    tools=tools,
                    handoff_tools=handoff_tools,
                    agent_name=agent_name,
                    cancellation_token=cancellation_token,
                )
                for call in model_result.content
            ]
        )
        exec_results = [result for _, result in executed_calls_and_results]

        # Yield ToolCallExecutionEvent
        tool_call_result_msg = ToolCallExecutionEvent(
            content=exec_results,
            source=agent_name,
        )
        event_logger.debug(tool_call_result_msg)
        await model_context.add_message(FunctionExecutionResultMessage(content=exec_results))
        inner_messages.append(tool_call_result_msg)
        yield tool_call_result_msg

        # STEP 4C: Check for handoff
        handoff_output = cls._check_and_handle_handoff(
            model_result=model_result,
            executed_calls_and_results=executed_calls_and_results,
            inner_messages=inner_messages,
            handoffs=handoffs,
            agent_name=agent_name,
        )
        if handoff_output:
            yield handoff_output
            return

        # STEP 4D: Reflect or summarize tool results
        if reflect_on_tool_use:
            async for reflection_response in AssistantAgent._reflect_on_tool_use_flow(
                model_client=model_client,
                model_client_stream=model_client_stream,
                model_context=model_context,
                agent_name=agent_name,
                inner_messages=inner_messages,
            ):
                yield reflection_response
        else:
            yield AssistantAgent._summarize_tool_use(
                executed_calls_and_results=executed_calls_and_results,
                inner_messages=inner_messages,
                handoffs=handoffs,
                tool_call_summary_format=tool_call_summary_format,
                agent_name=agent_name,
            )

    @staticmethod
    def _check_and_handle_handoff(
        model_result: CreateResult,
        executed_calls_and_results: List[Tuple[FunctionCall, FunctionExecutionResult]],
        inner_messages: List[AgentEvent | ChatMessage],
        handoffs: Dict[str, HandoffBase],
        agent_name: str,
    ) -> Optional[Response]:
        """
        Detect handoff calls, generate the HandoffMessage if needed, and return a Response.
        If multiple handoffs exist, only the first is used.
        """
        handoff_reqs = [
            call for call in model_result.content if isinstance(call, FunctionCall) and call.name in handoffs
        ]
        if len(handoff_reqs) > 0:
            # We have at least one handoff function call
            selected_handoff = handoffs[handoff_reqs[0].name]
            if len(handoff_reqs) > 1:
                warnings.warn(
                    (
                        f"Multiple handoffs detected. Only the first is executed: "
                        f"{[handoffs[c.name].name for c in handoff_reqs]}. "
                        "Disable parallel tool calls in the model client to avoid this warning."
                    ),
                    stacklevel=2,
                )

            # Collect normal tool calls (not handoff) into the handoff context
            tool_calls: List[FunctionCall] = []
            tool_call_results: List[FunctionExecutionResult] = []
            for exec_call, exec_result in executed_calls_and_results:
                if exec_call.name not in handoffs:
                    tool_calls.append(exec_call)
                    tool_call_results.append(exec_result)

            handoff_context: List[LLMMessage] = []
            if len(tool_calls) > 0:
                # Include the thought in the AssistantMessage if model_result has it
                handoff_context.append(
                    AssistantMessage(
                        content=tool_calls,
                        source=agent_name,
                        thought=getattr(model_result, "thought", None),
                    )
                )
                handoff_context.append(FunctionExecutionResultMessage(content=tool_call_results))

            # Return response for the first handoff
            return Response(
                chat_message=HandoffMessage(
                    content=selected_handoff.message,
                    target=selected_handoff.target,
                    source=agent_name,
                    context=handoff_context,
                ),
                inner_messages=inner_messages,
            )
        return None

    @classmethod
    async def _reflect_on_tool_use_flow(
        cls,
        model_client: ChatCompletionClient,
        model_client_stream: bool,
        model_context: ChatCompletionContext,
        agent_name: str,
        inner_messages: List[AgentEvent | ChatMessage],
    ) -> AsyncGenerator[Response | ModelClientStreamingChunkEvent | ThoughtEvent, None]:
        """
        If reflect_on_tool_use=True, we do another inference based on tool results
        and yield the final text response (or streaming chunks).
        """
        all_messages = await model_context.get_messages()
        llm_messages = cls._get_compatible_context(model_client=model_client, messages=all_messages)

        reflection_result: Optional[CreateResult] = None

        if model_client_stream:
            async for chunk in model_client.create_stream(llm_messages):
                if isinstance(chunk, CreateResult):
                    reflection_result = chunk
                elif isinstance(chunk, str):
                    yield ModelClientStreamingChunkEvent(content=chunk, source=agent_name)
                else:
                    raise RuntimeError(f"Invalid chunk type: {type(chunk)}")
        else:
            reflection_result = await model_client.create(llm_messages)

        if not reflection_result or not isinstance(reflection_result.content, str):
            raise RuntimeError("Reflect on tool use produced no valid text response.")

        # --- NEW: If the reflection produced a thought, yield it ---
        if reflection_result.thought:
            thought_event = ThoughtEvent(content=reflection_result.thought, source=agent_name)
            yield thought_event
            inner_messages.append(thought_event)

        # Add to context (including thought if present)
        await model_context.add_message(
            AssistantMessage(
                content=reflection_result.content,
                source=agent_name,
                thought=getattr(reflection_result, "thought", None),
            )
        )

        yield Response(
            chat_message=TextMessage(
                content=reflection_result.content,
                source=agent_name,
                models_usage=reflection_result.usage,
            ),
            inner_messages=inner_messages,
        )

    @staticmethod
    def _summarize_tool_use(
        executed_calls_and_results: List[Tuple[FunctionCall, FunctionExecutionResult]],
        inner_messages: List[AgentEvent | ChatMessage],
        handoffs: Dict[str, HandoffBase],
        tool_call_summary_format: str,
        agent_name: str,
    ) -> Response:
        """
        If reflect_on_tool_use=False, create a summary message of all tool calls.
        """
        # Filter out calls which were actually handoffs
        normal_tool_calls = [
            (call, result) for call, result in executed_calls_and_results if call.name not in handoffs
        ]
        tool_call_summaries: List[str] = []
        for tool_call, tool_call_result in normal_tool_calls:
            tool_call_summaries.append(
                tool_call_summary_format.format(
                    tool_name=tool_call.name,
                    arguments=tool_call.arguments,
                    result=tool_call_result.content,
                )
            )
        tool_call_summary = "\n".join(tool_call_summaries)
        return Response(
            chat_message=ToolCallSummaryMessage(
                content=tool_call_summary,
                source=agent_name,
            ),
            inner_messages=inner_messages,
        )

    @staticmethod
    async def _execute_tool_call(
        tool_call: FunctionCall,
        tools: List[BaseTool[Any, Any]],
        handoff_tools: List[BaseTool[Any, Any]],
        agent_name: str,
        cancellation_token: CancellationToken,
    ) -> Tuple[FunctionCall, FunctionExecutionResult]:
        """Execute a single tool call and return the result."""
        try:
            all_tools = tools + handoff_tools
            if not all_tools:
                raise ValueError("No tools are available.")
            tool = next((t for t in all_tools if t.name == tool_call.name), None)
            if tool is None:
                raise ValueError(f"The tool '{tool_call.name}' is not available.")
            arguments: Dict[str, Any] = json.loads(tool_call.arguments) if tool_call.arguments else {}
            result = await tool.run_json(arguments, cancellation_token)
            result_as_str = tool.return_value_as_string(result)
            return (
                tool_call,
                FunctionExecutionResult(
                    content=result_as_str,
                    call_id=tool_call.id,
                    is_error=False,
                    name=tool_call.name,
                ),
            )
        except Exception as e:
            return (
                tool_call,
                FunctionExecutionResult(
                    content=f"Error: {e}",
                    call_id=tool_call.id,
                    is_error=True,
                    name=tool_call.name,
                ),
            )
    async def on_reset(self, cancellation_token: CancellationToken) -> None:
        """Reset the assistant agent to its initialization state."""
        await self._model_context.clear()
    async def save_state(self) -> Mapping[str, Any]:
        """Save the current state of the assistant agent."""
        model_context_state = await self._model_context.save_state()
        return AssistantAgentState(llm_context=model_context_state).model_dump()
    async def load_state(self, state: Mapping[str, Any]) -> None:
        """Load the state of the assistant agent."""
        assistant_agent_state = AssistantAgentState.model_validate(state)
        # Load the model context state.
        await self._model_context.load_state(assistant_agent_state.llm_context)
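    # Usage sketch (illustrative, not part of this class): persisting the agent's
    # state across sessions. Assumes `agent` is an AssistantAgent; the mapping
    # returned by `save_state` is JSON-serializable, so it can be written to disk
    # and restored later with `load_state`.
    #
    #     state = await agent.save_state()
    #     with open("agent_state.json", "w") as f:
    #         json.dump(state, f)
    #     ...
    #     with open("agent_state.json") as f:
    #         await agent.load_state(json.load(f))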
    @staticmethod
    def _get_compatible_context(model_client: ChatCompletionClient, messages: List[LLMMessage]) -> Sequence[LLMMessage]:
        """Ensure that the messages are compatible with the underlying client, by removing images if needed."""
        if model_client.model_info["vision"]:
            return messages
        else:
            return remove_images(messages)

    def _to_config(self) -> AssistantAgentConfig:
        """Convert the assistant agent to a declarative config."""
        return AssistantAgentConfig(
            name=self.name,
            model_client=self._model_client.dump_component(),
            tools=[tool.dump_component() for tool in self._tools],
            handoffs=list(self._handoffs.values()) if self._handoffs else None,
            model_context=self._model_context.dump_component(),
            memory=[memory.dump_component() for memory in self._memory] if self._memory else None,
            description=self.description,
            system_message=(
                self._system_messages[0].content
                if self._system_messages and isinstance(self._system_messages[0].content, str)
                else None
            ),
            model_client_stream=self._model_client_stream,
            reflect_on_tool_use=self._reflect_on_tool_use,
            tool_call_summary_format=self._tool_call_summary_format,
        )

    @classmethod
    def _from_config(cls, config: AssistantAgentConfig) -> Self:
        """Create an assistant agent from a declarative config."""
        return cls(
            name=config.name,
            model_client=ChatCompletionClient.load_component(config.model_client),
            tools=[BaseTool.load_component(tool) for tool in config.tools] if config.tools else None,
            handoffs=config.handoffs,
            model_context=None,
            memory=[Memory.load_component(memory) for memory in config.memory] if config.memory else None,
            description=config.description,
            system_message=config.system_message,
            model_client_stream=config.model_client_stream,
            reflect_on_tool_use=config.reflect_on_tool_use,
            tool_call_summary_format=config.tool_call_summary_format,
        )
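# Usage sketch (illustrative, not part of this module): round-tripping an agent
# through its declarative config. `dump_component` and `load_component` come from
# the autogen_core Component API; the model client shown is an assumption.
#
#     model_client = OpenAIChatCompletionClient(model="gpt-4o")
#     agent = AssistantAgent(name="assistant", model_client=model_client)
#     config = agent.dump_component()  # -> ComponentModel (via _to_config)
#     restored = AssistantAgent.load_component(config)  # -> AssistantAgent (via _from_config)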