# Source code for autogen_agentchat.agents._society_of_mind_agent
from typing import Any, AsyncGenerator, List, Mapping, Sequence

from autogen_core import CancellationToken, Component, ComponentModel
from autogen_core.models import ChatCompletionClient, LLMMessage, SystemMessage, UserMessage
from pydantic import BaseModel
from typing_extensions import Self

from autogen_agentchat.base import Response
from autogen_agentchat.state import SocietyOfMindAgentState

from ..base import TaskResult, Team
from ..messages import (
    AgentEvent,
    BaseChatMessage,
    ChatMessage,
    TextMessage,
)
from ._base_chat_agent import BaseChatAgent


class SocietyOfMindAgentConfig(BaseModel):
    """The declarative configuration for a SocietyOfMindAgent."""

    # Agent name used as the message source.
    name: str
    # Serialized component model for the inner team (see Component.dump_component).
    team: ComponentModel
    # Serialized component model for the chat-completion client used to
    # synthesize the final response from the inner team's transcript.
    model_client: ComponentModel
    description: str
    # System-role text prepended before the inner team's messages.
    instruction: str
    # System-role text appended after the inner team's messages.
    response_prompt: str
class SocietyOfMindAgent(BaseChatAgent, Component[SocietyOfMindAgentConfig]):
    """An agent that uses an inner team of agents to generate responses.

    Each time the agent's :meth:`on_messages` or :meth:`on_messages_stream`
    method is called, it runs the inner team of agents and then uses the
    model client to generate a response based on the inner team's messages.
    Once the response is generated, the agent resets the inner team by
    calling :meth:`Team.reset`.

    Args:
        name (str): The name of the agent.
        team (Team): The team of agents to use.
        model_client (ChatCompletionClient): The model client to use for preparing responses.
        description (str, optional): The description of the agent.
        instruction (str, optional): The instruction to use when generating a response using
            the inner team's messages. Defaults to :attr:`DEFAULT_INSTRUCTION`. It assumes
            the role of 'system'.
        response_prompt (str, optional): The response prompt to use when generating a response
            using the inner team's messages. Defaults to :attr:`DEFAULT_RESPONSE_PROMPT`.
            It assumes the role of 'system'.

    Example:

    .. code-block:: python

        import asyncio
        from autogen_agentchat.ui import Console
        from autogen_agentchat.agents import AssistantAgent, SocietyOfMindAgent
        from autogen_ext.models.openai import OpenAIChatCompletionClient
        from autogen_agentchat.teams import RoundRobinGroupChat
        from autogen_agentchat.conditions import TextMentionTermination


        async def main() -> None:
            model_client = OpenAIChatCompletionClient(model="gpt-4o")

            agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a writer, write well.")
            agent2 = AssistantAgent(
                "assistant2",
                model_client=model_client,
                system_message="You are an editor, provide critical feedback. Respond with 'APPROVE' if the text addresses all feedbacks.",
            )
            inner_termination = TextMentionTermination("APPROVE")
            inner_team = RoundRobinGroupChat([agent1, agent2], termination_condition=inner_termination)

            society_of_mind_agent = SocietyOfMindAgent("society_of_mind", team=inner_team, model_client=model_client)

            agent3 = AssistantAgent(
                "assistant3", model_client=model_client, system_message="Translate the text to Spanish."
            )
            team = RoundRobinGroupChat([society_of_mind_agent, agent3], max_turns=2)

            stream = team.run_stream(task="Write a short story with a surprising ending.")
            await Console(stream)


        asyncio.run(main())
    """

    # Declarative-config wiring for Component serialization.
    component_config_schema = SocietyOfMindAgentConfig
    component_provider_override = "autogen_agentchat.agents.SocietyOfMindAgent"

    DEFAULT_INSTRUCTION = "Earlier you were asked to fulfill a request. You and your team worked diligently to address that request. Here is a transcript of that conversation:"
    """str: The default instruction to use when generating a response using the
    inner team's messages. The instruction will be prepended to the inner team's
    messages when generating a response using the model. It assumes the role of
    'system'."""

    DEFAULT_RESPONSE_PROMPT = (
        "Output a standalone response to the original request, without mentioning any of the intermediate discussion."
    )
    """str: The default response prompt to use when generating a response using
    the inner team's messages. It assumes the role of 'system'."""

    def __init__(
        self,
        name: str,
        team: Team,
        model_client: ChatCompletionClient,
        *,
        description: str = "An agent that uses an inner team of agents to generate responses.",
        instruction: str = DEFAULT_INSTRUCTION,
        response_prompt: str = DEFAULT_RESPONSE_PROMPT,
    ) -> None:
        super().__init__(name=name, description=description)
        self._team = team
        self._model_client = model_client
        self._instruction = instruction
        self._response_prompt = response_prompt

    @property
    def produced_message_types(self) -> Sequence[type[ChatMessage]]:
        # The final response is always a plain text message.
        return (TextMessage,)
[docs]asyncdefon_messages(self,messages:Sequence[ChatMessage],cancellation_token:CancellationToken)->Response:# Call the stream method and collect the messages.response:Response|None=Noneasyncformsginself.on_messages_stream(messages,cancellation_token):ifisinstance(msg,Response):response=msgassertresponseisnotNonereturnresponse
    async def on_messages_stream(
        self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken
    ) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
        """Run the inner team and stream its messages, ending with a final Response.

        Yields every inner-team message (excluding the echoed task messages),
        then synthesizes a single response via the model client, yields it as a
        :class:`Response`, and finally resets the inner team.
        """
        # Prepare the task for the team of agents.
        task = list(messages)
        # Run the team of agents.
        result: TaskResult | None = None
        inner_messages: List[AgentEvent | ChatMessage] = []
        # run_stream re-yields the task messages first; `count` tracks how many
        # non-TaskResult items we've seen so those echoes can be skipped.
        count = 0
        async for inner_msg in self._team.run_stream(task=task, cancellation_token=cancellation_token):
            if isinstance(inner_msg, TaskResult):
                # The terminal item of the stream; kept only to assert completion.
                result = inner_msg
            else:
                count += 1
                if count <= len(task):
                    # Skip the task messages.
                    continue
                yield inner_msg
                inner_messages.append(inner_msg)
        assert result is not None

        if len(inner_messages) == 0:
            # The team produced nothing beyond the task echoes; emit a stub
            # response rather than calling the model with an empty transcript.
            yield Response(
                chat_message=TextMessage(source=self.name, content="No response."), inner_messages=inner_messages
            )
        else:
            # Generate a response using the model client: wrap the inner team's
            # chat messages (events are excluded) between the instruction and
            # the response prompt, both in the 'system' role.
            llm_messages: List[LLMMessage] = [SystemMessage(content=self._instruction)]
            llm_messages.extend(
                [
                    UserMessage(content=message.content, source=message.source)
                    for message in inner_messages
                    if isinstance(message, BaseChatMessage)
                ]
            )
            llm_messages.append(SystemMessage(content=self._response_prompt))
            completion = await self._model_client.create(messages=llm_messages, cancellation_token=cancellation_token)
            assert isinstance(completion.content, str)
            yield Response(
                chat_message=TextMessage(source=self.name, content=completion.content, models_usage=completion.usage),
                inner_messages=inner_messages,
            )

        # Reset the team so the next call starts fresh. NOTE(review): placed at
        # the outer level so it runs in both branches, per the class contract of
        # resetting after every call — confirm against upstream if in doubt.
        await self._team.reset()