autogen_ext.agents.openai#

class OpenAIAssistantAgent(name: str, description: str, client: AsyncClient, model: str, instructions: str, tools: Iterable[Literal['code_interpreter', 'file_search'] | Tool | Callable[[...], Any] | Callable[[...], Awaitable[Any]]] | None = None, assistant_id: str | None = None, thread_id: str | None = None, metadata: object | None = None, response_format: AssistantResponseFormatOptionParam | None = None, temperature: float | None = None, tool_resources: ToolResources | None = None, top_p: float | None = None)[source]#

Bases: BaseChatAgent

An agent implementation that uses the OpenAI Assistant API to generate responses.

This agent leverages the OpenAI Assistant API to create AI assistants with capabilities like:

  • Code interpretation and execution

  • File handling and search

  • Custom function calling

  • Multi-turn conversations

The agent maintains a conversation thread and can use various tools, including:

  • Code interpreter: For executing code and working with files

  • File search: For searching through uploaded documents

  • Custom functions: For extending capabilities with user-defined tools (a function-tool sketch follows the example below)

Key Features:

  • Supports multiple file formats, including code, documents, and images

  • Can handle up to 128 tools per assistant

  • Maintains conversation context in threads

  • Supports file uploads for code interpreter and search

  • Vector store integration for efficient file search

  • Automatic file parsing and embedding

Example

from openai import AsyncClient
from autogen_core import CancellationToken
import asyncio
from autogen_ext.agents.openai import OpenAIAssistantAgent
from autogen_agentchat.messages import TextMessage


async def example():
    cancellation_token = CancellationToken()

    # Create an OpenAI client
    client = AsyncClient(api_key="your-api-key", base_url="your-base-url")

    # Create an assistant with code interpreter
    assistant = OpenAIAssistantAgent(
        name="Python Helper",
        description="Helps with Python programming",
        client=client,
        model="gpt-4",
        instructions="You are a helpful Python programming assistant.",
        tools=["code_interpreter"],
    )

    # Upload files for the assistant to use
    await assistant.on_upload_for_code_interpreter("data.csv", cancellation_token)

    # Get response from the assistant
    _response = await assistant.on_messages(
        [TextMessage(source="user", content="Analyze the data in data.csv")], cancellation_token
    )

    # Clean up resources
    await assistant.delete_uploaded_files(cancellation_token)
    await assistant.delete_assistant(cancellation_token)


asyncio.run(example())
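
The capability lists above also mention custom function calling. The sketch below shows passing a plain Python callable as a tool, reusing the imports from the example above; the get_weather function, its behavior, and the model choice are illustrative assumptions, not part of the API.

async def get_weather(city: str) -> str:
    """Illustrative tool function; replace with a real implementation."""
    return f"The weather in {city} is sunny."


async def function_tool_example():
    cancellation_token = CancellationToken()
    client = AsyncClient(api_key="your-api-key")

    # Plain callables (sync or async) may be passed alongside the built-in
    # "code_interpreter" and "file_search" tool names.
    assistant = OpenAIAssistantAgent(
        name="Weather Helper",
        description="Answers weather questions",
        client=client,
        model="gpt-4",
        instructions="Use the provided tools to answer questions.",
        tools=[get_weather],
    )

    response = await assistant.on_messages(
        [TextMessage(source="user", content="What is the weather in Paris?")], cancellation_token
    )
    print(response.chat_message.content)

    await assistant.delete_assistant(cancellation_token)


asyncio.run(function_tool_example())
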
Parameters:
  • name (str) – Name of the assistant

  • description (str) – Description of the assistant’s purpose

  • client (AsyncClient) – OpenAI API client instance

  • model (str) – Model to use (e.g. “gpt-4”)

  • instructions (str) – System instructions for the assistant

  • tools (Optional[Iterable[Union[Literal["code_interpreter", "file_search"], Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]]]]) – Tools the assistant can use

  • assistant_id (Optional[str]) – ID of existing assistant to use (see the sketch after this parameter list)

  • thread_id (Optional[str]) – ID of existing thread to continue the conversation in

  • metadata (Optional[object]) – Additional metadata for the assistant

  • response_format (Optional[AssistantResponseFormatOptionParam]) – Response format settings

  • temperature (Optional[float]) – Temperature for response generation

  • tool_resources (Optional[ToolResources]) – Additional tool configuration

  • top_p (Optional[float]) – Top p sampling parameter
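
A minimal, hedged sketch of attaching to an existing assistant and thread by ID, assuming the client from the example above; the IDs are placeholders, not real values.

existing_assistant = OpenAIAssistantAgent(
    name="Python Helper",
    description="Helps with Python programming",
    client=client,
    model="gpt-4",
    instructions="You are a helpful Python programming assistant.",
    assistant_id="asst_...",  # placeholder: reuse an assistant created elsewhere
    thread_id="thread_...",  # placeholder: continue an existing conversation thread
)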

async delete_assistant(cancellation_token: CancellationToken) → None[source]#

Delete the assistant if it was created by this instance.

async delete_uploaded_files(cancellation_token: CancellationToken) → None[source]#

Delete all files that were uploaded by this agent instance.

async delete_vector_store(cancellation_token: CancellationToken) → None[source]#

Delete the vector store if it was created by this instance.

async handle_text_message(content: str, cancellation_token: CancellationToken) → None[source]#

Handle regular text messages by adding them to the thread.

property messages: AsyncMessages#
async on_messages(messages: Sequence[Annotated[TextMessage | MultiModalMessage | StopMessage | HandoffMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]], cancellation_token: CancellationToken) → Response[source]#

Handle incoming messages and return a response.

async on_messages_stream(messages: Sequence[Annotated[TextMessage | MultiModalMessage | StopMessage | HandoffMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]], cancellation_token: CancellationToken) → AsyncGenerator[Annotated[TextMessage | MultiModalMessage | StopMessage | HandoffMessage | ToolCallMessage | ToolCallResultMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')] | Response, None][source]#

Handle incoming messages and yield intermediate messages as a stream, with the final item being the Response.
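
A minimal, hedged usage sketch for the streaming variant, reusing the imports from the example above; it assumes the final yielded item is the Response (per the return annotation) and simply prints intermediate items as they arrive.

from autogen_agentchat.base import Response


async def stream_example(assistant: OpenAIAssistantAgent, cancellation_token: CancellationToken) -> None:
    async for item in assistant.on_messages_stream(
        [TextMessage(source="user", content="Summarize data.csv")], cancellation_token
    ):
        if isinstance(item, Response):
            print(item.chat_message.content)  # final item is the Response
        else:
            print(item)  # intermediate message or tool call event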

async on_reset(cancellation_token: CancellationToken) → None[source]#

Handle reset command by deleting new messages and runs since initialization.

async on_upload_for_code_interpreter(file_paths: str | Iterable[str], cancellation_token: CancellationToken) → None[source]#

Handle file uploads for the code interpreter.

async on_upload_for_file_search(file_paths: str | Iterable[str], cancellation_token: CancellationToken) → None[source]#

Handle file uploads for file search.
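
A minimal, hedged usage sketch, assuming the same path-based calling convention as on_upload_for_code_interpreter above; the file names are placeholders and the calls are assumed to run inside an async function.

await assistant.on_upload_for_file_search(["manual.pdf", "notes.txt"], cancellation_token)

response = await assistant.on_messages(
    [TextMessage(source="user", content="What does the manual say about setup?")], cancellation_token
)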

property produced_message_types: List[type[Annotated[TextMessage | MultiModalMessage | StopMessage | HandoffMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]]]#

The types of messages that the assistant agent produces.

property runs: AsyncRuns#
property threads: AsyncThreads#