The following example shows how to create an OpenAIChatAgent and chat with it.

First, import the required namespaces:

using AutoGen.Core;
using AutoGen.OpenAI;
using AutoGen.OpenAI.Extension;
using FluentAssertions;
using OpenAI;
using OpenAI.Chat;

Then, create an OpenAIChatAgent and chat with it:

var openAIKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new Exception("Please set OPENAI_API_KEY environment variable.");
var modelId = "gpt-4o-mini";
var openAIClient = new OpenAIClient(openAIKey);

// create an open ai chat agent
var openAIChatAgent = new OpenAIChatAgent(
    chatClient: openAIClient.GetChatClient(modelId),
    name: "assistant",
    systemMessage: "You are an assistant that help user to do some tasks.");

// OpenAIChatAgent supports the following message types:
// - IMessage<ChatMessage> where ChatMessage is from OpenAI.Chat

var helloMessage = new UserChatMessage("Hello");

// Use MessageEnvelope.Create to create an IMessage<ChatMessage>
var chatMessageContent = MessageEnvelope.Create(helloMessage);
var reply = await openAIChatAgent.SendAsync(chatMessageContent);

// The type of reply is MessageEnvelope<ChatCompletion> where ChatCompletion is from OpenAI.Chat
reply.Should().BeOfType<MessageEnvelope<ChatCompletion>>();

// You can un-envelop the reply to get the ChatCompletion
ChatCompletion response = reply.As<MessageEnvelope<ChatCompletion>>().Content;
response.Role.Should().Be(ChatMessageRole.Assistant);
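
If you only need the reply text, you can read it from the ChatCompletion directly. Below is a minimal sketch, assuming the OpenAI.Chat ChatCompletion type exposes the generated text through its Content parts:

// read the assistant's text from the ChatCompletion
// assumption: Content is a list of ChatMessageContentPart items, each with a Text property
var assistantText = response.Content[0].Text;
Console.WriteLine(assistantText);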

OpenAIChatAgent also supports streaming chat via @AutoGen.Core.IStreamingAgent.GenerateStreamingReplyAsync*.

var streamingReply = openAIChatAgent.GenerateStreamingReplyAsync(new[] { chatMessageContent });

await foreach (var streamingMessage in streamingReply)
{
    streamingMessage.Should().BeOfType<MessageEnvelope<StreamingChatCompletionUpdate>>();
    streamingMessage.As<MessageEnvelope<StreamingChatCompletionUpdate>>().Content.Role.Should().Be(ChatMessageRole.Assistant);
}
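
To assemble the streamed chunks into a single reply, you can accumulate the text deltas from each update. The sketch below assumes each StreamingChatCompletionUpdate (from OpenAI.Chat) carries its text delta in the ContentUpdate collection:

// accumulate the streamed text deltas into one reply
var fullReply = string.Empty;
await foreach (var streamingMessage in openAIChatAgent.GenerateStreamingReplyAsync(new[] { chatMessageContent }))
{
    var update = streamingMessage.As<MessageEnvelope<StreamingChatCompletionUpdate>>().Content;
    if (update.ContentUpdate.Count > 0)
    {
        fullReply += update.ContentUpdate[0].Text;
    }
}

Console.WriteLine(fullReply);

If you would rather work with AutoGen's built-in message types (e.g. @AutoGen.Core.TextMessage) instead of raw OpenAI.Chat messages, the AutoGen.OpenAI.Extension namespace also provides a RegisterMessageConnector() helper that wraps the agent with a message-connector middleware.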