Table of Contents

You can chat with SemanticKernelAgent using both streaming and non-streaming methods, and exchange the native ChatMessageContent type via IMessage&lt;ChatMessageContent&gt;.

The following example shows how to create a SemanticKernelAgent and chat with it using the non-streaming method:

var openAIKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new Exception("Please set OPENAI_API_KEY environment variable.");
var modelId = "gpt-3.5-turbo";
var builder = Kernel.CreateBuilder()
   .AddOpenAIChatCompletion(modelId: modelId, apiKey: openAIKey);
var kernel = builder.Build();

// create a semantic kernel agent
var semanticKernelAgent = new SemanticKernelAgent(
    kernel: kernel,
    name: "assistant",
    systemMessage: "You are an assistant that help user to do some tasks.");

// SemanticKernelAgent supports the following message types:
// - IMessage<ChatMessageContent> where ChatMessageContent comes from Microsoft.SemanticKernel

var helloMessage = new ChatMessageContent(AuthorRole.User, "Hello");

// Use MessageEnvelope.Create to wrap the ChatMessageContent in an IMessage<ChatMessageContent>
var chatMessageContent = MessageEnvelope.Create(helloMessage);
var reply = await semanticKernelAgent.SendAsync(chatMessageContent);

// The reply is a MessageEnvelope<ChatMessageContent> where ChatMessageContent comes from Microsoft.SemanticKernel
reply.Should().BeOfType<MessageEnvelope<ChatMessageContent>>();

// You can un-envelope the reply to get the underlying ChatMessageContent
ChatMessageContent response = reply.As<MessageEnvelope<ChatMessageContent>>().Content;
response.Role.Should().Be(AuthorRole.Assistant);

SemanticKernelAgent also supports streaming chat via GenerateStreamingReplyAsync.

// Stream the reply; each chunk arrives as an IMessage wrapping a StreamingChatMessageContent.
await foreach (var chunk in semanticKernelAgent.GenerateStreamingReplyAsync(new[] { chatMessageContent }))
{
    chunk.Should().BeOfType<MessageEnvelope<StreamingChatMessageContent>>();
    chunk.As<MessageEnvelope<StreamingChatMessageContent>>().From.Should().Be("assistant");
}