{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Structured output using GPT-4o models\n",
    "\n",
    "This cookbook demonstrates how to obtain structured output using GPT-4o models. The OpenAI beta client SDK provides a parse helper that allows you to use your own Pydantic model, eliminating the need to define a JSON schema. This approach is recommended for supported models.\n",
    "\n",
    "Currently, this feature is supported for:\n",
    "\n",
    "- gpt-4o-mini on OpenAI\n",
    "- gpt-4o-2024-08-06 on OpenAI\n",
    "- gpt-4o-2024-08-06 on Azure"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's define a simple message type that carries explanation and output for a Math problem"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pydantic import BaseModel\n",
    "\n",
    "\n",
    "class MathReasoning(BaseModel):\n",
    "    class Step(BaseModel):\n",
    "        explanation: str\n",
    "        output: str\n",
    "\n",
    "    steps: list[Step]\n",
    "    final_answer: str"
   ]
  },
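  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Under the hood, the parse helper converts the Pydantic model into a JSON schema that is sent as the response format. To see what that schema looks like (purely for illustration; this step is not required), you can call Pydantic's `model_json_schema()`:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "# Inspect the JSON schema Pydantic derives from the MathReasoning model\n",
    "print(json.dumps(MathReasoning.model_json_schema(), indent=2))"
   ]
  },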
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "# Set the environment variable\n",
    "os.environ[\"AZURE_OPENAI_ENDPOINT\"] = \"https://YOUR_ENDPOINT_DETAILS.openai.azure.com/\"\n",
    "os.environ[\"AZURE_OPENAI_API_KEY\"] = \"YOUR_API_KEY\"\n",
    "os.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"] = \"gpt-4o-2024-08-06\"\n",
    "os.environ[\"AZURE_OPENAI_API_VERSION\"] = \"2024-08-01-preview\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import os\n",
    "from typing import Optional\n",
    "\n",
    "from autogen_core.models import UserMessage\n",
    "from autogen_ext.models.openai import AzureOpenAIChatCompletionClient\n",
    "\n",
    "\n",
    "# Function to get environment variable and ensure it is not None\n",
    "def get_env_variable(name: str) -> str:\n",
    "    value = os.getenv(name)\n",
    "    if value is None:\n",
    "        raise ValueError(f\"Environment variable {name} is not set\")\n",
    "    return value\n",
    "\n",
    "\n",
    "# Create the client with type-checked environment variables\n",
    "client = AzureOpenAIChatCompletionClient(\n",
    "    azure_deployment=get_env_variable(\"AZURE_OPENAI_DEPLOYMENT_NAME\"),\n",
    "    model=get_env_variable(\"AZURE_OPENAI_MODEL\"),\n",
    "    api_version=get_env_variable(\"AZURE_OPENAI_API_VERSION\"),\n",
    "    azure_endpoint=get_env_variable(\"AZURE_OPENAI_ENDPOINT\"),\n",
    "    api_key=get_env_variable(\"AZURE_OPENAI_API_KEY\"),\n",
    ")"
   ]
  },
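  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "If you are calling the OpenAI API directly instead of Azure, a minimal sketch of the equivalent client looks like the cell below (it assumes an `OPENAI_API_KEY` environment variable is set and is not used in the rest of this notebook):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
    "\n",
    "# Alternative client for the OpenAI API (not Azure); assumes OPENAI_API_KEY is set\n",
    "openai_client = OpenAIChatCompletionClient(\n",
    "    model=\"gpt-4o-mini\",\n",
    "    api_key=get_env_variable(\"OPENAI_API_KEY\"),\n",
    ")"
   ]
  },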
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'steps': [{'explanation': 'Start by aligning the numbers vertically.', 'output': '\\n  16\\n+ 32'}, {'explanation': 'Add the units digits: 6 + 2 = 8.', 'output': '\\n  16\\n+ 32\\n   8'}, {'explanation': 'Add the tens digits: 1 + 3 = 4.', 'output': '\\n  16\\n+ 32\\n  48'}], 'final_answer': '48'}\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "MathReasoning(steps=[Step(explanation='Start by aligning the numbers vertically.', output='\\n  16\\n+ 32'), Step(explanation='Add the units digits: 6 + 2 = 8.', output='\\n  16\\n+ 32\\n   8'), Step(explanation='Add the tens digits: 1 + 3 = 4.', output='\\n  16\\n+ 32\\n  48')], final_answer='48')"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Define the user message\n",
    "messages = [\n",
    "    UserMessage(content=\"What is 16 + 32?\", source=\"user\"),\n",
    "]\n",
    "\n",
    "# Call the create method on the client, passing the messages and additional arguments\n",
    "# The extra_create_args dictionary includes the response format as MathReasoning model we defined above\n",
    "# Providing the response format and pydantic model will use the new parse method from beta SDK\n",
    "response = await client.create(messages=messages, extra_create_args={\"response_format\": MathReasoning})\n",
    "\n",
    "# Ensure the response content is a valid JSON string before loading it\n",
    "response_content: Optional[str] = response.content if isinstance(response.content, str) else None\n",
    "if response_content is None:\n",
    "    raise ValueError(\"Response content is not a valid JSON string\")\n",
    "\n",
    "# Print the response content after loading it as JSON\n",
    "print(json.loads(response_content))\n",
    "\n",
    "# Validate the response content with the MathReasoning model\n",
    "MathReasoning.model_validate(json.loads(response_content))"
   ]
  }
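  ,
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The validated result is a regular Pydantic instance, so the reasoning steps and the final answer can be accessed as typed attributes. A short sketch reusing `response_content` from the cell above:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Parse the JSON response into the MathReasoning model and work with its typed fields\n",
    "reasoning = MathReasoning.model_validate(json.loads(response_content))\n",
    "\n",
    "for i, step in enumerate(reasoning.steps, start=1):\n",
    "    print(f\"Step {i}: {step.explanation}\")\n",
    "\n",
    "print(\"Final answer:\", reasoning.final_answer)"
   ]
  }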
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}