Skip to content

Python POML Core APIs

Estimated time to read: 1 minute

poml

clear_trace()

Clear the collected trace log.

Source code in python/poml/api.py
def clear_trace() -> None:
    """Drop every entry accumulated in the trace log."""
    _trace_log.clear()

get_trace()

Return a copy of the trace log.

Source code in python/poml/api.py
def get_trace() -> List[Dict[str, Any]]:
    """Return a shallow copy of the accumulated trace records."""
    # Copy so callers cannot mutate the internal log through the return value.
    return [*_trace_log]

poml(markup, context=None, stylesheet=None, chat=True, output_file=None, format='message_dict', extra_args=None)

Process POML markup and return the result in the specified format.

POML (Prompt Orchestration Markup Language) is a markup language for creating structured prompts and conversations. This function processes POML markup with optional context and styling, returning the result in various formats optimized for different LLM frameworks and use cases.

Parameters:

Name Type Description Default
markup str | Path

POML markup content as a string, or path to a POML file. If a string that looks like a file path but doesn't exist, a warning is issued and it's treated as markup content.

required
context dict | str | Path | None

Optional context data to inject into the POML template. Can be a dictionary, JSON string, or path to a JSON file.

None
stylesheet dict | str | Path | None

Optional stylesheet for customizing POML rendering. Can be a dictionary, JSON string, or path to a JSON file.

None
chat bool

If True, process as a chat conversation (default). If False, process as a single prompt.

True
output_file str | Path | None

Optional path to save the output. If not provided, output is returned directly without saving to disk.

None
format OutputFormat

Output format for the result: - "raw": Raw string output from POML processor - "message_dict": Legacy format returning just messages array (default) - "dict": Full CLI result structure with messages, schema, tools, runtime - "openai_chat": OpenAI Chat Completion API format with tool support - "langchain": LangChain message format with structured data - "pydantic": PomlFrame object with typed Pydantic models

'message_dict'
extra_args Optional[List[str]]

Additional command-line arguments to pass to the POML processor.

None

Returns:

Type: list | dict | str | PomlFrame

The processed result in the specified format:
  • str when format="raw"
  • list when format="message_dict" (legacy messages array)
  • dict when format="dict", "openai_chat", or "langchain"
  • PomlFrame when format="pydantic"

For format="message_dict": Returns just the messages array for backward
compatibility. Example: [{"speaker": "human", "content": "Hello"}]

For format="dict": Returns the complete structure with all metadata.
Example: {"messages": [...], "schema": {...}, "tools": [...], "runtime": {...}}

For format="openai_chat": Returns OpenAI Chat Completion format with tool/schema
support. Includes "messages" in OpenAI format, "tools" if present, "response_format"
for JSON schema if present, and runtime parameters converted to snake_case.

For format="langchain": Returns LangChain format preserving all metadata with
"messages" in LangChain format plus schema, tools, and runtime if present.

For format="pydantic": Returns a strongly-typed PomlFrame object containing
messages as PomlMessage objects, output_schema, tools, and runtime.

Raises:

Type Description
FileNotFoundError

When a specified file path doesn't exist.

RuntimeError

When the POML processor fails or backend tracing requirements aren't met.

ValueError

When an invalid output format is specified.

Examples:

Basic usage with markup string:

>>> result = poml("<p>Hello {{name}}!</p>", context={"name": "World"})

Load from file with context:

>>> result = poml("template.poml", context="context.json")

Get OpenAI chat format:

>>> messages = poml("chat.poml", format="openai_chat")

Use with custom stylesheet:

>>> result = poml(
...     markup="template.poml",
...     context={"user": "Alice"},
...     stylesheet={"role": {"captionStyle": "bold"}},
...     format="pydantic"
... )

Save output to file:

>>> poml("template.poml", output_file="output.json", format="raw")
Note
  • When tracing is enabled via set_trace(), call details are automatically logged
  • The function supports various backend integrations (Weave, AgentOps, MLflow)
  • Multi-modal content (images, etc.) is supported in chat format
Source code in python/poml/api.py
def poml(
    markup: str | Path,
    context: dict | str | Path | None = None,
    stylesheet: dict | str | Path | None = None,
    chat: bool = True,
    output_file: str | Path | None = None,
    format: OutputFormat = "message_dict",
    extra_args: Optional[List[str]] = None,
) -> list | dict | str | PomlFrame:
    """Process POML markup and return the result in the specified format.

    POML (Prompt Orchestration Markup Language) is a markup language for creating
    structured prompts and conversations. This function processes POML markup
    with optional context and styling, returning the result in various formats
    optimized for different LLM frameworks and use cases.

    Args:
        markup: POML markup content as a string, or path to a POML file.
            If a string that looks like a file path but doesn't exist,
            a warning is issued and it's treated as markup content.
        context: Optional context data to inject into the POML template.
            Can be a dictionary, JSON string, or path to a JSON file.
        stylesheet: Optional stylesheet for customizing POML rendering.
            Can be a dictionary, JSON string, or path to a JSON file.
        chat: If True, process as a chat conversation (default).
            If False, process as a single prompt.
        output_file: Optional path to save the output. If not provided,
            output is returned directly without saving to disk.
        format: Output format for the result:
            - "raw": Raw string output from POML processor
            - "message_dict": Legacy format returning just messages array (default)
            - "dict": Full CLI result structure with messages, schema, tools, runtime
            - "openai_chat": OpenAI Chat Completion API format with tool support
            - "langchain": LangChain message format with structured data
            - "pydantic": PomlFrame object with typed Pydantic models
        extra_args: Additional command-line arguments to pass to the POML processor.

    Returns:
        The processed result in the specified format:
        - str when format="raw"
        - list when format="message_dict" (legacy messages array)
        - dict when format="dict", "openai_chat", or "langchain"
        - PomlFrame when format="pydantic"

        For format="message_dict": Returns just the messages array for backward 
        compatibility. Example: `[{"speaker": "human", "content": "Hello"}]`

        For format="dict": Returns complete structure with all metadata.
        Example: `{"messages": [...], "schema": {...}, "tools": [...], "runtime": {...}}`

        For format="openai_chat": Returns OpenAI Chat Completion format with tool/schema 
        support. Includes "messages" in OpenAI format, "tools" if present, "response_format" 
        for JSON schema if present, and runtime parameters converted to `snake_case`.

        For format="langchain": Returns LangChain format preserving all metadata with
        "messages" in LangChain format plus schema, tools, and runtime if present.

        For format="pydantic": Returns strongly-typed PomlFrame object containing
        messages as PomlMessage objects, output_schema, tools, and runtime.

    Raises:
        FileNotFoundError: When a specified file path doesn't exist.
        RuntimeError: When the POML processor fails or backend tracing requirements aren't met.
        ValueError: When an invalid output format is specified.

    Examples:
        Basic usage with markup string:
        >>> result = poml("<p>Hello {{name}}!</p>", context={"name": "World"})

        Load from file with context:
        >>> result = poml("template.poml", context="context.json")

        Get OpenAI chat format:
        >>> messages = poml("chat.poml", format="openai_chat")

        Use with custom stylesheet:
        >>> result = poml(
        ...     markup="template.poml",
        ...     context={"user": "Alice"},
        ...     stylesheet={"role": {"captionStyle": "bold"}},
        ...     format="pydantic"
        ... )

        Save output to file:
        >>> poml("template.poml", output_file="output.json", format="raw")

    Note:
        - When tracing is enabled via set_trace(), call details are automatically logged
        - The function supports various backend integrations (Weave, AgentOps, MLflow)
        - Multi-modal content (images, etc.) is supported in chat format
    """
    # Temp files created for inline markup/context/stylesheet; closed in `finally`.
    temp_input_file = temp_context_file = temp_stylesheet_file = None
    trace_record: Dict[str, Any] | None = None
    try:
        if _trace_enabled:
            # Snapshot the raw inputs before processing so the in-memory trace
            # log captures exactly what this call received.
            trace_record = {}
            if isinstance(markup, Path) or os.path.exists(str(markup)):
                path = Path(markup)
                trace_record["markup_path"] = str(path)
                if path.exists():
                    trace_record["markup"] = path.read_text()
            else:
                # Markup was supplied inline, not as a file.
                trace_record["markup"] = str(markup)

            if isinstance(context, dict):
                trace_record["context"] = json.dumps(context)
            elif context:
                # Context given as a path-like value; record its contents if present.
                if os.path.exists(str(context)):
                    cpath = Path(context)
                    trace_record["context_path"] = str(cpath)
                    trace_record["context"] = cpath.read_text()
            if isinstance(stylesheet, dict):
                trace_record["stylesheet"] = json.dumps(stylesheet)
            elif stylesheet:
                if os.path.exists(str(stylesheet)):
                    spath = Path(stylesheet)
                    trace_record["stylesheet_path"] = str(spath)
                    trace_record["stylesheet"] = spath.read_text()

        # Resolve `markup` into an on-disk file for the CLI: an existing path is
        # used directly; any other string is written to a temporary file.
        if isinstance(markup, Path):
            if not markup.exists():
                raise FileNotFoundError(f"File not found: {markup}")
        else:
            if os.path.exists(markup):
                markup = Path(markup)
            else:
                # Test if the markup looks like a path.
                if re.match(r"^[\w\-./]+$", markup):
                    warnings.warn(
                        f"The markup '{markup}' looks like a file path, but it does not exist. Assuming it is a POML string."
                    )

                temp_input_file = write_file(markup)
                markup = Path(temp_input_file.name)
        with tempfile.NamedTemporaryFile("r") as temp_output_file:
            # When the caller did not request an output file, route CLI output
            # through a temporary file that is read back and discarded.
            if output_file is None:
                output_file = temp_output_file.name
                output_file_specified = False
            else:
                output_file_specified = True
                if isinstance(output_file, Path):
                    output_file = str(output_file)
            # Assemble the command-line arguments for the POML processor.
            args = ["-f", str(markup), "-o", output_file]
            if isinstance(context, dict):
                temp_context_file = write_file(json.dumps(context))
                args.extend(["--context-file", temp_context_file.name])
            elif context:
                if os.path.exists(context):
                    args.extend(["--context-file", str(context)])
                else:
                    raise FileNotFoundError(f"File not found: {context}")

            if isinstance(stylesheet, dict):
                temp_stylesheet_file = write_file(json.dumps(stylesheet))
                args.extend(["--stylesheet-file", temp_stylesheet_file.name])
            elif stylesheet:
                if os.path.exists(stylesheet):
                    args.extend(["--stylesheet-file", str(stylesheet)])
                else:
                    raise FileNotFoundError(f"File not found: {stylesheet}")

            if chat:
                args.extend(["--chat", "true"])
            else:
                args.extend(["--chat", "false"])

            # Local tracing on the Node.js side writes artifacts under _trace_dir.
            if _trace_enabled and _trace_dir is not None:
                args.extend(["--traceDir", str(_trace_dir)])

            if extra_args:
                args.extend(extra_args)
            # Invoke the POML processor (subprocess); non-zero exit is fatal.
            process = run(*args)
            if process.returncode != 0:
                raise RuntimeError(
                    f"POML command failed with return code {process.returncode}. See the log for details."
                )

            # Read the result back, either from the caller's file or the temp file.
            if output_file_specified:
                with open(output_file, "r") as output_file_handle:
                    result = output_file_handle.read()
            else:
                result = temp_output_file.read()

            if format == "raw":
                # Do nothing
                return_result = trace_result = result
            else:
                parsed_result = trace_result = json.loads(result)

                # Handle the new CLI result format with messages, schema, tools, runtime
                if isinstance(parsed_result, dict) and "messages" in parsed_result:
                    cli_result = parsed_result
                    messages_data = cli_result["messages"]
                else:
                    # Legacy format - just messages
                    cli_result: dict = {"messages": parsed_result}
                    messages_data = parsed_result

                if format == "message_dict":
                    # Legacy behavior - return just the messages
                    return_result = messages_data
                elif format == "dict":
                    # Return the full CLI result structure
                    return_result = cli_result
                else:
                    # Convert to pydantic messages for other formats
                    if chat:
                        pydantic_messages = [PomlMessage(**item) for item in messages_data]
                    else:
                        # TODO: Make it a RichContent object
                        pydantic_messages = [PomlMessage(speaker="human", content=messages_data)]  # type: ignore

                    # Create PomlFrame with full data
                    poml_frame = PomlFrame(
                        messages=pydantic_messages,
                        output_schema=cli_result.get("schema"),
                        tools=cli_result.get("tools"), 
                        runtime=cli_result.get("runtime")
                    )

                    if format == "pydantic":
                        return_result = poml_frame
                    elif format == "openai_chat":
                        # Return OpenAI-compatible format
                        openai_messages = _poml_response_to_openai_chat(pydantic_messages)
                        openai_result: dict = {"messages": openai_messages}

                        # Add tools if present
                        if poml_frame.tools:
                            openai_result["tools"] = [{
                                "type": "function",
                                "function": {
                                    "name": tool.get("name", ""),
                                    "description": tool.get("description", ""),
                                    "parameters": tool.get("parameters", {})
                                }  # FIXME: hot-fix for the wrong format at node side
                            } for tool in poml_frame.tools]
                        if poml_frame.output_schema:
                            openai_result["response_format"] = {
                                "type": "json_schema",
                                "json_schema": {
                                    "name": "schema",  # TODO: support schema name
                                    "schema": poml_frame.output_schema,
                                    "strict": True,  # Ensure strict validation
                                }
                            }
                        if poml_frame.runtime:
                            # Runtime params arrive camelCased from Node; OpenAI wants snake_case.
                            openai_result.update({
                                _camel_case_to_snake_case(k): v
                                for k, v in poml_frame.runtime.items()
                            })

                        return_result = openai_result
                    elif format == "langchain":
                        messages_data = _poml_response_to_langchain(pydantic_messages)
                        return_result = {
                            "messages": messages_data,
                            **{k: v for k, v in cli_result.items() if k != "messages"},
                        }
                    else:
                        raise ValueError(f"Unknown output format: {format}")

            # Backend integrations replay the call from locally traced artifacts,
            # so each requires local tracing to have produced files.
            if _weave_enabled:
                from .integration import weave

                trace_prefix = _latest_trace_prefix()
                current_version = _current_trace_version()
                if trace_prefix is None or current_version is None:
                    raise RuntimeError("Weave tracing requires local tracing to be enabled.")
                poml_content = _read_latest_traced_file(".poml")
                context_content = _read_latest_traced_file(".context.json")
                stylesheet_content = _read_latest_traced_file(".stylesheet.json")

                weave.log_poml_call(
                    trace_prefix.name,
                    poml_content or str(markup),
                    json.loads(context_content) if context_content else None,
                    json.loads(stylesheet_content) if stylesheet_content else None,
                    trace_result,
                )

            if _agentops_enabled:
                from .integration import agentops

                trace_prefix = _latest_trace_prefix()
                current_version = _current_trace_version()
                if trace_prefix is None or current_version is None:
                    raise RuntimeError("AgentOps tracing requires local tracing to be enabled.")
                poml_content = _read_latest_traced_file(".poml")
                context_content = _read_latest_traced_file(".context.json")
                stylesheet_content = _read_latest_traced_file(".stylesheet.json")
                agentops.log_poml_call(
                    trace_prefix.name,
                    str(markup),
                    json.loads(context_content) if context_content else None,
                    json.loads(stylesheet_content) if stylesheet_content else None,
                    trace_result,
                )

            if _mlflow_enabled:
                from .integration import mlflow

                trace_prefix = _latest_trace_prefix()
                current_version = _current_trace_version()
                if trace_prefix is None or current_version is None:
                    raise RuntimeError("MLflow tracing requires local tracing to be enabled.")
                poml_content = _read_latest_traced_file(".poml")
                context_content = _read_latest_traced_file(".context.json")
                stylesheet_content = _read_latest_traced_file(".stylesheet.json")
                mlflow.log_poml_call(
                    trace_prefix.name,
                    poml_content or str(markup),
                    json.loads(context_content) if context_content else None,
                    json.loads(stylesheet_content) if stylesheet_content else None,
                    trace_result,
                )

            if trace_record is not None:
                trace_record["result"] = trace_result
            return return_result
    finally:
        # Always release temp files; append the trace record even if an error
        # occurred so failed calls are still visible in the log.
        if temp_input_file:
            temp_input_file.close()
        if temp_context_file:
            temp_context_file.close()
        if temp_stylesheet_file:
            temp_stylesheet_file.close()
        if trace_record is not None:
            _trace_log.append(trace_record)

set_trace(enabled=True, /, *, trace_dir=None)

Enable or disable tracing of poml calls with optional backend integrations.

Parameters:

Name Type Description Default
enabled bool | List[Backend] | Backend

Controls which tracing backends to enable. Can be: - True: Enable local tracing only (equivalent to ["local"]) - False: Disable all tracing (equivalent to []) - str: Enable a single backend ("local", "weave", "agentops", "mlflow") - List[str]: Enable multiple backends. "local" is auto-enabled if any backends are specified.

True
trace_dir Optional[str | Path]

Optional directory for local trace files. If provided when local tracing is enabled, a subdirectory named by the current timestamp (YYYYMMDDHHMMSSffffff) is created inside trace_dir.

None

Returns:

Type Description
Type: Optional[Path]

Path to the trace directory if local tracing is enabled, None otherwise.
The directory may be shared with POML Node.js by setting the
POML_TRACE environment variable in the invoking script.

Available backends
  • "local": Save trace files to disk
  • "weave": Log to Weights & Biases Weave (requires local tracing)
  • "agentops": Log to AgentOps (requires local tracing)
  • "mlflow": Log to MLflow (requires local tracing)
Source code in python/poml/api.py
def set_trace(
    enabled: bool | List[Backend] | Backend = True, /, *, trace_dir: Optional[str | Path] = None
) -> Optional[Path]:
    """Enable or disable tracing of ``poml`` calls with optional backend integrations.

    Args:
        enabled: Selects which tracing backends to enable. ``True`` turns on
            local tracing only (same as ``["local"]``); ``False`` turns
            everything off (same as ``[]``); a single backend name
            ("local", "weave", "agentops", "mlflow") or a list of names
            enables those backends. Requesting any backend at all also
            enables local tracing.
        trace_dir: Optional directory for local trace files. When provided and
            local tracing is enabled, a subdirectory named by the current
            timestamp (YYYYMMDDHHMMSSffffff) is created inside it.

    Returns:
        The trace directory path if local tracing is enabled, ``None``
        otherwise. The directory may be shared with POML Node.js by setting
        the ``POML_TRACE`` environment variable in the invoking script.

    Available backends:
        - "local": Save trace files to disk
        - "weave": Log to Weights & Biases Weave (requires local tracing)
        - "agentops": Log to AgentOps (requires local tracing)
        - "mlflow": Log to MLflow (requires local tracing)
    """

    # Normalize `enabled` into a plain list of backend names.
    if enabled is True:
        enabled = ["local"]
    elif enabled is False:
        enabled = []
    elif isinstance(enabled, str):
        enabled = [enabled]

    global _trace_enabled, _trace_dir, _weave_enabled, _agentops_enabled, _mlflow_enabled
    if enabled:
        # A non-empty backend list always implies local tracing.
        _trace_enabled = True
        if trace_dir is not None:
            # Explicit directory wins: create a fresh timestamped run dir inside it.
            base = Path(trace_dir)
            base.mkdir(parents=True, exist_ok=True)
            stamp = datetime.now().strftime("%Y%m%d%H%M%S%f")
            run_dir = base / stamp
            run_dir.mkdir(parents=True, exist_ok=True)
            _trace_dir = run_dir
        else:
            # Fall back to the POML_TRACE environment variable, if set.
            env_dir = os.environ.get("POML_TRACE")
            if env_dir:
                run_dir = Path(env_dir)
                run_dir.mkdir(parents=True, exist_ok=True)
                _trace_dir = run_dir
            else:
                _trace_dir = None
    else:
        _trace_enabled = False
        _trace_dir = None

    # Per-backend flags mirror membership in the requested backend list.
    _weave_enabled = "weave" in enabled
    _agentops_enabled = "agentops" in enabled
    _mlflow_enabled = "mlflow" in enabled

    return _trace_dir

trace_artifact(file_suffix, contents)

Write an additional artifact file for the most recent poml call. This API is experimental.

Source code in python/poml/api.py
def trace_artifact(file_suffix: str, contents: str | bytes) -> Optional[Path]:
    """Write an additional artifact file for the most recent ``poml`` call. This API is experimental."""
    prefix = _latest_trace_prefix()
    if prefix is None:
        # No traced call yet; nothing to attach the artifact to.
        return None
    # Normalize the suffix so the artifact sits next to the traced files.
    normalized = file_suffix if file_suffix.startswith(".") else f".{file_suffix}"
    artifact_path = Path(f"{prefix}{normalized}")
    if isinstance(contents, (bytes, bytearray)):
        artifact_path.write_bytes(contents)
    else:
        artifact_path.write_text(contents)
    return artifact_path