Skip to content

POML Integrations

Estimated time to read: 1 minute

poml.integration.agentops

log_poml_call(name, prompt, context, stylesheet, result)

Log the entire poml call to agentops.

Source code in python/poml/integration/agentops.py
def log_poml_call(name: str, prompt: str, context: dict | None, stylesheet: dict | None, result: Any) -> Any:
    """Log the entire poml call to agentops.

    Args:
        name: Identifier for the logged call (unused here; kept for interface
            parity with the mlflow/weave integrations).
        prompt: The rendered POML prompt.
        context: Optional context variables used when rendering the prompt.
        stylesheet: Optional stylesheet applied to the prompt.
        result: The value produced by the poml call; replayed through the
            traced wrapper so agentops records it as the operation's output.

    Returns:
        The ``result`` value, as echoed back by the traced wrapper.
    """

    @agentops.operation(name="poml")
    def poml(prompt, context, stylesheet):
        # The wrapper does no work of its own; it exists so agentops captures
        # the inputs and output of the call as a named operation.
        return result

    # Fix: propagate the wrapper's return value so the declared `-> Any`
    # return type is honored (previously the function implicitly returned None).
    return poml(prompt, context, stylesheet)

poml.integration.langchain

LangchainPomlTemplate

Bases: PromptTemplate

A LangChain-compatible prompt template that uses POML (Prompt Markup Language) for formatting.

This class extends LangChain's PromptTemplate to support POML markup, enabling rich prompt formatting with speaker modes and structured content. It can load templates from files or strings and format them into either ChatPromptValue or StringPromptValue objects.

Attributes:

Name Type Description
template_file Union[str, Path, None]

Path to the POML template file, if loaded from file.

speaker_mode bool

Whether to format output as chat messages (True) or plain text (False). Defaults to True.

Examples:

Create from a template string:

>>> template = LangchainPomlTemplate.from_template(
...     "Hello {{name}}!", speaker_mode=True
... )
>>> result = template.format(name="Alice")

Load from a POML file:

>>> template = LangchainPomlTemplate.from_file(
...     "path/to/template.poml", speaker_mode=False
... )
>>> result = template.format(user_input="What is AI?")
Note
  • In speaker_mode=True, returns ChatPromptValue with structured messages
  • In speaker_mode=False, returns StringPromptValue with plain text
  • The from_examples() method is not supported and will raise NotImplementedError
Source code in python/poml/integration/langchain.py
class LangchainPomlTemplate(PromptTemplate):
    """A LangChain-compatible prompt template that uses POML (Prompt Markup Language) for formatting.

    This class extends LangChain's PromptTemplate to support POML markup, enabling rich prompt
    formatting with speaker modes and structured content. It can load templates from files or
    strings and format them into either ChatPromptValue or StringPromptValue objects.

    Attributes:
        template_file (Union[str, Path, None]): Path to the POML template file, if loaded from file.
        speaker_mode (bool): Whether to format output as chat messages (True) or plain text (False).
            Defaults to True.

    Examples:
        Create from a template string:
        >>> template = LangchainPomlTemplate.from_template(
        ...     "Hello {{name}}!", speaker_mode=True
        ... )
        >>> result = template.format(name="Alice")

        Load from a POML file:
        >>> template = LangchainPomlTemplate.from_file(
        ...     "path/to/template.poml", speaker_mode=False
        ... )
        >>> result = template.format(user_input="What is AI?")

    Note:
        - In speaker_mode=True, returns ChatPromptValue with structured messages
        - In speaker_mode=False, returns StringPromptValue with plain text
        - The from_examples() method is not supported and will raise NotImplementedError
    """

    # Path of the POML file when the template was loaded via from_file;
    # None when the template came from an inline string (from_template).
    template_file: Union[str, Path, None] = None
    # True -> format() returns ChatPromptValue messages; False -> a single
    # StringPromptValue built from exactly one plain-text message.
    speaker_mode: bool = True

    @property
    @override
    def lc_attributes(self) -> dict[str, Any]:
        # Attributes LangChain includes when serializing this template.
        return {
            "template_file": self.template_file,
            "speaker_mode": self.speaker_mode,
            # Template format is not used
            # "template_format": self.template_format,
        }

    @classmethod
    @override
    def get_lc_namespace(cls) -> list[str]:
        # Namespace used by LangChain's serialization registry for this class.
        return ["poml", "integration", "langchain"]

    @classmethod
    def from_examples(cls, *args, **kwargs):
        # Example-based construction has no POML equivalent; fail loudly.
        raise NotImplementedError(
            "LangchainPomlTemplate does not support from_examples. Use from_template or from_file instead."
        )

    @classmethod
    def from_file(
        cls, template_file: Union[str, Path], *args, speaker_mode: bool = True, **kwargs
    ) -> "LangchainPomlTemplate":
        # Build via the parent loader, then record the POML-specific state.
        # NOTE(review): extra positional *args are accepted but not forwarded
        # to super().from_file — confirm this is intentional.
        instance: LangchainPomlTemplate = super().from_file(template_file, **kwargs)  # type: ignore
        instance.template_file = template_file
        instance.speaker_mode = speaker_mode
        return instance

    @classmethod
    def from_template(cls, *args, speaker_mode: bool = True, **kwargs) -> "LangchainPomlTemplate":
        # Build via the parent constructor, then record the POML speaker mode.
        instance: LangchainPomlTemplate = super().from_template(*args, **kwargs)  # type: ignore
        instance.speaker_mode = speaker_mode
        return instance

    def format(self, **kwargs) -> Union[ChatPromptValue, StringPromptValue]:  # type: ignore
        """Render the template with the given variables (merged with partials).

        Returns a ChatPromptValue in speaker mode; otherwise a StringPromptValue,
        which requires the rendered output to reduce to a single plain string.

        Raises:
            ValueError: In non-speaker mode, when the rendered output is not a
                single plain-text message.
        """
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        # Prefer the on-disk template when one was recorded by from_file.
        if self.template_file:
            formatted_messages = poml_formatter(self.template_file, self.speaker_mode, kwargs)
        else:
            formatted_messages = poml_formatter(self.template, self.speaker_mode, kwargs)
        if self.speaker_mode:
            return ChatPromptValue(messages=formatted_messages)
        else:
            # Non-speaker mode: unwrap the result down to one plain string,
            # rejecting anything that cannot be represented that way.
            if len(formatted_messages) == 1:
                if isinstance(formatted_messages[0].content, str):
                    return StringPromptValue(text=formatted_messages[0].content)
                elif isinstance(formatted_messages[0].content, list):
                    # If the content is a list, we assume it's a single message with multiple parts.
                    if len(formatted_messages[0].content) == 1:
                        # If there's only one part, return it as a StringPromptValue
                        if isinstance(formatted_messages[0].content[0], str):
                            return StringPromptValue(text=formatted_messages[0].content[0])
                        else:
                            raise ValueError(
                                f"Unsupported content type for non-speaker mode: {formatted_messages[0].content[0]}"
                            )
                    else:
                        raise ValueError(
                            f"Multi-part contents is not supported for non-speaker mode: {formatted_messages[0].content}"
                        )
                else:
                    raise ValueError(f"Unsupported content type for non-speaker mode: {formatted_messages[0].content}")
            else:
                raise ValueError(
                    f"Multiple messages returned, but non-speaker mode requires a single message: {formatted_messages}"
                )

    def format_prompt(self, **kwargs):
        # Delegate so PromptValue-consuming LangChain APIs get the same output.
        return self.format(**kwargs)

poml.integration.mlflow

log_poml_call(name, prompt, context, stylesheet, result)

Log the entire poml call to mlflow.

Source code in python/poml/integration/mlflow.py
def log_poml_call(name: str, prompt: str, context: dict | None, stylesheet: dict | None, result: Any) -> Any:
    """Log the entire poml call to mlflow.

    Registers the prompt template in the mlflow prompt registry, then replays
    the call through an ``mlflow.trace``-decorated wrapper so the inputs and
    output are captured as a trace.

    Args:
        name: Name under which the prompt is registered.
        prompt: The POML prompt template text.
        context: Optional context variables used when rendering the prompt.
        stylesheet: Optional stylesheet applied to the prompt.
        result: The value produced by the poml call; echoed back by the
            traced wrapper.

    Returns:
        The ``result`` value, as echoed back by the traced wrapper.
    """

    @mlflow.trace
    def poml(prompt, context, stylesheet):
        # The wrapper does no work of its own; it exists so mlflow captures
        # the inputs and output of the call as a trace.
        return result

    # Register the template so it is versioned in the mlflow prompt registry.
    # The registration handle is not needed here, so it is not kept.
    mlflow.genai.register_prompt(
        name=name,
        template=prompt,
        tags={
            "format": "poml",
            "source": "auto",
        },
    )

    # Fix: propagate the wrapper's return value so the declared `-> Any`
    # return type is honored (previously the function implicitly returned None).
    return poml(prompt, context, stylesheet)

poml.integration.pydantic

NotGiven

A sentinel singleton class used to distinguish omitted keyword arguments from those passed in with the value None (which may have different behavior).

Source code in python/poml/integration/pydantic.py
class NotGiven:
    """
    A sentinel singleton class used to distinguish omitted keyword arguments
    from those passed in with the value None (which may have different behavior).
    """

    def __bool__(self) -> Literal[False]:
        return False

    @override
    def __repr__(self) -> str:
        return "NOT_GIVEN"

is_dataclass_like_type(typ)

Returns True if the given type likely used @pydantic.dataclass

Source code in python/poml/integration/pydantic.py
def is_dataclass_like_type(typ: type) -> bool:
    """Heuristically detect classes decorated with `@pydantic.dataclass`.

    Pydantic attaches a ``__pydantic_config__`` attribute to the classes its
    dataclass decorator produces; its presence is used as the signal here.
    """
    _missing = object()
    return getattr(typ, "__pydantic_config__", _missing) is not _missing

to_strict_json_schema(model)

Convert a Pydantic model to a strict JSON schema suitable for OpenAI function calling and response format.

This function takes a Pydantic BaseModel class or TypeAdapter and converts it to a JSON schema that conforms to the strict schema requirements for OpenAI's function calling API. The resulting schema ensures all objects have additionalProperties: false and all properties are required.

Most of the implementation is adapted from OpenAI Python SDK.

Parameters:

Name: model
Type: type[BaseModel] | TypeAdapter[Any]
Description: A Pydantic BaseModel class or TypeAdapter instance to convert to JSON schema
Default: required (no default)

Returns:

Type Description
dict[str, Any]

A dictionary representing the strict JSON schema

Raises:

Type Description
TypeError

If the model is not a BaseModel type and Pydantic v2 is not available, or if a non-BaseModel type is used with Pydantic v1

Example
from pydantic import BaseModel, Field
from poml.integration.pydantic import to_strict_json_schema

class Query(BaseModel):
    name: str = Field(description="Query name")
    limit: int = Field(description="Result limit", default=10)

schema = to_strict_json_schema(Query)
# Returns a strict JSON schema with additionalProperties: false
Source code in python/poml/integration/pydantic.py
def to_strict_json_schema(model: type[pydantic.BaseModel] | pydantic.TypeAdapter[Any]) -> dict[str, Any]:
    """
    Convert a Pydantic model to a strict JSON schema suitable for OpenAI function calling and response format.

    Accepts either a Pydantic BaseModel class or a TypeAdapter and produces a
    JSON schema meeting OpenAI's strict-schema requirements: every object gets
    `additionalProperties: false` and all properties are marked required.

    Most of the implementation is adapted from [OpenAI Python SDK](https://github.com/openai/openai-python/blob/4e28a424e6afd60040e3bdf7c76eebb63bc0c407/src/openai/lib/_pydantic.py).

    Args:
        model: A Pydantic BaseModel class or TypeAdapter instance to convert to JSON schema

    Returns:
        A dictionary representing the strict JSON schema

    Raises:
        TypeError: If the model is not a BaseModel type and Pydantic v2 is not available,
                  or if a non-BaseModel type is used with Pydantic v1

    Example:
        ```python
        from pydantic import BaseModel, Field
        from poml.integration.pydantic import to_strict_json_schema

        class Query(BaseModel):
            name: str = Field(description="Query name")
            limit: int = Field(description="Result limit", default=10)

        schema = to_strict_json_schema(Query)
        # Returns a strict JSON schema with additionalProperties: false
        ```
    """
    if inspect.isclass(model) and is_basemodel_type(model):
        # BaseModel subclass: pick the schema API matching the pydantic major version.
        raw_schema = model.model_json_schema() if PYDANTIC_V2 else model.schema()  # type: ignore
    elif PYDANTIC_V2 and isinstance(model, pydantic.TypeAdapter):
        # TypeAdapter-wrapped types are only supported under pydantic v2.
        raw_schema = model.json_schema()
    else:
        raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {model}")

    # Post-process the raw schema into the strict form in-place, rooted at itself.
    return _ensure_strict_json_schema(raw_schema, path=(), root=raw_schema)

poml.integration.weave

log_poml_call(name, prompt, context, stylesheet, result)

Log the entire poml call to weave.

Source code in python/poml/integration/weave.py
def log_poml_call(name: str, prompt: str, context: dict | None, stylesheet: dict | None, result: Any) -> Any:
    """Log the entire poml call to weave.

    Publishes the prompt (and, when present, the context and stylesheet) as
    weave objects, then replays the call through a ``weave.op``-decorated
    wrapper so the published refs and output are captured as a trace.

    Args:
        name: Base name for the published objects; context and stylesheet are
            published under ``name + ".context"`` / ``name + ".stylesheet"``.
        prompt: The POML prompt template.
        context: Optional context variables used when rendering the prompt.
        stylesheet: Optional stylesheet applied to the prompt.
        result: The value produced by the poml call; echoed back by the
            traced wrapper.

    Returns:
        The ``result`` value, as echoed back by the traced wrapper.
    """

    @weave.op
    def poml(prompt, context, stylesheet):
        # The wrapper does no work of its own; it exists so weave captures
        # the published refs and the output of the call as a trace.
        return result

    prompt_ref = weave.publish(prompt, name=name)
    if context is not None:
        context_ref = weave.publish(context, name=name + ".context")
    else:
        context_ref = context
    # Skip empty stylesheets. The original test compared against the string
    # "{}", which a dict can never equal, so empty stylesheet dicts were
    # published anyway; a truthiness check covers both None and {} while the
    # "{}" comparison is kept for callers that pass a serialized stylesheet.
    if stylesheet and stylesheet != "{}":
        stylesheet_ref = weave.publish(stylesheet, name=name + ".stylesheet")
    else:
        stylesheet_ref = stylesheet

    # Fix: propagate the wrapper's return value so the declared `-> Any`
    # return type is honored (previously the function implicitly returned None).
    return poml(prompt_ref, context_ref, stylesheet_ref)