Module tinytroupe.validation.tiny_person_validator

Source code
import os
import json
import logging
import textwrap
from typing import Optional, List

import chevron
from pydantic import BaseModel

from tinytroupe.clients import client
from tinytroupe.agent import TinyPerson
from tinytroupe import config
import tinytroupe.utils as utils


default_max_content_display_length = config["OpenAI"].getint("MAX_CONTENT_DISPLAY_LENGTH", 1024)


class ValidationResponse(BaseModel):
    """Response structure for the validation process"""
    questions: Optional[List[str]] = None
    next_phase_description: Optional[str] = None
    score: Optional[float] = None
    justification: Optional[str] = None
    is_complete: bool = False


class TinyPersonValidator:

    @staticmethod
    def validate_person(person, expectations=None, include_agent_spec=True, max_content_length=default_max_content_display_length) -> tuple[Optional[float], Optional[str]]:
        """
        Validate a TinyPerson instance using OpenAI's LLM.

        This method conducts an LLM-driven interview with the TinyPerson instance and evaluates its answers.
        It returns a confidence score for the validation process together with a justification.
        If the validation process fails, it returns (None, None).

        Args:
            person (TinyPerson): The TinyPerson instance to be validated.
            expectations (str, optional): The expectations to be used in the validation process. Defaults to None.
            include_agent_spec (bool, optional): Whether to include the agent specification in the prompt. Defaults to True.
            max_content_length (int, optional): The maximum length of the content to be displayed when rendering the conversation.

        Returns:
            float: The confidence score of the validation process (0.0 to 1.0), or None if the validation process fails.
            str: The justification for the validation score, or None if the validation process fails.
        """
        # Initialize the conversation history sent to the validator LLM
        current_messages = []
        
        # Generating the prompt to check the person
        check_person_prompt_template_path = os.path.join(os.path.dirname(__file__), 'prompts/check_person.mustache')
        with open(check_person_prompt_template_path, 'r', encoding='utf-8', errors='replace') as f:
            check_agent_prompt_template = f.read()
        
        system_prompt = chevron.render(check_agent_prompt_template, {"expectations": expectations})

        user_prompt = textwrap.dedent("""\
            Now, based on the following characteristics of the person being interviewed, and following the rules given previously,
            create your questions and interview the person. Good luck!
            """)

        if include_agent_spec:
            user_prompt += f"\n\n{json.dumps(person._persona, indent=4)}"
        
        # TODO this was confusing the expect
        #else:
        #    user_prompt += f"\n\nMini-biography of the person being interviewed: {person.minibio()}"

        logger = logging.getLogger("tinytroupe")

        logger.info(f"Starting validation of the person: {person.name}")

        # Sending the initial messages to the LLM
        current_messages.append({"role": "system", "content": system_prompt})
        current_messages.append({"role": "user", "content": user_prompt})

        message = client().send_message(current_messages, response_format=ValidationResponse, enable_pydantic_model_return=True)

        max_iterations = 10  # Limit the number of iterations to prevent infinite loops
        cur_iteration = 0
        while cur_iteration < max_iterations and message is not None and not message.is_complete:
            cur_iteration += 1
            
            # Check if we have questions to ask
            if message.questions:
                # Format questions as a text block
                if message.next_phase_description:
                    questions_text = f"{message.next_phase_description}\n\n"
                else:
                    questions_text = ""
                
                questions_text += "\n".join([f"{i+1}. {q}" for i, q in enumerate(message.questions)])
                
                current_messages.append({"role": "assistant", "content": questions_text})
                logger.info(f"Question validation:\n{questions_text}")

                # Asking the questions to the persona
                person.listen_and_act(questions_text, max_content_length=max_content_length)
                responses = person.pop_actions_and_get_contents_for("TALK", False)
                logger.info(f"Person reply:\n{responses}")

                # Appending the responses to the current conversation and checking the next message
                current_messages.append({"role": "user", "content": responses})
                message = client().send_message(current_messages, response_format=ValidationResponse, enable_pydantic_model_return=True)
            else:
                # If no questions but not complete, something went wrong
                logger.warning("LLM did not provide questions but validation is not complete")
                break

        if message is not None and message.is_complete and message.score is not None:
            logger.info(f"Validation score: {message.score:.2f}; Justification: {message.justification}")
            return message.score, message.justification
        else:
            logger.error("Validation process failed to complete properly")
            return None, None
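
To make the interview loop concrete, here is a sketch of how current_messages evolves over one iteration. The angle-bracketed strings are abbreviated placeholders standing in for the real prompts, not actual content from this module:

# Illustrative sketch only: bracketed strings stand in for real content.
current_messages = [
    {"role": "system", "content": "<check_person.mustache rendered with the expectations>"},
    {"role": "user", "content": "<interview kickoff, plus the persona JSON when include_agent_spec is True>"},
    # One loop iteration: the validator LLM asks, the persona answers.
    {"role": "assistant", "content": "Background questions\n\n1. What do you do for a living?\n2. ..."},
    {"role": "user", "content": "<the persona's pooled TALK responses>"},
    # ...the exchange repeats until is_complete is True or max_iterations is reached.
]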

Classes

class TinyPersonValidator

Static methods

def validate_person(person, expectations=None, include_agent_spec=True, max_content_length=4000) -> tuple[Optional[float], Optional[str]]

Validate a TinyPerson instance using OpenAI's LLM.

This method conducts an LLM-driven interview with the TinyPerson instance and evaluates its answers. It returns a confidence score for the validation process together with a justification. If the validation process fails, it returns (None, None).

Args

person : TinyPerson
The TinyPerson instance to be validated.
expectations : str, optional
The expectations to be used in the validation process. Defaults to None.
include_agent_spec : bool, optional
Whether to include the agent specification in the prompt. Defaults to True.
max_content_length : int, optional
The maximum length of the content to be displayed when rendering the conversation.

Returns

float
The confidence score of the validation process (0.0 to 1.0), or None if the validation process fails.
str
The justification for the validation score, or None if the validation process fails.
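
A minimal usage sketch follows. The agent construction is illustrative only: the persona fields and the use of TinyPerson.define are assumptions for the example, not part of this module's API.

from tinytroupe.agent import TinyPerson
from tinytroupe.validation.tiny_person_validator import TinyPersonValidator

# Hypothetical agent; the attribute names passed to define() are assumptions.
banker = TinyPerson("Alex")
banker.define("occupation", "Investment banker at a large firm")

score, justification = TinyPersonValidator.validate_person(
    banker,
    expectations="A risk-averse, data-driven investment banker.",
)

if score is not None:
    print(f"Validation score: {score:.2f}")
    print(f"Justification: {justification}")
else:
    print("Validation did not complete.")
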
class ValidationResponse (**data: Any)

Response structure for the validation process

Create a new model by parsing and validating input data from keyword arguments.

Raises pydantic_core.ValidationError if the input data cannot be validated to form a valid model.

self is explicitly positional-only to allow self as a field name.

Ancestors

  • pydantic.main.BaseModel

Class variables

var is_complete : bool
var justification : Optional[str]
var model_config
var next_phase_description : Optional[str]
var questions : Optional[List[str]]
var score : Optional[float]
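
For illustration, a short sketch of the two payload shapes the validation loop expects, assuming Pydantic v2's model_validate (under Pydantic v1 the equivalent would be parse_obj):

from tinytroupe.validation.tiny_person_validator import ValidationResponse

# Mid-interview: the LLM supplies questions but no score yet.
step = ValidationResponse.model_validate({
    "questions": ["What do you do for a living?", "How do you weigh risk?"],
    "next_phase_description": "Background questions",
    "is_complete": False,
})

# Completion: the LLM stops asking and returns a score with a justification.
final = ValidationResponse.model_validate({
    "score": 0.87,
    "justification": "Answers were consistent with the stated persona.",
    "is_complete": True,
})

assert step.score is None          # optional fields default to None
assert final.is_complete and final.questions is None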