Module tinytroupe.extraction

Simulations produce a lot of data, and it is often useful to extract these data in a structured way. For instance, you might wish to: - Extract the main points from an agent's interactions history, so that you can consult them later in a concise form. - Generate synthetic data from a simulation, so that you can use it for training machine learning models or testing software. - Simply turn some of the data into a more machine-readable format, such as JSON or CSV, so that you can analyze it more easily.

This module provides various utilities to help you extract data from TinyTroupe elements, such as agents and worlds. It also provides a mechanism to reduce the extracted data to a more concise form, and to export artifacts from TinyTroupe elements. Incidentally, it showcases one of the many ways in which agent simulations differ from AI assistants, as the latter are not designed to be introspected in this way.

Expand source code
"""
Simulations produce a lot of data, and it is often useful to extract these data in a structured way. For instance, you might wish to:
  - Extract the main points from an agent's interactions history, so that you can consult them later in a concise form.
  - Generate synthetic data from a simulation, so that you can use it for training machine learning models or testing software.
  - Simply turn some of the data into a more machine-readable format, such as JSON or CSV, so that you can analyze it more easily.

This module provides various utilities to help you extract data from TinyTroupe elements, such as agents and worlds. It also provides a 
mechanism to reduce the extracted data to a more concise form, and to export artifacts from TinyTroupe elements. Incidentally, it showcases 
one of the many ways in which agent simulations differ from AI assistants, as the latter are not designed to be introspected in this way.
"""

import logging
logger = logging.getLogger("tinytroupe")

###########################################################################
# Exposed API
###########################################################################
from tinytroupe.extraction.artifact_exporter import ArtifactExporter
from tinytroupe.extraction.normalizer import Normalizer
from tinytroupe.extraction.results_extractor import ResultsExtractor
from tinytroupe.extraction.results_reducer import ResultsReducer
from tinytroupe.extraction.results_reporter import ResultsReporter

__all__ = ["ArtifactExporter", "Normalizer", "ResultsExtractor", "ResultsReducer", "ResultsReporter"]

Sub-modules

tinytroupe.extraction.artifact_exporter
tinytroupe.extraction.normalizer
tinytroupe.extraction.results_extractor
tinytroupe.extraction.results_reducer
tinytroupe.extraction.results_reporter

Classes

class ArtifactExporter (base_output_folder: str)

An artifact exporter is responsible for exporting artifacts from TinyTroupe elements, for example in order to create synthetic data files from simulations.

Expand source code
class ArtifactExporter(JsonSerializableRegistry):
    """
    An artifact exporter is responsible for exporting artifacts from TinyTroupe elements, for example 
    in order to create synthetic data files from simulations. 
    """

    def __init__(self, base_output_folder:str) -> None:
        """
        Initializes the exporter.

        Args:
            base_output_folder (str): The base folder under which all exported artifacts are written.
        """
        self.base_output_folder = base_output_folder

    def export(self, artifact_name:str, artifact_data:Union[dict, str], content_type:str, content_format:str=None, target_format:str="txt", verbose:bool=False):
        """
        Exports the specified artifact data to a file.

        Args:
            artifact_name (str): The name of the artifact.
            artifact_data (Union[dict, str]): The data to export. If a dict is given, it will be saved as JSON. 
                If a string is given, it will be saved as is.
            content_type (str): The type of the content within the artifact.
            content_format (str, optional): The format of the content within the artifact (e.g., md, csv, etc). Defaults to None.
            target_format (str): The format to export the artifact to (e.g., json, txt, docx, etc).
            verbose (bool, optional): Whether to print debug messages. Defaults to False.

        Raises:
            ValueError: If the artifact data is neither a string nor a dict, or if the target format is unsupported.
        """
        
        # dedent inputs, just in case
        if isinstance(artifact_data, str):
            artifact_data = utils.dedent(artifact_data)
        elif isinstance(artifact_data, dict):
            # dict artifacts carry their text under the 'content' key
            artifact_data['content'] = utils.dedent(artifact_data['content'])
        else:
            raise ValueError("The artifact data must be either a string or a dictionary.")
        
        # clean the artifact name of characters that are invalid in file names
        invalid_chars = ['/', '\\', ':', '*', '?', '"', '<', '>', '|', '\n', '\t', '\r', ';']
        for char in invalid_chars:
            if char in artifact_name:
                # replace the invalid character with a hyphen
                artifact_name = artifact_name.replace(char, "-")
                logger.warning(f"Replaced invalid character {char} with hyphen in artifact name '{artifact_name}'.")
        
        artifact_file_path = self._compose_filepath(artifact_data, artifact_name, content_type, target_format, verbose)

        # dispatch on the requested target format
        if target_format == "json":
            self._export_as_json(artifact_file_path, artifact_data, content_type, verbose)
        elif target_format in ("txt", "text", "md", "markdown"):
            self._export_as_txt(artifact_file_path, artifact_data, content_type, verbose)
        elif target_format == "docx":
            self._export_as_docx(artifact_file_path, artifact_data, content_format, verbose)
        else:
            raise ValueError(f"Unsupported target format: {target_format}.")

    def _export_as_txt(self, artifact_file_path:str, artifact_data:Union[dict, str], content_type:str, verbose:bool=False):
        """
        Exports the specified artifact data to a text file.
        """

        # a dict artifact carries its text under the 'content' key; a string artifact IS the content
        if isinstance(artifact_data, dict):
            content = artifact_data['content']
        else:
            content = artifact_data

        with open(artifact_file_path, 'w', encoding="utf-8") as f:
            f.write(content)
    
    def _export_as_json(self, artifact_file_path:str, artifact_data:Union[dict, str], content_type:str, verbose:bool=False):
        """
        Exports the specified artifact data to a JSON file.

        Raises:
            ValueError: If the artifact data is not a dictionary.
        """

        if not isinstance(artifact_data, dict):
            raise ValueError("The artifact data must be a dictionary to export to JSON.")

        with open(artifact_file_path, 'w', encoding="utf-8") as f:
            json.dump(artifact_data, f, indent=4)
    
    def _export_as_docx(self, artifact_file_path:str, artifact_data:Union[dict, str], content_original_format:str, verbose:bool=False):
        """
        Exports the specified artifact data to a DOCX file.

        Raises:
            ValueError: If the original content format is not text or markdown.
        """

        # original format must be 'text' or 'markdown' (or their abbreviations)
        if content_original_format not in ['text', 'txt', 'markdown', 'md']:
            raise ValueError(f"The original format cannot be {content_original_format} to export to DOCX.")
        else:
            # normalize content value
            content_original_format = 'markdown' if content_original_format == 'md' else content_original_format

        # get the content to export: if `artifact_data` is a dict, the content should be under the key `content`;
        # if it is a string, the content is the string itself.
        if isinstance(artifact_data, dict):
            content = artifact_data['content']
        else:
            content = artifact_data
        
        # first, convert to HTML. This is necessary because pypandoc does not support a GOOD direct conversion from markdown to DOCX.
        html_content = markdown.markdown(content)

        # then, convert to DOCX
        pypandoc.convert_text(html_content, 'docx', format='html', outputfile=artifact_file_path)   
    
    ###########################################################
    # IO
    ###########################################################
                  
    def _compose_filepath(self, artifact_data:Union[dict, str], artifact_name:str, content_type:str, target_format:str=None, verbose:bool=False):
        """
        Composes the file path for the artifact to export, creating intermediate directories if necessary.

        Args:
            artifact_data (Union[dict, str]): The data to export.
            artifact_name (str): The name of the artifact.
            content_type (str): The type of the content within the artifact; used as a subfolder name. May be None.
            target_format (str, optional): The format to export the artifact to (e.g., json, txt, docx, etc). Defaults to None.
            verbose (bool, optional): Whether to print debug messages. Defaults to False.
        """        

        # Extension definition: 
        #
        # - If the target format is specified, we use it as the extension.
        # - Otherwise, dict data defaults to .json and string data to .txt.
        if target_format is not None:
            extension = target_format
        elif isinstance(artifact_data, dict):
            extension = "json"
        else:
            extension = "txt"
        
        # content type definition: None means no subfolder
        if content_type is None:
            subfolder = ""
        else:
            subfolder = content_type

        # save to the specified file name or path, considering the base output folder.
        artifact_file_path = os.path.join(self.base_output_folder, subfolder, f"{artifact_name}.{extension}")    

        # create intermediate directories if necessary
        os.makedirs(os.path.dirname(artifact_file_path), exist_ok=True)

        return artifact_file_path

Ancestors

Methods

def export(self, artifact_name: str, artifact_data: Union[str, dict], content_type: str, content_format: str = None, target_format: str = 'txt', verbose: bool = False)

Exports the specified artifact data to a file.

Args

artifact_name : str
The name of the artifact.
artifact_data : Union[dict, str]
The data to export. If a dict is given, it will be saved as JSON. If a string is given, it will be saved as is.
content_type : str
The type of the content within the artifact.
content_format : str, optional
The format of the content within the artifact (e.g., md, csv, etc). Defaults to None.
target_format : str
The format to export the artifact to (e.g., json, txt, docx, etc).
verbose : bool, optional
Whether to print debug messages. Defaults to False.
Expand source code
def export(self, artifact_name:str, artifact_data:Union[dict, str], content_type:str, content_format:str=None, target_format:str="txt", verbose:bool=False):
    """
    Exports the specified artifact data to a file.

    Args:
        artifact_name (str): The name of the artifact.
        artifact_data (Union[dict, str]): The data to export. If a dict is given, it will be saved as JSON. 
            If a string is given, it will be saved as is.
        content_type (str): The type of the content within the artifact.
        content_format (str, optional): The format of the content within the artifact (e.g., md, csv, etc). Defaults to None.
        target_format (str): The format to export the artifact to (e.g., json, txt, docx, etc).
        verbose (bool, optional): Whether to print debug messages. Defaults to False.

    Raises:
        ValueError: If the artifact data is neither a string nor a dict, or if the target format is unsupported.
    """
    
    # dedent inputs, just in case
    if isinstance(artifact_data, str):
        artifact_data = utils.dedent(artifact_data)
    elif isinstance(artifact_data, dict):
        # dict artifacts carry their text under the 'content' key
        artifact_data['content'] = utils.dedent(artifact_data['content'])
    else:
        raise ValueError("The artifact data must be either a string or a dictionary.")
    
    # clean the artifact name of invalid characters
    invalid_chars = ['/', '\\', ':', '*', '?', '"', '<', '>', '|', '\n', '\t', '\r', ';']
    for char in invalid_chars:
        # check if the character is in the artifact name
        if char in artifact_name:
            # replace the character with a hyphen (safe in file names)
            artifact_name = artifact_name.replace(char, "-")
            logger.warning(f"Replaced invalid character {char} with hyphen in artifact name '{artifact_name}'.")
    
    artifact_file_path = self._compose_filepath(artifact_data, artifact_name, content_type, target_format, verbose)


    # dispatch on the requested target format
    if target_format == "json":
        self._export_as_json(artifact_file_path, artifact_data, content_type, verbose)
    elif target_format == "txt" or target_format == "text" or target_format == "md" or target_format == "markdown":
        self._export_as_txt(artifact_file_path, artifact_data, content_type, verbose)
    elif target_format == "docx":
        self._export_as_docx(artifact_file_path, artifact_data, content_format, verbose)
    else:
        raise ValueError(f"Unsupported target format: {target_format}.")
Inherited members

class Normalizer (elements: List[str], n: int, verbose: bool = False)

A mechanism to normalize passages, concepts and other textual elements.

Normalizes the specified elements.

Args

elements : list
The elements to normalize.
n : int
The number of normalized elements to output.
verbose : bool, optional
Whether to print debug messages. Defaults to False.
Expand source code
class Normalizer:
    """
    A mechanism to normalize passages, concepts and other textual elements.
    """

    def __init__(self, elements:List[str], n:int, verbose:bool=False):
        """
        Normalizes the specified elements.

        Args:
            elements (list): The elements to normalize.
            n (int): The number of normalized elements to output.
            verbose (bool, optional): Whether to print debug messages. Defaults to False.
        """
        # ensure elements are unique
        self.elements = list(set(elements))
        
        self.n = n  
        self.verbose = verbose 
        
        # a JSON-based structure, where each output element is a key to a list of input elements that were merged into it
        self.normalized_elements = None
        # a dict that maps each input element to its normalized output. This will be used as cache later.
        self.normalizing_map = {}      

        rendering_configs = {"n": n,
                             "elements": self.elements}

        messages = utils.compose_initial_LLM_messages_with_templates("normalizer.system.mustache", "normalizer.user.mustache",                                                                      
                                                                     base_module_folder="extraction",
                                                                     rendering_configs=rendering_configs)
        
        # low temperature for a mostly deterministic categorization
        next_message = openai_utils.client().send_message(messages, temperature=0.1)
        
        debug_msg = f"Normalization result message: {next_message}"
        logger.debug(debug_msg)
        if self.verbose:
            print(debug_msg)

        result = utils.extract_json(next_message["content"])
        logger.debug(result)
        if self.verbose:
            print(result)

        self.normalized_elements = result

    
    def normalize(self, element_or_elements:Union[str, List[str]]) -> Union[str, List[str]]:
        """
        Normalizes the specified element or elements.

        This method uses a caching mechanism to improve performance. If an element has been normalized before, 
        its normalized form is stored in a cache (self.normalizing_map). When the same element needs to be 
        normalized again, the method will first check the cache and use the stored normalized form if available, 
        instead of normalizing the element again.

        The order of elements in the output will be the same as in the input. Note that the result is always a 
        list: a single string input yields a one-element list.

        Args:
            element_or_elements (Union[str, List[str]]): The element or elements to normalize.

        Returns:
            list: The normalized elements, preserving the order of elements in the input.

        Raises:
            ValueError: If the input is neither a string nor a list.
        """
        if isinstance(element_or_elements, str):
            denormalized_elements = [element_or_elements]
        elif isinstance(element_or_elements, list):
            denormalized_elements = element_or_elements
        else:
            raise ValueError("The element_or_elements must be either a string or a list.")
        
        # collect the elements not yet cached, without duplicates, preserving input order,
        # so the LLM is asked about each unknown element exactly once
        elements_to_normalize = []
        for element in denormalized_elements:
            if element not in self.normalizing_map and element not in elements_to_normalize:
                elements_to_normalize.append(element)
        
        if elements_to_normalize:
            rendering_configs = {"categories": self.normalized_elements,
                                 "elements": elements_to_normalize}
            
            messages = utils.compose_initial_LLM_messages_with_templates("normalizer.applier.system.mustache", "normalizer.applier.user.mustache",                                      
                                                                         base_module_folder="extraction",
                                                                         rendering_configs=rendering_configs)
            
            next_message = openai_utils.client().send_message(messages, temperature=0.1)
            
            debug_msg = f"Normalization result message: {next_message}"
            logger.debug(debug_msg)
            if self.verbose:
                print(debug_msg)
    
            normalized_elements_from_llm = utils.extract_json(next_message["content"])
            assert isinstance(normalized_elements_from_llm, list), "The normalized element must be a list."
            assert len(normalized_elements_from_llm) == len(elements_to_normalize), "The number of normalized elements must be equal to the number of elements to normalize."
    
            # cache each newly normalized element for future calls
            for element, normalized_element in zip(elements_to_normalize, normalized_elements_from_llm):
                self.normalizing_map[element] = normalized_element
        
        # answer entirely from the cache, preserving the input order (including duplicates)
        return [self.normalizing_map[element] for element in denormalized_elements]

Methods

def normalize(self, element_or_elements: Union[str, List[str]]) ‑> Union[str, List[str]]

Normalizes the specified element or elements.

This method uses a caching mechanism to improve performance. If an element has been normalized before, its normalized form is stored in a cache (self.normalizing_map). When the same element needs to be normalized again, the method will first check the cache and use the stored normalized form if available, instead of normalizing the element again.

The order of elements in the output will be the same as in the input. This is ensured by processing the elements in the order they appear in the input and appending the normalized elements to the output list in the same order.

Args

element_or_elements : Union[str, List[str]]
The element or elements to normalize.

Returns

list
The normalized elements, in the same order as the input. The result is always a list: a single string input yields a one-element list.
Expand source code
def normalize(self, element_or_elements:Union[str, List[str]]) -> Union[str, List[str]]:
    """
    Normalizes the specified element or elements.

    This method uses a caching mechanism to improve performance. If an element has been normalized before, 
    its normalized form is stored in a cache (self.normalizing_map). When the same element needs to be 
    normalized again, the method will first check the cache and use the stored normalized form if available, 
    instead of normalizing the element again.

    The order of elements in the output will be the same as in the input. Note that the result is always a 
    list: a single string input yields a one-element list.

    Args:
        element_or_elements (Union[str, List[str]]): The element or elements to normalize.

    Returns:
        list: The normalized elements, preserving the order of elements in the input.

    Raises:
        ValueError: If the input is neither a string nor a list.
    """
    if isinstance(element_or_elements, str):
        denormalized_elements = [element_or_elements]
    elif isinstance(element_or_elements, list):
        denormalized_elements = element_or_elements
    else:
        raise ValueError("The element_or_elements must be either a string or a list.")
    
    # collect the elements not yet cached, without duplicates, preserving input order,
    # so the LLM is asked about each unknown element exactly once
    elements_to_normalize = []
    for element in denormalized_elements:
        if element not in self.normalizing_map and element not in elements_to_normalize:
            elements_to_normalize.append(element)
    
    if elements_to_normalize:
        rendering_configs = {"categories": self.normalized_elements,
                             "elements": elements_to_normalize}
        
        messages = utils.compose_initial_LLM_messages_with_templates("normalizer.applier.system.mustache", "normalizer.applier.user.mustache",                                      
                                                                     base_module_folder="extraction",
                                                                     rendering_configs=rendering_configs)
        
        next_message = openai_utils.client().send_message(messages, temperature=0.1)
        
        debug_msg = f"Normalization result message: {next_message}"
        logger.debug(debug_msg)
        if self.verbose:
            print(debug_msg)

        normalized_elements_from_llm = utils.extract_json(next_message["content"])
        assert isinstance(normalized_elements_from_llm, list), "The normalized element must be a list."
        assert len(normalized_elements_from_llm) == len(elements_to_normalize), "The number of normalized elements must be equal to the number of elements to normalize."

        # cache each newly normalized element for future calls
        for element, normalized_element in zip(elements_to_normalize, normalized_elements_from_llm):
            self.normalizing_map[element] = normalized_element
    
    # answer entirely from the cache, preserving the input order (including duplicates)
    return [self.normalizing_map[element] for element in denormalized_elements]
class ResultsExtractor (extraction_prompt_template_path: str = os.path.join(os.path.dirname(__file__), './prompts/interaction_results_extractor.mustache'), extraction_objective: str = "The main points present in the agents' interactions history.", situation: str = '', fields: List[str] = None, fields_hints: dict = None, verbose: bool = False)

Initializes the ResultsExtractor with default parameters.

Args

extraction_prompt_template_path : str
The path to the extraction prompt template.
extraction_objective : str
The default extraction objective.
situation : str
The default situation to consider.
fields : List[str], optional
The default fields to extract. Defaults to None.
fields_hints : dict, optional
The default hints for the fields to extract. Defaults to None.
verbose : bool, optional
Whether to print debug messages by default. Defaults to False.
Expand source code
class ResultsExtractor:
    """
    Extracts structured results from agents' and worlds' interaction histories, using an LLM
    guided by a mustache prompt template.
    """

    def __init__(self, 
                 extraction_prompt_template_path:str = os.path.join(os.path.dirname(__file__), './prompts/interaction_results_extractor.mustache'),
                 extraction_objective:str = "The main points present in the agents' interactions history.",
                 situation:str = "",
                 fields:List[str] = None,
                 fields_hints:dict = None,
                 verbose:bool = False):
        """
        Initializes the ResultsExtractor with default parameters.

        Args:
            extraction_prompt_template_path (str): The path to the extraction prompt template.
            extraction_objective (str): The default extraction objective.
            situation (str): The default situation to consider.
            fields (List[str], optional): The default fields to extract. Defaults to None.
            fields_hints (dict, optional): The default hints for the fields to extract. Defaults to None.
            verbose (bool, optional): Whether to print debug messages by default. Defaults to False.
        """
        self._extraction_prompt_template_path = extraction_prompt_template_path

        # Default parameters, used when the corresponding argument of an extraction method is None
        self.default_extraction_objective = extraction_objective
        self.default_situation = situation
        self.default_fields = fields
        self.default_fields_hints = fields_hints
        self.default_verbose = verbose

        # Cache for the last extraction results
        self.agent_extraction = {}
        self.world_extraction = {}

    def extract_results_from_agents(self,
                                    agents:List[TinyPerson],
                                    extraction_objective:str=None,
                                    situation:str =None,
                                    fields:list=None,
                                    fields_hints:dict=None,
                                    verbose:bool=None):
        """
        Extracts results from a list of TinyPerson instances.

        Args:
            agents (List[TinyPerson]): The list of TinyPerson instances to extract results from.
            extraction_objective (str): The extraction objective.
            situation (str): The situation to consider.
            fields (list, optional): The fields to extract. If None, the extractor will decide what names to use. 
                Defaults to None.
            fields_hints (dict, optional): Hints for the fields to extract. Maps field names to strings with the hints. Defaults to None.
            verbose (bool, optional): Whether to print debug messages. Defaults to False.

        Returns:
            list: One extraction result per agent, in the same order as the input list.
        """
        results = []
        for agent in agents:
            result = self.extract_results_from_agent(agent, extraction_objective, situation, fields, fields_hints, verbose)
            results.append(result)
        
        return results
        
    def extract_results_from_agent(self, 
                        tinyperson:TinyPerson, 
                        extraction_objective:str="The main points present in the agent's interactions history.", 
                        situation:str = "", 
                        fields:list=None,
                        fields_hints:dict=None,
                        verbose:bool=None):
        """
        Extracts results from a TinyPerson instance.

        Args:
            tinyperson (TinyPerson): The TinyPerson instance to extract results from.
            extraction_objective (str): The extraction objective.
            situation (str): The situation to consider.
            fields (list, optional): The fields to extract. If None, the extractor will decide what names to use. 
                Defaults to None.
            fields_hints (dict, optional): Hints for the fields to extract. Maps field names to strings with the hints. Defaults to None.
            verbose (bool, optional): Whether to print debug messages. Defaults to False.

        Returns:
            The JSON structure extracted by the LLM, or None if no response was obtained.
        """

        extraction_objective, situation, fields, fields_hints, verbose = self._get_default_values_if_necessary(
            extraction_objective, situation, fields, fields_hints, verbose
        )

        messages = []

        rendering_configs = {}
        if fields is not None:
            rendering_configs["fields"] = ", ".join(fields)
        
        if fields_hints is not None:
            rendering_configs["fields_hints"] = list(fields_hints.items())
        
        # render the system prompt from the mustache template; use `with` so the file is closed promptly
        with open(self._extraction_prompt_template_path) as template_file:
            system_prompt = chevron.render(template_file.read(), rendering_configs)
        messages.append({"role": "system", "content": system_prompt})

        interaction_history = tinyperson.pretty_current_interactions(max_content_length=None)

        extraction_request_prompt = \
f"""
## Extraction objective

{extraction_objective}

## Situation
You are considering a single agent, named {tinyperson.name}. Your objective thus refers to this agent specifically.
{situation}

## Agent Interactions History

You will consider an agent's history of interactions, which include stimuli it received as well as actions it 
performed.

{interaction_history}
"""
        messages.append({"role": "user", "content": extraction_request_prompt})

        # temperature 0.0 for deterministic extraction
        next_message = openai_utils.client().send_message(messages, temperature=0.0, frequency_penalty=0.0, presence_penalty=0.0)
        
        debug_msg = f"Extraction raw result message: {next_message}"
        logger.debug(debug_msg)
        if verbose:
            print(debug_msg)

        if next_message is not None:
            result = utils.extract_json(next_message["content"])
        else:
            result = None
        
        # cache the result
        self.agent_extraction[tinyperson.name] = result

        return result
    

    def extract_results_from_world(self, 
                                   tinyworld:TinyWorld, 
                                   extraction_objective:str="The main points that can be derived from the agents conversations and actions.", 
                                   situation:str="", 
                                   fields:list=None,
                                   fields_hints:dict=None,
                                   verbose:bool=None):
        """
        Extracts results from a TinyWorld instance.

        Args:
            tinyworld (TinyWorld): The TinyWorld instance to extract results from.
            extraction_objective (str): The extraction objective.
            situation (str): The situation to consider.
            fields (list, optional): The fields to extract. If None, the extractor will decide what names to use. 
                Defaults to None.
            fields_hints (dict, optional): Hints for the fields to extract. Maps field names to strings with the hints. Defaults to None.
            verbose (bool, optional): Whether to print debug messages. Defaults to False.

        Returns:
            The JSON structure extracted by the LLM, or None if no response was obtained.
        """

        extraction_objective, situation, fields, fields_hints, verbose = self._get_default_values_if_necessary(
            extraction_objective, situation, fields, fields_hints, verbose
        )

        messages = []

        rendering_configs = {}
        if fields is not None:
            rendering_configs["fields"] = ", ".join(fields)
        
        if fields_hints is not None:
            rendering_configs["fields_hints"] = list(fields_hints.items())
        
        # render the system prompt from the mustache template; use `with` so the file is closed promptly
        with open(self._extraction_prompt_template_path) as template_file:
            system_prompt = chevron.render(template_file.read(), rendering_configs)
        messages.append({"role": "system", "content": system_prompt})

        # TODO: either summarize first or break up into multiple tasks
        interaction_history = tinyworld.pretty_current_interactions(max_content_length=None)

        extraction_request_prompt = \
f"""
## Extraction objective

{extraction_objective}

## Situation
You are considering various agents.
{situation}

## Agents Interactions History

You will consider the history of interactions from various agents that exist in an environment called {tinyworld.name}. 
Each interaction history includes stimuli the corresponding agent received as well as actions it performed.

{interaction_history}
"""
        messages.append({"role": "user", "content": extraction_request_prompt})

        # temperature 0.0 for deterministic extraction
        next_message = openai_utils.client().send_message(messages, temperature=0.0)
        
        debug_msg = f"Extraction raw result message: {next_message}"
        logger.debug(debug_msg)
        if verbose:
            print(debug_msg)

        if next_message is not None:
            result = utils.extract_json(next_message["content"])
        else:
            result = None
        
        # cache the result
        self.world_extraction[tinyworld.name] = result

        return result
    
    def save_as_json(self, filename:str, verbose:bool=False):
        """
        Saves the last extraction results as JSON.

        Args:
            filename (str): The filename to save the JSON to.
            verbose (bool, optional): Whether to print debug messages. Defaults to False.
        """
        with open(filename, 'w') as f:
            json.dump({"agent_extractions": self.agent_extraction, 
                       "world_extraction": self.world_extraction}, f, indent=4)
        
        if verbose:
            # include the actual filename in the message (it was previously missing)
            print(f"Saved extraction results to {filename}")
    
    def _get_default_values_if_necessary(self,
                            extraction_objective:str,
                            situation:str,
                            fields:List[str],
                            fields_hints:dict,
                            verbose:bool):
        """
        Replaces any None argument with the corresponding default configured at construction time.

        Returns:
            tuple: (extraction_objective, situation, fields, fields_hints, verbose) with defaults applied.
        """
        
        if extraction_objective is None:
            extraction_objective = self.default_extraction_objective

        if situation is None:
            situation = self.default_situation

        if fields is None:
            fields = self.default_fields

        if fields_hints is None:
            fields_hints = self.default_fields_hints

        if verbose is None:
            verbose = self.default_verbose

        return extraction_objective, situation, fields, fields_hints, verbose

Methods

def extract_results_from_agent(self, tinyperson: TinyPerson, extraction_objective: str = "The main points present in the agent's interactions history.", situation: str = '', fields: list = None, fields_hints: dict = None, verbose: bool = None)

Extracts results from a TinyPerson instance.

Args

tinyperson : TinyPerson
The TinyPerson instance to extract results from.
extraction_objective : str
The extraction objective.
situation : str
The situation to consider.
fields : list, optional
The fields to extract. If None, the extractor will decide what names to use. Defaults to None.
fields_hints : dict, optional
Hints for the fields to extract. Maps field names to strings with the hints. Defaults to None.
verbose : bool, optional
Whether to print debug messages. Defaults to False.
Expand source code
    def extract_results_from_agent(self, 
                        tinyperson:TinyPerson, 
                        extraction_objective:str="The main points present in the agent's interactions history.", 
                        situation:str = "", 
                        fields:list=None,
                        fields_hints:dict=None,
                        verbose:bool=None):
        """
        Extracts results from a TinyPerson instance.

        Args:
            tinyperson (TinyPerson): The TinyPerson instance to extract results from.
            extraction_objective (str): The extraction objective.
            situation (str): The situation to consider.
            fields (list, optional): The fields to extract. If None, the extractor will decide what names to use. 
                Defaults to None.
            fields_hints (dict, optional): Hints for the fields to extract. Maps field names to strings with the hints. Defaults to None.
            verbose (bool, optional): Whether to print debug messages. Defaults to False.
        """

        extraction_objective, situation, fields, fields_hints, verbose = self._get_default_values_if_necessary(
            extraction_objective, situation, fields, fields_hints, verbose
        )

        messages = []

        rendering_configs = {}
        if fields is not None:
            rendering_configs["fields"] = ", ".join(fields)
        
        if fields_hints is not None:
            rendering_configs["fields_hints"] = list(fields_hints.items())
        
        messages.append({"role": "system", 
                         "content": chevron.render(
                             open(self._extraction_prompt_template_path).read(), 
                             rendering_configs)})


        interaction_history = tinyperson.pretty_current_interactions(max_content_length=None)

        extraction_request_prompt = \
f"""
## Extraction objective

{extraction_objective}

## Situation
You are considering a single agent, named {tinyperson.name}. Your objective thus refers to this agent specifically.
{situation}

## Agent Interactions History

You will consider an agent's history of interactions, which include stimuli it received as well as actions it 
performed.

{interaction_history}
"""
        messages.append({"role": "user", "content": extraction_request_prompt})

        next_message = openai_utils.client().send_message(messages, temperature=0.0, frequency_penalty=0.0, presence_penalty=0.0)
        
        debug_msg = f"Extraction raw result message: {next_message}"
        logger.debug(debug_msg)
        if verbose:
            print(debug_msg)

        if next_message is not None:
            result = utils.extract_json(next_message["content"])
        else:
            result = None
        
        # cache the result
        self.agent_extraction[tinyperson.name] = result

        return result
def extract_results_from_agents(self, agents: List[TinyPerson], extraction_objective: str = None, situation: str = None, fields: list = None, fields_hints: dict = None, verbose: bool = None)

Extracts results from a list of TinyPerson instances.

Args

agents : List[TinyPerson]
The list of TinyPerson instances to extract results from.
extraction_objective : str
The extraction objective.
situation : str
The situation to consider.
fields : list, optional
The fields to extract. If None, the extractor will decide what names to use. Defaults to None.
fields_hints : dict, optional
Hints for the fields to extract. Maps field names to strings with the hints. Defaults to None.
verbose : bool, optional
Whether to print debug messages. Defaults to False.
Expand source code
def extract_results_from_agents(self,
                                agents:List[TinyPerson],
                                extraction_objective:str=None,
                                situation:str =None,
                                fields:list=None,
                                fields_hints:dict=None,
                                verbose:bool=None):
    """
    Extracts results from a list of TinyPerson instances, delegating each one
    to `extract_results_from_agent`.

    Args:
        agents (List[TinyPerson]): The list of TinyPerson instances to extract results from.
        extraction_objective (str): The extraction objective.
        situation (str): The situation to consider.
        fields (list, optional): The fields to extract. If None, the extractor will decide what names to use.
            Defaults to None.
        fields_hints (dict, optional): Hints for the fields to extract. Maps field names to strings with the hints. Defaults to None.
        verbose (bool, optional): Whether to print debug messages. Defaults to False.
    """
    # One extraction per agent, preserving the input order.
    return [self.extract_results_from_agent(agent, extraction_objective, situation,
                                            fields, fields_hints, verbose)
            for agent in agents]
def extract_results_from_world(self, tinyworld: TinyWorld, extraction_objective: str = 'The main points that can be derived from the agents conversations and actions.', situation: str = '', fields: list = None, fields_hints: dict = None, verbose: bool = None)

Extracts results from a TinyWorld instance.

Args

tinyworld : TinyWorld
The TinyWorld instance to extract results from.
extraction_objective : str
The extraction objective.
situation : str
The situation to consider.
fields : list, optional
The fields to extract. If None, the extractor will decide what names to use. Defaults to None.
fields_hints : dict, optional
Hints for the fields to extract. Maps field names to strings with the hints. Defaults to None.
verbose : bool, optional
Whether to print debug messages. Defaults to False.
Expand source code
    def extract_results_from_world(self, 
                                   tinyworld:TinyWorld, 
                                   extraction_objective:str="The main points that can be derived from the agents conversations and actions.", 
                                   situation:str="", 
                                   fields:list=None,
                                   fields_hints:dict=None,
                                   verbose:bool=None):
        """
        Extracts results from a TinyWorld instance.

        Args:
            tinyworld (TinyWorld): The TinyWorld instance to extract results from.
            extraction_objective (str): The extraction objective.
            situation (str): The situation to consider.
            fields (list, optional): The fields to extract. If None, the extractor will decide what names to use. 
                Defaults to None.
            verbose (bool, optional): Whether to print debug messages. Defaults to False.
        """

        extraction_objective, situation, fields, fields_hints, verbose = self._get_default_values_if_necessary(
            extraction_objective, situation, fields, fields_hints, verbose
        )

        messages = []

        rendering_configs = {}
        if fields is not None:
            rendering_configs["fields"] = ", ".join(fields)
        
        if fields_hints is not None:
            rendering_configs["fields_hints"] = list(fields_hints.items())
        
        messages.append({"role": "system", 
                         "content": chevron.render(
                             open(self._extraction_prompt_template_path).read(), 
                             rendering_configs)})

        # TODO: either summarize first or break up into multiple tasks
        interaction_history = tinyworld.pretty_current_interactions(max_content_length=None)

        extraction_request_prompt = \
f"""
## Extraction objective

{extraction_objective}

## Situation
You are considering various agents.
{situation}

## Agents Interactions History

You will consider the history of interactions from various agents that exist in an environment called {tinyworld.name}. 
Each interaction history includes stimuli the corresponding agent received as well as actions it performed.

{interaction_history}
"""
        messages.append({"role": "user", "content": extraction_request_prompt})

        next_message = openai_utils.client().send_message(messages, temperature=0.0)
        
        debug_msg = f"Extraction raw result message: {next_message}"
        logger.debug(debug_msg)
        if verbose:
            print(debug_msg)

        if next_message is not None:
            result = utils.extract_json(next_message["content"])
        else:
            result = None
        
        # cache the result
        self.world_extraction[tinyworld.name] = result

        return result
def save_as_json(self, filename: str, verbose: bool = False)

Saves the last extraction results as JSON.

Args

filename : str
The filename to save the JSON to.
verbose : bool, optional
Whether to print debug messages. Defaults to False.
Expand source code
def save_as_json(self, filename:str, verbose:bool=False):
    """
    Saves the last extraction results as JSON.

    Args:
        filename (str): The filename to save the JSON to.
        verbose (bool, optional): Whether to print debug messages. Defaults to False.
    """
    # Persist both the per-agent and per-world caches in one JSON document.
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump({"agent_extractions": self.agent_extraction, 
                   "world_extraction": self.world_extraction}, f, indent=4)
    
    if verbose:
        # Bug fix: the message previously had no placeholder, so the target
        # filename was never shown.
        print(f"Saved extraction results to {filename}")
class ResultsReducer
Expand source code
class ResultsReducer:
    """
    Reduces an agent's episodic memory to a compact list of records by applying
    user-registered rules, keyed by stimulus/action type.
    """

    def __init__(self):
        # reserved for cached reductions (not populated by this class yet)
        self.results = {}
        # maps a stimulus/action type (str) to the callable that extracts a record
        self.rules = {}
    
    def add_reduction_rule(self, trigger: str, func: callable):
        """
        Registers a reduction rule.

        Args:
            trigger (str): The stimulus/action type that activates the rule.
            func (callable): Called with keyword arguments (focus_agent, source_agent,
                target_agent, kind, event, content, timestamp). Returning None skips the record.

        Raises:
            Exception: If a rule for the trigger is already registered.
        """
        if trigger in self.rules:
            raise Exception(f"Rule for {trigger} already exists.")
        
        self.rules[trigger] = func
    
    def reduce_agent(self, agent: TinyPerson) -> list:
        """
        Applies the registered rules to every episodic-memory message of the agent.

        'user' messages are treated as stimuli (only the first stimulus of each message
        is considered) and 'assistant' messages as actions; 'system' messages are skipped.

        Args:
            agent (TinyPerson): The agent whose episodic memory is reduced.

        Returns:
            list: The non-None values produced by matching rules, in message order.
        """
        reduction = []
        for message in agent.episodic_memory.retrieve_all():
            if message['role'] == 'system':
                continue # doing nothing for `system` role yet at least

            elif message['role'] == 'user':
                # User role is related to stimuli only
                stimuli = message['content'].get('stimuli', [])
                if not stimuli:
                    # Robustness: skip malformed messages that carry no stimuli,
                    # instead of raising KeyError/IndexError.
                    continue

                stimulus = stimuli[0]
                stimulus_type = stimulus.get('type', None)
                stimulus_content = stimulus.get('content', None)
                stimulus_source = stimulus.get('source', None)
                stimulus_timestamp = message['simulation_timestamp']

                if stimulus_type in self.rules:
                    # The agent receiving the stimulus is both the focus and the target.
                    extracted = self.rules[stimulus_type](focus_agent=agent, source_agent=TinyPerson.get_agent_by_name(stimulus_source), target_agent=agent, kind='stimulus', event=stimulus_type, content=stimulus_content, timestamp=stimulus_timestamp)
                    if extracted is not None:
                        reduction.append(extracted)

            elif message['role'] == 'assistant':
                # Assistant role is related to actions only
                if 'action' in message['content']:
                    action = message['content']['action']
                    action_type = action.get('type', None)
                    action_content = action.get('content', None)
                    action_target = action.get('target', None)
                    action_timestamp = message['simulation_timestamp']
                    
                    if action_type in self.rules:
                        # The acting agent is both the focus and the source.
                        extracted = self.rules[action_type](focus_agent=agent, source_agent=agent, target_agent=TinyPerson.get_agent_by_name(action_target), kind='action', event=action_type, content=action_content, timestamp=action_timestamp)
                        if extracted is not None:
                            reduction.append(extracted)
            
        return reduction

    def reduce_agent_to_dataframe(self, agent: TinyPerson, column_names: list=None) -> pd.DataFrame:
        """
        Reduces the agent and wraps the resulting records in a pandas DataFrame.

        Args:
            agent (TinyPerson): The agent to reduce.
            column_names (list, optional): Column names for the resulting DataFrame.

        Returns:
            pd.DataFrame: One row per reduction record.
        """
        reduction = self.reduce_agent(agent)
        return pd.DataFrame(reduction, columns=column_names)

Methods

def add_reduction_rule(self, trigger: str, func: callable)
Expand source code
def add_reduction_rule(self, trigger: str, func: callable):
    """Register `func` as the reduction rule for `trigger`, rejecting duplicates."""
    if trigger not in self.rules:
        self.rules[trigger] = func
    else:
        raise Exception(f"Rule for {trigger} already exists.")
def reduce_agent(self, agent: TinyPerson) ‑> list
Expand source code
def reduce_agent(self, agent: TinyPerson) -> list:
    """
    Applies the registered reduction rules to each episodic-memory message of `agent`.

    'user' messages are treated as stimuli and 'assistant' messages as actions;
    'system' messages are skipped. A rule returning None contributes nothing.

    Args:
        agent (TinyPerson): The agent whose episodic memory is reduced.

    Returns:
        list: The non-None values produced by matching rules, in message order.
    """
    reduction = []
    for message in agent.episodic_memory.retrieve_all():
        if message['role'] == 'system':
            continue # doing nothing for `system` role yet at least

        elif message['role'] == 'user':
            # User role is related to stimuli only
            # NOTE(review): only the first stimulus of each message is examined;
            # any additional stimuli are ignored.
            stimulus_type = message['content']['stimuli'][0].get('type', None)
            stimulus_content = message['content']['stimuli'][0].get('content', None)
            stimulus_source = message['content']['stimuli'][0].get('source', None)
            stimulus_timestamp = message['simulation_timestamp']

            if stimulus_type in self.rules:
                # The agent receiving the stimulus is both the focus and the target.
                extracted = self.rules[stimulus_type](focus_agent=agent, source_agent=TinyPerson.get_agent_by_name(stimulus_source), target_agent=agent, kind='stimulus', event=stimulus_type, content=stimulus_content, timestamp=stimulus_timestamp)
                if extracted is not None:
                    reduction.append(extracted)

        elif message['role'] == 'assistant':
            # Assistant role is related to actions only
            if 'action' in message['content']:
                action_type = message['content']['action'].get('type', None)
                action_content = message['content']['action'].get('content', None)
                action_target = message['content']['action'].get('target', None)
                action_timestamp = message['simulation_timestamp']
                
                if action_type in self.rules:
                    # The acting agent is both the focus and the source.
                    extracted = self.rules[action_type](focus_agent=agent, source_agent=agent, target_agent=TinyPerson.get_agent_by_name(action_target), kind='action', event=action_type, content=action_content, timestamp=action_timestamp)
                    if extracted is not None:
                        reduction.append(extracted)
        
    return reduction
def reduce_agent_to_dataframe(self, agent: TinyPerson, column_names: list = None) ‑> pandas.core.frame.DataFrame
Expand source code
def reduce_agent_to_dataframe(self, agent: TinyPerson, column_names: list=None) -> pd.DataFrame:
    """Reduce `agent` via `reduce_agent` and return the records as a DataFrame."""
    return pd.DataFrame(self.reduce_agent(agent), columns=column_names)
class ResultsReporter (default_reporting_task: str = 'Summarize the key findings, insights, and outcomes from the simulation data.', verbose: bool = False)

Initializes the ResultsReporter.

Args

default_reporting_task : str
The default task to ask agents when generating reports.
verbose : bool
Whether to print debug messages.
Expand source code
class ResultsReporter:
    """
    Generates Markdown reports from simulation data, either by interviewing agents,
    by analyzing their interaction histories, or by formatting raw data, delegating
    the actual report writing to an LLM.
    """
    
    def __init__(self, 
                 default_reporting_task: str = "Summarize the key findings, insights, and outcomes from the simulation data.",
                 verbose: bool = False):
        """
        Initializes the ResultsReporter.
        
        Args:
            default_reporting_task (str): The default task to ask agents when generating reports.
            verbose (bool): Whether to print debug messages.
        """
        self.default_reporting_task = default_reporting_task
        self.verbose = verbose
        self.console = Console()
        
        # Cache for generated reports
        self.last_report = None
    
    def report_from_agents(self,
                          agents: Union[TinyPerson, TinyWorld, List[TinyPerson]],
                          reporting_task: str = None,
                          report_title: str = "Simulation Report",
                          include_agent_summaries: bool = True,
                          consolidate_responses: bool = True,
                          requirements: str = "Present the findings in a clear, structured manner.") -> str:
        """
        Option 1: Generate a report by asking agents about specific reporting tasks.
        
        Args:
            agents: Single agent, TinyWorld, or list of agents to interview.
            reporting_task: The specific task to ask agents about. If None, uses the default task.
            report_title: Title for the generated report.
            include_agent_summaries: Whether to include agent mini-bios in the report.
            consolidate_responses: Whether to consolidate all responses into a single report.
            requirements: Formatting or content requirements for the report.
            
        Returns:
            str: The generated Markdown report.
        """
        if reporting_task is None:
            reporting_task = self.default_reporting_task
            
        # Extract agents from input
        agent_list = self._extract_agents(agents)
        
        if self.verbose:
            logger.info(f"Interviewing {len(agent_list)} agents for report generation.")
        
        # Collect responses from agents
        agent_responses = []
        for agent in agent_list:
            response = self._interview_agent(agent, reporting_task)
            agent_responses.append({
                "agent": agent,
                "response": response
            })
        
        # Generate the report
        report = self._format_agent_interview_report(
            agent_responses, 
            report_title, 
            reporting_task,
            include_agent_summaries,
            consolidate_responses,
            requirements
        )
        
        self.last_report = report
        return report
    
    def report_from_interactions(self,
                                agents: Union[TinyPerson, TinyWorld, List[TinyPerson]],
                                report_title: str = "Interaction Analysis Report",
                                include_agent_summaries: bool = True,
                                first_n: int = None,
                                last_n: int = None,
                                max_content_length: int = None,
                                requirements: str = "Present the findings in a clear, structured manner.") -> str:
        """
        Option 2: Generate a report by analyzing agents' historical interactions.
        
        Args:
            agents: Single agent, TinyWorld, or list of agents to analyze.
            report_title: Title for the generated report.
            include_agent_summaries: Whether to include agent mini-bios.
            first_n: Number of first interactions to include.
            last_n: Number of last interactions to include.
            max_content_length: Maximum content length for interactions.
            requirements: Formatting or content requirements for the report.
            
        Returns:
            str: The generated Markdown report.
        """
        # Extract agents from input
        agent_list = self._extract_agents(agents)
        
        if self.verbose:
            logger.info(f"Analyzing interactions from {len(agent_list)} agents.")
        
        # Collect interaction data
        interactions_data = []
        for agent in agent_list:
            interactions = agent.pretty_current_interactions(
                simplified=True,
                first_n=first_n,
                last_n=last_n,
                max_content_length=max_content_length
            )
            interactions_data.append({
                "agent": agent,
                "interactions": interactions
            })
        
        # Generate the report
        report = self._format_interactions_report(
            interactions_data,
            report_title,
            include_agent_summaries,
            requirements
        )
        
        self.last_report = report
        return report
    
    def report_from_data(self,
                        data: Union[str, Dict[str, Any], List[Dict[str, Any]]],
                        report_title: str = "Data Report",
                        requirements: str = "Present the findings in a clear, structured manner.") -> str:
        """
        Option 3: Generate a report from raw text or structured data.
        
        Args:
            data: Raw text, dictionary, or list of dictionaries to format.
            report_title: Title for the generated report.
            requirements: Formatting or content requirements for the report. If None, uses simple formatting.
            
        Returns:
            str: The generated Markdown report.
        """
        if self.verbose:
            logger.info("Generating report from raw data.")
        
        # Generate the report
        report = self._format_data_report(data, report_title, requirements)
        
        self.last_report = report
        return report
    
    def display_report(self, report: str = None):
        """
        Display a report on the console with rich formatting.
        
        Args:
            report: The report to display. If None, uses the last generated report.
        """
        if report is None:
            report = self.last_report
            
        if report is None:
            self.console.print("[red]No report available to display.[/red]")
            return
            
        markdown = Markdown(report)
        self.console.print(markdown)
    
    def save_report(self, 
                   filename: str,
                   report: str = None,
                   verbose: bool = None):
        """
        Save a report to a file.
        
        Args:
            filename: The filename to save the report to.
            report: The report to save. If None, uses the last generated report.
            verbose: Whether to print confirmation message. If None, uses the instance default.

        Raises:
            ValueError: If there is no report to save.
        """
        if report is None:
            report = self.last_report
            
        if report is None:
            raise ValueError("No report available to save.")
            
        if verbose is None:
            verbose = self.verbose
            
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(report)
            
        if verbose:
            # Bug fix: the message previously had no placeholder, so the target
            # filename was never shown.
            logger.info(f"Report saved to {filename}")
    
    def _extract_agents(self, agents) -> List[TinyPerson]:
        """Extract a list of TinyPerson objects from various input types."""
        if isinstance(agents, TinyPerson):
            return [agents]
        elif isinstance(agents, TinyWorld):
            return agents.agents
        elif isinstance(agents, list):
            return agents
        else:
            raise ValueError("Agents must be a TinyPerson, TinyWorld, or list of TinyPerson objects.")
    
    def _interview_agent(self, agent: TinyPerson, reporting_task: str) -> str:
        """Interview a single agent about the reporting task, returning the text of its TALK actions."""
        if self.verbose:
            logger.debug(f"Interviewing agent {agent.name} about: {reporting_task}")
        
        # Following TinyTroupe patterns - directly interact with the agent
        prompt = f"""
        I need you to provide a comprehensive report based on your experiences and observations.
        
        Reporting task: {reporting_task}
        
        Please provide detailed insights, specific examples, and key findings from your perspective.
        Focus on what you've learned, observed, and experienced during the simulation.
        """
        
        # Use listen_and_act pattern to get agent's response
        agent.listen(prompt)
        actions = agent.act(return_actions=True)
        
        # Extract the response from the agent's actions; only TALK actions carry the answer
        response = ""
        for action in actions:
            if action["action"]["type"] == "TALK":
                response += action["action"]["content"] + "\n"
        
        if self.verbose:
            logger.debug(f"Agent {agent.name} response received.")
        
        return response.strip()
    
    def _format_agent_interview_report(self, 
                                     agent_responses: List[Dict],
                                     title: str,
                                     task: str,
                                     include_summaries: bool,
                                     consolidate: bool,
                                     requirements: str) -> str:
        """Format agent interview responses into a Markdown report."""
        # Prepare data for LLM formatting
        agents_data = []
        for resp in agent_responses:
            agent_info = {
                "name": resp["agent"].name,
                "response": resp["response"]
            }
            if include_summaries:
                agent_info["bio"] = resp["agent"].minibio(extended=False)
            agents_data.append(agent_info)
        
        # Generate report using LLM
        return self._generate_report_with_llm(
            title=title,
            report_type="agent_interview",
            data={
                "reporting_task": task,
                "agents_data": agents_data,
                "consolidate": consolidate
            },
            include_summaries=include_summaries,
            requirements=requirements
        )
    
    def _format_interactions_report(self,
                                  interactions_data: List[Dict],
                                  title: str,
                                  include_summaries: bool,
                                  requirements: str) -> str:
        """Format interaction data into a Markdown report."""
        # Prepare data for LLM formatting
        agents_data = []
        for data in interactions_data:
            agent_info = {
                "name": data["agent"].name,
                "interactions": data["interactions"]
            }
            if include_summaries:
                agent_info["bio"] = data["agent"].minibio(extended=False)
            agents_data.append(agent_info)
        
        # Generate report using LLM
        return self._generate_report_with_llm(
            title=title,
            report_type="interactions",
            data={"agents_data": agents_data},
            include_summaries=include_summaries,
            requirements=requirements
        )
    
    def _format_data_report(self,
                          data: Any,
                          title: str,
                          requirements: str) -> str:
        """Format raw data into a Markdown report."""
        return self._generate_report_with_llm(
            title=title,
            report_type="custom_data",
            data=data,
            requirements=requirements
        )

    
    def _generate_report_with_llm(self,
                                title: str,
                                report_type: str,
                                data: Any,
                                include_summaries: bool = False,
                                requirements: str = None) -> str:
        """
        Generate a report using LLM based on the report type and data.

        Raises:
            ValueError: If `report_type` is not one of 'agent_interview',
                'interactions', or 'custom_data'.
        """
        
        # Base system prompt
        system_prompt = "You are a professional report writer who creates clear, well-structured Markdown reports."
        
        # Type-specific prompts and instructions
        if report_type == "agent_interview":
            system_prompt += " You specialize in synthesizing interview responses from multiple agents."
            user_prompt = f"""
            ## Task
            Create a comprehensive report based on agent interviews such that it fulfills the 
            specified requirements below.
            
            ## Report Title
            {title}
            
            ## Report Details
            - **Reporting Task:** {data['reporting_task']}
            - **Number of Agents Interviewed:** {len(data['agents_data'])}
            - **Generated on:** {self._get_timestamp()}
            
            ## Agent Responses
            {json.dumps(data['agents_data'], indent=2)}
            
            ## Instructions
            - Start with the title as a level-1 header
            - Write a direct, clear report, but do not simplify or summarize the information
            - Make sure all important details are included. This is not a summary, but a detailed report, so you never remove information, you just make it more readable
            - Do not include the original data or agent responses, but only the resulting report information
            - For each agent, include their bio if provided
            - Use proper Markdown formatting throughout
            - Follow the requirements given next, which can also override any of these rules
            
            ## Requirements
            {requirements}
            """
            
        elif report_type == "interactions":
            system_prompt += " You specialize in analyzing and presenting agent interaction histories."
            user_prompt = f"""
            ## Task
            Create a report analyzing agent interactions from a simulation such that it fulfills the 
            specified requirements below.
            
            ## Report Title
            {title}
            
            ## Report Details
            - **Number of Agents Analyzed:** {len(data['agents_data'])}
            - **Generated on:** {self._get_timestamp()}
            
            ## Agent Interaction Data
            {json.dumps(data['agents_data'], indent=2)}
            
            ## Instructions
            - Start with the title as a level-1 header
            - Write a direct, clear report, but do not simplify or summarize the information
            - Make sure all important details are included. This is not a summary, but a detailed report, so you never remove information, you just make it more readable
            - Do not include agents' interaction history, but only the resulting report information
            - For each agent, include their bio if provided
            - Use proper Markdown formatting throughout
            - Follow the requirements given next, which can also override any of these rules
            
            ## Requirements
            {requirements}
            """
            
        elif report_type == "custom_data":
            # Handle arbitrary data without assuming any structure
            if isinstance(data, str):
                data_representation = data
            else:
                # For any other type, convert to JSON for a clean representation
                data_representation = json.dumps(data, indent=2)
                
            user_prompt = f"""
            ## Task
            Create a well-structured Markdown report based on the provided data such that it fulfills the 
            specified requirements below.
            
            ## Report Title
            {title}
            
            ## Generated on
            {self._get_timestamp()}
            
            ## Data to Format
            {data_representation}
            
            ## Instructions
            - Start with the title as a level-1 header
            - Write a direct, clear report, but do not simplify or summarize the information
            - Make sure all important details are included. This is not a summary, but a detailed report, so you never remove information, you just make it more readable
            - Use proper Markdown formatting throughout
            - Follow the requirements given next, which can also override any of these rules
            
            ## Requirements
            {requirements if requirements else "Use your best judgment to create a clear, informative report that presents the data in an organized and readable manner."}
            """
        
        else:
            raise ValueError(f"Unknown report type: {report_type}")
        
        # Generate the report
        report_chat = LLMChat(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            output_type=str,
            enable_json_output_format=False,
            model=default["model"],
            temperature=0.3
        )
        
        return report_chat()
    
    
    def _get_timestamp(self) -> str:
        """Get current timestamp for report headers."""
        from datetime import datetime
        return datetime.now().strftime("%Y-%m-%d %H:%M:%S")

Methods

def display_report(self, report: str = None)

Display a report on the console with rich formatting.

Args

report
The report to display. If None, uses the last generated report.
Expand source code
def display_report(self, report: str = None):
    """
    Display a report on the console with rich formatting.

    Args:
        report: The report to display. If None, uses the last generated report.
    """
    # Fall back to the most recently generated report when none is given.
    chosen = self.last_report if report is None else report

    # Still nothing to show -- inform the user and stop.
    if chosen is None:
        self.console.print("[red]No report available to display.[/red]")
        return

    # Render the Markdown text through rich's console.
    self.console.print(Markdown(chosen))
def report_from_agents(self, agents: Union[TinyPerson, TinyWorld, List[TinyPerson]], reporting_task: str = None, report_title: str = 'Simulation Report', include_agent_summaries: bool = True, consolidate_responses: bool = True, requirements: str = 'Present the findings in a clear, structured manner.') ‑> str

Option 1: Generate a report by asking agents about specific reporting tasks.

Args

agents
Single agent, TinyWorld, or list of agents to interview.
reporting_task
The specific task to ask agents about.
report_title
Title for the generated report.
include_agent_summaries
Whether to include agent mini-bios in the report.
consolidate_responses
Whether to consolidate all responses into a single report.
requirements
Formatting or content requirements for the report.

Returns

str
The generated Markdown report.
Expand source code
def report_from_agents(self,
                      agents: Union[TinyPerson, TinyWorld, List[TinyPerson]],
                      reporting_task: str = None,
                      report_title: str = "Simulation Report",
                      include_agent_summaries: bool = True,
                      consolidate_responses: bool = True,
                      requirements: str = "Present the findings in a clear, structured manner.") -> str:
    """
    Option 1: Generate a report by asking agents about specific reporting tasks.
    
    Args:
        agents: Single agent, TinyWorld, or list of agents to interview.
        reporting_task: The specific task to ask agents about.
        report_title: Title for the generated report.
        include_agent_summaries: Whether to include agent mini-bios in the report.
        consolidate_responses: Whether to consolidate all responses into a single report.
        requirements: Formatting or content requirements for the report.
        
    Returns:
        str: The generated Markdown report.
    """
    # Use the instance-level default task when the caller did not supply one.
    task = self.default_reporting_task if reporting_task is None else reporting_task

    # Normalize the input (single agent, world, or list) into a flat agent list.
    interviewees = self._extract_agents(agents)

    if self.verbose:
        logger.info(f"Interviewing {len(interviewees)} agents for report generation.")

    # Pair each agent with its answer to the reporting task.
    agent_responses = [
        {"agent": person, "response": self._interview_agent(person, task)}
        for person in interviewees
    ]

    # Consolidate the interviews into the final Markdown report.
    report = self._format_agent_interview_report(
        agent_responses,
        report_title,
        task,
        include_agent_summaries,
        consolidate_responses,
        requirements,
    )

    # Remember the result so display_report/save_report can reuse it.
    self.last_report = report
    return report
def report_from_data(self, data: Union[str, Dict[str, Any], List[Dict[str, Any]]], report_title: str = 'Data Report', requirements: str = 'Present the findings in a clear, structured manner.') ‑> str

Option 3: Generate a report from raw text or structured data.

Args

data
Raw text, dictionary, or list of dictionaries to format.
report_title
Title for the generated report.
requirements
Formatting or content requirements for the report. If None, uses simple formatting.

Returns

str
The generated Markdown report.
Expand source code
def report_from_data(self,
                    data: Union[str, Dict[str, Any], List[Dict[str, Any]]],
                    report_title: str = "Data Report",
                    requirements: str = "Present the findings in a clear, structured manner.") -> str:
    """
    Option 3: Generate a report from raw text or structured data.
    
    Args:
        data: Raw text, dictionary, or list of dictionaries to format.
        report_title: Title for the generated report.
        requirements: Formatting or content requirements for the report. If None, uses simple formatting.
        
    Returns:
        str: The generated Markdown report.
    """
    if self.verbose:
        logger.info("Generating report from raw data.")

    # Delegate the actual Markdown generation to the shared data formatter,
    # then remember the result so display_report/save_report can reuse it.
    self.last_report = self._format_data_report(data, report_title, requirements)
    return self.last_report
def report_from_interactions(self, agents: Union[TinyPerson, TinyWorld, List[TinyPerson]], report_title: str = 'Interaction Analysis Report', include_agent_summaries: bool = True, first_n: int = None, last_n: int = None, max_content_length: int = None, requirements: str = 'Present the findings in a clear, structured manner.') ‑> str

Option 2: Generate a report by analyzing agents' historical interactions.

Args

agents
Single agent, TinyWorld, or list of agents to analyze.
report_title
Title for the generated report.
include_agent_summaries
Whether to include agent mini-bios.
first_n
Number of first interactions to include.
last_n
Number of last interactions to include.
max_content_length
Maximum content length for interactions.
requirements
Formatting or content requirements for the report.

Returns

str
The generated Markdown report.
Expand source code
def report_from_interactions(self,
                            agents: Union[TinyPerson, TinyWorld, List[TinyPerson]],
                            report_title: str = "Interaction Analysis Report",
                            include_agent_summaries: bool = True,
                            first_n: int = None,
                            last_n: int = None,
                            max_content_length: int = None,
                            requirements: str = "Present the findings in a clear, structured manner.") -> str:
    """
    Option 2: Generate a report by analyzing agents' historical interactions.
    
    Args:
        agents: Single agent, TinyWorld, or list of agents to analyze.
        report_title: Title for the generated report.
        include_agent_summaries: Whether to include agent mini-bios.
        first_n: Number of first interactions to include.
        last_n: Number of last interactions to include.
        max_content_length: Maximum content length for interactions.
        requirements: Formatting or content requirements for the report.
        
    Returns:
        str: The generated Markdown report.
    """
    # Normalize the input (single agent, world, or list) into a flat agent list.
    subjects = self._extract_agents(agents)

    if self.verbose:
        logger.info(f"Analyzing interactions from {len(subjects)} agents.")

    # Gather a simplified (and possibly truncated) interaction trace per agent.
    interactions_data = []
    for person in subjects:
        trace = person.pretty_current_interactions(
            simplified=True,
            first_n=first_n,
            last_n=last_n,
            max_content_length=max_content_length,
        )
        interactions_data.append({"agent": person, "interactions": trace})

    # Turn the collected traces into the final Markdown report.
    report = self._format_interactions_report(
        interactions_data,
        report_title,
        include_agent_summaries,
        requirements,
    )

    # Remember the result so display_report/save_report can reuse it.
    self.last_report = report
    return report
def save_report(self, filename: str, report: str = None, verbose: bool = None)

Save a report to a file.

Args

filename
The filename to save the report to.
report
The report to save. If None, uses the last generated report.
verbose
Whether to print confirmation message.
Expand source code
def save_report(self, 
               filename: str,
               report: str = None,
               verbose: bool = None):
    """
    Save a report to a file.
    
    Args:
        filename: The filename to save the report to.
        report: The report to save. If None, uses the last generated report.
        verbose: Whether to print confirmation message. If None, falls back to
            the instance's verbosity setting.

    Raises:
        ValueError: If no report is given and no report was generated previously.
    """
    if report is None:
        report = self.last_report
        
    if report is None:
        raise ValueError("No report available to save.")
        
    if verbose is None:
        verbose = self.verbose
        
    # UTF-8 so Markdown content with non-ASCII characters round-trips safely.
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(report)
        
    if verbose:
        # BUG FIX: the message previously logged the literal text "(unknown)"
        # instead of interpolating the destination path.
        logger.info(f"Report saved to {filename}")