Module tinytroupe.steering
import logging
logger = logging.getLogger("tinytroupe")
###########################################################################
# Exposed API
###########################################################################
from tinytroupe.steering.tiny_story import TinyStory
from tinytroupe.steering.intervention import Intervention
__all__ = ["TinyStory", "Intervention"]
Sub-modules
tinytroupe.steering.intervention
tinytroupe.steering.tiny_story
Classes
class Intervention (targets: Union[TinyPerson, TinyWorld, List[TinyPerson], List[TinyWorld]], first_n: int = 10, last_n: int = 100, name: str = None)
Initialize the intervention.
Args
targets : Union[TinyPerson, TinyWorld, List[TinyPerson], List[TinyWorld]]
    The target(s) to intervene on.
first_n : int
    The number of first (earliest) interactions to consider in the context.
last_n : int
    The number of last (most recent) interactions to consider in the context.
name : str
    The name of the intervention.
class Intervention:

    def __init__(self, targets: Union[TinyPerson, TinyWorld, List[TinyPerson], List[TinyWorld]],
                 first_n: int = DEFAULT_FIRST_N, last_n: int = DEFAULT_LAST_N,
                 name: str = None):
        """
        Initialize the intervention.

        Args:
            targets (Union[TinyPerson, TinyWorld, List[TinyPerson], List[TinyWorld]]): the target(s) to intervene on
            first_n (int): the number of first interactions to consider in the context
            last_n (int): the number of last interactions (most recent) to consider in the context
            name (str): the name of the intervention
        """
        self.targets = targets

        # initialize the possible preconditions
        self.text_precondition = None
        self.precondition_func = None

        # effects
        self.effect_func = None

        # which events to pay attention to?
        self.first_n = first_n
        self.last_n = last_n

        # name
        if name is None:
            self.name = f"Intervention {utils.fresh_id(self.__class__.__name__)}"
        else:
            self.name = name

        # the most recent precondition proposition used to check the precondition
        self._last_text_precondition_proposition = None
        self._last_functional_precondition_check = None

        # propositional precondition (optional)
        self.propositional_precondition = None
        self.propositional_precondition_threshold = None
        self._last_propositional_precondition_check = None

    ################################################################################################
    # Intervention flow
    ################################################################################################

    @classmethod
    def create_for_each(cls, targets, first_n=DEFAULT_FIRST_N, last_n=DEFAULT_LAST_N, name=None):
        """
        Create separate interventions for each target in the list.

        Args:
            targets (list): List of targets (TinyPerson or TinyWorld instances)
            first_n (int): the number of first interactions to consider in the context
            last_n (int): the number of last interactions (most recent) to consider in the context
            name (str): the name of the intervention

        Returns:
            InterventionBatch: A wrapper that allows chaining set_* methods that will apply to all interventions
        """
        if not isinstance(targets, list):
            targets = [targets]

        interventions = [cls(target, first_n=first_n, last_n=last_n, name=f"{name}_{i}" if name else None)
                         for i, target in enumerate(targets)]

        return InterventionBatch(interventions)

    def __call__(self):
        """
        Execute the intervention.

        Returns:
            bool: whether the intervention effect was applied.
        """
        return self.execute()

    def execute(self):
        """
        Execute the intervention. It first checks the precondition, and if it is met, applies the effect.
        This is the simplest method to run the intervention.

        Returns:
            bool: whether the intervention effect was applied.
        """
        logger.debug(f"Executing intervention: {self}")
        if self.check_precondition():
            self.apply_effect()
            logger.debug(f"Precondition was true, intervention effect was applied.")
            return True

        logger.debug(f"Precondition was false, intervention effect was not applied.")
        return False

    def check_precondition(self):
        """
        Check if the precondition for the intervention is met.
        """
        #
        # Textual precondition
        #
        if self.text_precondition is not None:
            self._last_text_precondition_proposition = Proposition(claim=self.text_precondition, target=self.targets,
                                                                   first_n=self.first_n, last_n=self.last_n)
            llm_precondition_check = self._last_text_precondition_proposition.check()
        else:
            llm_precondition_check = True

        #
        # Functional precondition
        #
        if self.precondition_func is not None:
            self._last_functional_precondition_check = self.precondition_func(self.targets)
        else:
            self._last_functional_precondition_check = True  # default to True if no functional precondition is set

        #
        # Propositional precondition
        #
        self._last_propositional_precondition_check = True
        if self.propositional_precondition is not None:
            if self.propositional_precondition_threshold is not None:
                score = self.propositional_precondition.score(target=self.targets)
                if score >= self.propositional_precondition_threshold:
                    self._last_propositional_precondition_check = False
            else:
                if not self.propositional_precondition.check(target=self.targets):
                    self._last_propositional_precondition_check = False

        return llm_precondition_check and self._last_functional_precondition_check and self._last_propositional_precondition_check

    def apply_effect(self):
        """
        Apply the intervention's effects. This won't check the precondition,
        so it should be called after check_precondition.
        """
        self.effect_func(self.targets)

    ################################################################################################
    # Pre and post conditions
    ################################################################################################

    def set_textual_precondition(self, text):
        """
        Set a precondition as text, to be interpreted by a language model.

        Args:
            text (str): the text of the precondition
        """
        self.text_precondition = text
        return self  # for chaining

    def set_functional_precondition(self, func):
        """
        Set a precondition as a function, to be evaluated in code.

        Args:
            func (function): the precondition function. Must take a single argument, targets
                (either a TinyWorld or TinyPerson, or a list). Must return a boolean.
        """
        self.precondition_func = func
        return self  # for chaining

    def set_effect(self, effect_func):
        """
        Set the effect of the intervention.

        Args:
            effect_func (function): the effect function of the intervention
        """
        self.effect_func = effect_func
        return self  # for chaining

    def set_propositional_precondition(self, proposition: Proposition, threshold: int = None):
        """
        Set a propositional precondition using the Proposition class, optionally with a score threshold.
        """
        self.propositional_precondition = proposition
        self.propositional_precondition_threshold = threshold
        return self

    ################################################################################################
    # Inspection
    ################################################################################################

    def precondition_justification(self):
        """
        Get the justification for the precondition.
        """
        justification = ""

        # text precondition justification
        if self._last_text_precondition_proposition is not None:
            justification += f"{self._last_text_precondition_proposition.justification} (confidence = {self._last_text_precondition_proposition.confidence})\n\n"

        # functional precondition justification
        if self.precondition_func is not None:
            if self._last_functional_precondition_check == True:
                justification += f"Functional precondition was met.\n\n"
            else:
                justification += "Preconditions do not appear to be met.\n\n"

        # propositional precondition justification
        if self.propositional_precondition is not None:
            if self._last_propositional_precondition_check == True:
                justification += f"Propositional precondition was met.\n\n"
            else:
                justification += "Propositional precondition was not met.\n\n"

        return justification
Static methods
def create_for_each(targets, first_n=10, last_n=100, name=None)
Create separate interventions for each target in the list.
Args
targets : list
    List of targets (TinyPerson or TinyWorld instances).
first_n : int
    The number of first (earliest) interactions to consider in the context.
last_n : int
    The number of last (most recent) interactions to consider in the context.
name : str
    The name of the intervention.
Returns
InterventionBatch
    A wrapper that allows chaining set_* methods that will apply to all interventions.
@classmethod
def create_for_each(cls, targets, first_n=DEFAULT_FIRST_N, last_n=DEFAULT_LAST_N, name=None):
    """
    Create separate interventions for each target in the list.

    Args:
        targets (list): List of targets (TinyPerson or TinyWorld instances)
        first_n (int): the number of first interactions to consider in the context
        last_n (int): the number of last interactions (most recent) to consider in the context
        name (str): the name of the intervention

    Returns:
        InterventionBatch: A wrapper that allows chaining set_* methods that will apply to all interventions
    """
    if not isinstance(targets, list):
        targets = [targets]

    interventions = [cls(target, first_n=first_n, last_n=last_n, name=f"{name}_{i}" if name else None)
                     for i, target in enumerate(targets)]

    return InterventionBatch(interventions)
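A hedged sketch of the per-target factory (agents is a placeholder list of TinyPerson instances; per the docstring, the returned InterventionBatch forwards chained set_* calls to every created intervention):

# Illustrative: one intervention per agent, all configured through the returned batch.
batch = (Intervention.create_for_each(agents, first_n=5, last_n=50, name="re-engage")
         .set_textual_precondition("The agent has stopped contributing to the discussion.")
         .set_effect(lambda target: target.think("I have been quiet; I should share my view.")))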
Methods
def apply_effect(self)
Apply the intervention's effects. This won't check the precondition, so it should be called after check_precondition.

def apply_effect(self):
    """
    Apply the intervention's effects. This won't check the precondition,
    so it should be called after check_precondition.
    """
    self.effect_func(self.targets)
def check_precondition(self)
Check if the precondition for the intervention is met.

def check_precondition(self):
    """
    Check if the precondition for the intervention is met.
    """
    #
    # Textual precondition
    #
    if self.text_precondition is not None:
        self._last_text_precondition_proposition = Proposition(claim=self.text_precondition, target=self.targets,
                                                               first_n=self.first_n, last_n=self.last_n)
        llm_precondition_check = self._last_text_precondition_proposition.check()
    else:
        llm_precondition_check = True

    #
    # Functional precondition
    #
    if self.precondition_func is not None:
        self._last_functional_precondition_check = self.precondition_func(self.targets)
    else:
        self._last_functional_precondition_check = True  # default to True if no functional precondition is set

    #
    # Propositional precondition
    #
    self._last_propositional_precondition_check = True
    if self.propositional_precondition is not None:
        if self.propositional_precondition_threshold is not None:
            score = self.propositional_precondition.score(target=self.targets)
            if score >= self.propositional_precondition_threshold:
                self._last_propositional_precondition_check = False
        else:
            if not self.propositional_precondition.check(target=self.targets):
                self._last_propositional_precondition_check = False

    return llm_precondition_check and self._last_functional_precondition_check and self._last_propositional_precondition_check
def execute(self)
Execute the intervention. It first checks the precondition, and if it is met, applies the effect. This is the simplest method to run the intervention.
Returns
bool
    Whether the intervention effect was applied.

def execute(self):
    """
    Execute the intervention. It first checks the precondition, and if it is met, applies the effect.
    This is the simplest method to run the intervention.

    Returns:
        bool: whether the intervention effect was applied.
    """
    logger.debug(f"Executing intervention: {self}")
    if self.check_precondition():
        self.apply_effect()
        logger.debug(f"Precondition was true, intervention effect was applied.")
        return True

    logger.debug(f"Precondition was false, intervention effect was not applied.")
    return False
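Since __call__ delegates to execute (see the class source above), an intervention can also be invoked like a function between simulation steps; a hedged sketch, with world as a placeholder TinyWorld:

# Illustrative: interleave simulation steps with intervention checks.
for _ in range(5):
    world.run(1)      # advance the simulation by one step
    intervention()    # equivalent to intervention.execute()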
def precondition_justification(self)
Get the justification for the precondition.

def precondition_justification(self):
    """
    Get the justification for the precondition.
    """
    justification = ""

    # text precondition justification
    if self._last_text_precondition_proposition is not None:
        justification += f"{self._last_text_precondition_proposition.justification} (confidence = {self._last_text_precondition_proposition.confidence})\n\n"

    # functional precondition justification
    if self.precondition_func is not None:
        if self._last_functional_precondition_check == True:
            justification += f"Functional precondition was met.\n\n"
        else:
            justification += "Preconditions do not appear to be met.\n\n"

    # propositional precondition justification
    if self.propositional_precondition is not None:
        if self._last_propositional_precondition_check == True:
            justification += f"Propositional precondition was met.\n\n"
        else:
            justification += "Propositional precondition was not met.\n\n"

    return justification
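For example, the justification can be logged after each run to understand why the effect did or did not fire (a sketch; the logger setup is whatever the surrounding application uses):

applied = intervention.execute()
logger.info(f"{intervention.name} applied: {applied}")
logger.info(intervention.precondition_justification())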
def set_effect(self, effect_func)
Set the effect of the intervention.
Args
effect_func : function
    The effect function of the intervention.

def set_effect(self, effect_func):
    """
    Set the effect of the intervention.

    Args:
        effect_func (function): the effect function of the intervention
    """
    self.effect_func = effect_func
    return self  # for chaining
def set_functional_precondition(self, func)
Set a precondition as a function, to be evaluated in code.
Args
func : function
    The precondition function. Must take a single argument, targets (either a TinyWorld or TinyPerson, or a list), and return a boolean.

def set_functional_precondition(self, func):
    """
    Set a precondition as a function, to be evaluated in code.

    Args:
        func (function): the precondition function. Must take a single argument, targets
            (either a TinyWorld or TinyPerson, or a list). Must return a boolean.
    """
    self.precondition_func = func
    return self  # for chaining
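A small hedged example; the condition itself is arbitrary, the only contract is a callable that takes the targets and returns a boolean:

# Illustrative: combine a code-level check with the other preconditions.
# Here 'targets' is assumed to be a single TinyPerson; any boolean-returning logic works.
intervention.set_functional_precondition(lambda targets: targets.name != "Moderator")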
def set_propositional_precondition(self, proposition: Proposition, threshold: int = None)
Set a propositional precondition using the Proposition class, optionally with a score threshold.

def set_propositional_precondition(self, proposition: Proposition, threshold: int = None):
    """
    Set a propositional precondition using the Proposition class, optionally with a score threshold.
    """
    self.propositional_precondition = proposition
    self.propositional_precondition_threshold = threshold
    return self
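A hedged sketch; Proposition is the same class that check_precondition instantiates above, and the constructor arguments mirror that call, but the import path shown is an assumption to verify against your TinyTroupe version:

# Illustrative: gate the intervention on a scored proposition.
from tinytroupe.experimentation import Proposition  # assumed import path

prop = Proposition(claim="The discussion has become repetitive.", target=agent)
intervention.set_propositional_precondition(prop, threshold=7)
# See check_precondition above for how the score is compared against the threshold.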
def set_textual_precondition(self, text)
Set a precondition as text, to be interpreted by a language model.
Args
text : str
    The text of the precondition.

def set_textual_precondition(self, text):
    """
    Set a precondition as text, to be interpreted by a language model.

    Args:
        text (str): the text of the precondition
    """
    self.text_precondition = text
    return self  # for chaining
class TinyStory (environment: TinyWorld = None, agent: TinyPerson = None, purpose: str = 'Be a realistic simulation.', context: str = '', first_n=10, last_n=20, include_omission_info: bool = True)
Every simulation tells a story. This class provides helper mechanisms for crafting appropriate stories in TinyTroupe.
Initialize the story. The story can be about an environment or an agent. It also has a purpose, which is used to guide the story generation. Stories are aware that they are related to simulations, so one can specify simulation-related purposes. Exactly one of environment or agent must be provided; the constructor raises an exception if both or neither are given.
Args
environment : TinyWorld, optional
    The environment in which the story takes place. Defaults to None.
agent : TinyPerson, optional
    The agent in the story. Defaults to None.
purpose : str, optional
    The purpose of the story. Defaults to "Be a realistic simulation.".
context : str, optional
    The current story context. Defaults to "". The actual story will be appended to this context.
first_n : int, optional
    The number of first interactions to include in the story. Defaults to 10.
last_n : int, optional
    The number of last interactions to include in the story. Defaults to 20.
include_omission_info : bool, optional
    Whether to include information about omitted interactions. Defaults to True.
class TinyStory:
    """
    Every simulation tells a story. This class provides helper mechanisms for crafting appropriate stories in TinyTroupe.
    """

    def __init__(self, environment: TinyWorld = None, agent: TinyPerson = None,
                 purpose: str = "Be a realistic simulation.", context: str = "",
                 first_n=10, last_n=20, include_omission_info: bool = True) -> None:
        """
        Initialize the story. The story can be about an environment or an agent. It also has a purpose, which
        is used to guide the story generation. Stories are aware that they are related to simulations, so one
        can specify simulation-related purposes.

        Args:
            environment (TinyWorld, optional): The environment in which the story takes place. Defaults to None.
            agent (TinyPerson, optional): The agent in the story. Defaults to None.
            purpose (str, optional): The purpose of the story. Defaults to "Be a realistic simulation.".
            context (str, optional): The current story context. Defaults to "". The actual story will be appended to this context.
            first_n (int, optional): The number of first interactions to include in the story. Defaults to 10.
            last_n (int, optional): The number of last interactions to include in the story. Defaults to 20.
            include_omission_info (bool, optional): Whether to include information about omitted interactions. Defaults to True.
        """
        # exactly one of these must be provided
        if environment and agent:
            raise Exception("Either 'environment' or 'agent' should be provided, not both")
        if not (environment or agent):
            raise Exception("At least one of the parameters should be provided")

        self.environment = environment
        self.agent = agent

        self.purpose = purpose

        self.current_story = context

        self.first_n = first_n
        self.last_n = last_n
        self.include_omission_info = include_omission_info

    def start_story(self, requirements="Start some interesting story about the agents.", number_of_words: int = 100, include_plot_twist: bool = False) -> str:
        """
        Start a new story.
        """
        rendering_configs = {
            "purpose": self.purpose,
            "requirements": requirements,
            "current_simulation_trace": self._current_story(),
            "number_of_words": number_of_words,
            "include_plot_twist": include_plot_twist
        }

        messages = utils.compose_initial_LLM_messages_with_templates("story.start.system.mustache", "story.start.user.mustache",
                                                                     base_module_folder="steering",
                                                                     rendering_configs=rendering_configs)
        next_message = openai_utils.client().send_message(messages, temperature=1.5)

        start = next_message["content"]

        self.current_story += utils.dedent(
            f"""

            ## The story begins

            {start}

            """
        )

        return start

    def continue_story(self, requirements="Continue the story in an interesting way.", number_of_words: int = 100, include_plot_twist: bool = False) -> str:
        """
        Propose a continuation of the story.
        """
        rendering_configs = {
            "purpose": self.purpose,
            "requirements": requirements,
            "current_simulation_trace": self._current_story(),
            "number_of_words": number_of_words,
            "include_plot_twist": include_plot_twist
        }

        messages = utils.compose_initial_LLM_messages_with_templates("story.continuation.system.mustache", "story.continuation.user.mustache",
                                                                     base_module_folder="steering",
                                                                     rendering_configs=rendering_configs)
        next_message = openai_utils.client().send_message(messages)  # , temperature=1.5)

        continuation = next_message["content"]

        self.current_story += utils.dedent(
            f"""

            ## The story continues

            {continuation}

            """
        )

        return continuation

    def _current_story(self) -> str:
        """
        Get the current story.
        """
        interaction_history = ""

        if self.agent is not None:
            interaction_history += self.agent.pretty_current_interactions(first_n=self.first_n, last_n=self.last_n,
                                                                          include_omission_info=self.include_omission_info)
        elif self.environment is not None:
            interaction_history += self.environment.pretty_current_interactions(first_n=self.first_n, last_n=self.last_n,
                                                                                include_omission_info=self.include_omission_info)

        tmp_current_story = self.current_story

        tmp_current_story += utils.dedent(
            f"""

            ## New simulation interactions to consider

            {interaction_history}

            """
        )

        return tmp_current_story
Methods
def continue_story(self, requirements='Continue the story in an interesting way.', number_of_words: int = 100, include_plot_twist: bool = False) ‑> str
Propose a continuation of the story.

def continue_story(self, requirements="Continue the story in an interesting way.", number_of_words: int = 100, include_plot_twist: bool = False) -> str:
    """
    Propose a continuation of the story.
    """
    rendering_configs = {
        "purpose": self.purpose,
        "requirements": requirements,
        "current_simulation_trace": self._current_story(),
        "number_of_words": number_of_words,
        "include_plot_twist": include_plot_twist
    }

    messages = utils.compose_initial_LLM_messages_with_templates("story.continuation.system.mustache", "story.continuation.user.mustache",
                                                                 base_module_folder="steering",
                                                                 rendering_configs=rendering_configs)
    next_message = openai_utils.client().send_message(messages)  # , temperature=1.5)

    continuation = next_message["content"]

    self.current_story += utils.dedent(
        f"""

        ## The story continues

        {continuation}

        """
    )

    return continuation
def start_story(self, requirements='Start some interesting story about the agents.', number_of_words: int = 100, include_plot_twist: bool = False) ‑> str
Start a new story.

def start_story(self, requirements="Start some interesting story about the agents.", number_of_words: int = 100, include_plot_twist: bool = False) -> str:
    """
    Start a new story.
    """
    rendering_configs = {
        "purpose": self.purpose,
        "requirements": requirements,
        "current_simulation_trace": self._current_story(),
        "number_of_words": number_of_words,
        "include_plot_twist": include_plot_twist
    }

    messages = utils.compose_initial_LLM_messages_with_templates("story.start.system.mustache", "story.start.user.mustache",
                                                                 base_module_folder="steering",
                                                                 rendering_configs=rendering_configs)
    next_message = openai_utils.client().send_message(messages, temperature=1.5)

    start = next_message["content"]

    self.current_story += utils.dedent(
        f"""

        ## The story begins

        {start}

        """
    )

    return start
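For example, the opening can be tuned through the optional parameters (story is a TinyStory instance as in the sketch above; the values below are arbitrary):

# Illustrative: request a longer opening that includes a plot twist.
opening = story.start_story(
    requirements="Start a story about an unexpected email the agent receives.",
    number_of_words=200,
    include_plot_twist=True
)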