Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
96 changes: 74 additions & 22 deletions game_simulation/agents/agent.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,11 @@
import random
from utils.text_generation import generate, get_rating
import networkx as nx
from utils.promt import prompt_meta
from utils.time import global_time
from utils.logs import log_output
from utils.states import locations, town_areas
from utils.configs import log_actions, log_plans, log_ratings, log_memories, print_locations, print_actions, print_plans, print_ratings, print_memories

class Agent:

Expand Down Expand Up @@ -54,22 +59,74 @@ def __init__(self, name, description, starting_location, world_graph, use_openai
def __repr__(self):
return f"Agent({self.name}, {self.description}, {self.location})"

def plan(self, global_time, prompt_meta):
def tick(self):
    """Run one simulation step for this agent.

    Sequence: generate a daily plan, execute an action at the current
    location, broadcast the action as a memory to co-located agents,
    compress and rate every co-located agent's memories, rate all
    locations, and move to the top-rated one.  All log text produced
    during the step is appended to the shared ``log_output`` buffer
    keyed by the location the agent STARTED the step in.
    """
    # Cache the log key up front: self.move() changes self.location, and the
    # new location may have no log_output entry yet (the original code
    # indexed log_output[self.location] after moving, risking a KeyError).
    log_key = self.location
    if log_key not in log_output:
        log_output[log_key] = ""

    self.plan()
    if log_plans:
        log_output[log_key] += f"{self.name} plans: {self.plans}\n"
    if print_plans:
        print(f"{self.name} plans: {self.plans}")

    agents = locations.get_location(log_key).get_people()
    other_agents = [agent for agent in agents if agent.name != self.name]

    action = self.execute_action()
    if log_actions:
        log_output[log_key] += f"{self.name} action: {action}\n"
    if print_actions:
        print(f"{self.name} action: {action}")

    # Share this agent's action as a memory with everyone else present.
    for other_agent in other_agents:
        memory = f'[Time: {global_time}. Person: {self.name}. Memory: {action}]'
        other_agent.memories.append(memory)
        if log_memories:
            log_output[log_key] += f"{other_agent.name} remembers: {memory}\n"
        if print_memories:
            print(f"{other_agent.name} remembers: {memory}")

    for agent in agents:
        # BUGFIX: compress_memories now has signature (self, MEMORY_LIMIT=10);
        # passing global_time here silently used the clock as the memory limit.
        agent.compress_memories()
        agent.rate_memories()
        if log_ratings:
            log_output[log_key] += f"{agent.name} memory ratings: {agent.memory_ratings}\n"
        if print_ratings:
            print(f"{agent.name} memory ratings: {agent.memory_ratings}")

    place_ratings = self.rate_locations()

    if log_ratings:
        log_output[log_key] += f"=== UPDATED LOCATION RATINGS {global_time} FOR {self.name}===\n"
        log_output[log_key] += f"{self.name} location ratings: {place_ratings}\n"
    if print_ratings:
        print(f"=== UPDATED LOCATION RATINGS {global_time} FOR {self.name}===\n")
        print(f"{self.name} location ratings: {place_ratings}\n")

    old_location = self.location
    new_location_name = place_ratings[0][0]  # highest-rated location wins
    self.move(new_location_name)

    # NOTE(review): the log write below is gated on print_locations (not
    # log_locations) as in the original — looks like a flag mix-up; confirm.
    if print_locations:
        log_output[log_key] += f"=== UPDATED LOCATIONS AT TIME {global_time} FOR {self.name}===\n"
        log_output[log_key] += f"{self.name} moved from {old_location} to {new_location_name}\n"
    # BUGFIX: these console prints were guarded by print_ratings (copy-paste);
    # they report locations, so gate them on print_locations.
    if print_locations:
        print(f"=== UPDATED LOCATIONS AT TIME {global_time} FOR {self.name}===\n")
        print(f"{self.name} moved from {old_location} to {new_location_name}\n")

def plan(self):
    """Generate the agent's daily plan and store it on ``self.plans``.

    Uses the module-level ``global_time`` as the wake-up hour and wraps the
    prompt in the shared ``prompt_meta`` template before calling ``generate``.
    (The old ``global_time``/``prompt_meta`` parameters were removed; the
    docstring previously still documented them.)
    """
    prompt = (
        "You are {}. The following is your description: {} You just woke up. "
        "What is your goal for today? Write it down in an hourly basis, "
        "starting at {}:00. Write only one or two very short sentences. "
        "Be very brief. Use at most 50 words."
    ).format(self.name, self.description, str(global_time))
    self.plans = generate(prompt_meta.format(prompt), self.use_openai)

def execute_action(self, other_agents, location, global_time, town_areas, prompt_meta):
def execute_action(self,):

"""Executes the agent's action based on their current situation and interactions with other agents.

Expand All @@ -79,8 +136,6 @@ def execute_action(self, other_agents, location, global_time, town_areas, prompt
A list of other Agent objects in the simulation.
location : Location
The current Location object where the agent is located.
global_time : int
The current time in the simulation.
town_areas : dict
A dictionary of Location objects representing different areas in the simulated environment.
prompt_meta : str
Expand All @@ -91,19 +146,20 @@ def execute_action(self, other_agents, location, global_time, town_areas, prompt
action : str
The action executed by the agent.
"""
location = locations.get_location(self.location)
other_agents = [agent for agent in location.get_people() if agent.name != self.name]
people = [agent.name for agent in other_agents]

people = [agent.name for agent in other_agents if agent.location == location]

prompt = "You are {}. Your plans are: {}. You are currently in {} with the following description: {}. It is currently {}:00. The following people are in this area: {}. You can interact with them.".format(self.name, self.plans, location.name, town_areas[location.name], str(global_time), ', '.join(people))

people_description = [f"{agent.name}: {agent.description}" for agent in other_agents if agent.location == location.name]
people_description = [f"{agent.name}: {agent.description}" for agent in other_agents]
prompt += ' You know the following about people: ' + '. '.join(people_description)

prompt += "What do you do in the next hour? Use at most 10 words to explain."
action = generate(prompt_meta.format(prompt), self.use_openai)
return action

def update_memories(self, other_agents, global_time, action_results):
def update_memories(self, other_agents, action_results):

"""
Updates the agent's memories based on their interactions with other agents.
Expand All @@ -112,8 +168,6 @@ def update_memories(self, other_agents, global_time, action_results):
-----------
other_agents : list
A list of other Agent objects in the simulation.
global_time : int
The current time in the simulation.
action_results : dict
A dictionary of the results of each agent's action.
"""
Expand All @@ -122,7 +176,7 @@ def update_memories(self, other_agents, global_time, action_results):
if agent.location == self.location:
self.memories.append('[Time: {}. Person: {}. Memory: {}]\n'.format(str(global_time), agent.name, action_results[agent.name]))

def compress_memories(self, global_time, MEMORY_LIMIT=10):
def compress_memories(self, MEMORY_LIMIT=10):

"""
Compresses the agent's memories to a more manageable and relevant set.
Expand All @@ -145,7 +199,7 @@ def compress_memories(self, global_time, MEMORY_LIMIT=10):
memory_string_to_compress = '.'.join([a[0] for a in relevant_memories])
return '[Recollection at Time {}:00: {}]'.format(str(global_time), memory_string_to_compress)

def rate_memories(self, locations, global_time, prompt_meta):
def rate_memories(self,):

"""
Rates the agent's memories based on their relevance and importance.
Expand Down Expand Up @@ -182,7 +236,7 @@ def rate_memories(self, locations, global_time, prompt_meta):
return memory_ratings


def rate_locations(self, locations, global_time, prompt_meta):
def rate_locations(self,):

"""
Rates different locations in the simulated environment based on the agent's preferences and experiences.
Expand All @@ -191,8 +245,6 @@ def rate_locations(self, locations, global_time, prompt_meta):
-----------
locations : Locations
The Locations object representing different areas in the simulated environment.
global_time : int
The current time in the simulation.
prompt_meta : str
The prompt used to rate the locations.

Expand Down
14 changes: 14 additions & 0 deletions game_simulation/locations/locations.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,21 @@ class Location:
def __init__(self, name, description):
self.name = name
self.description = description
self.people = []

def __str__(self):
return self.name

def add_people(self, people):
    """Add a single agent, or a list of agents, to this location.

    Rebinds ``self.people`` to a new list (the original did the same),
    rather than mutating the existing list in place.
    """
    additions = people if isinstance(people, list) else [people]
    self.people = self.people + additions

def get_people(self):
    """Return the live list of agents at this location (not a copy)."""
    occupants = self.people
    return occupants


def describe(self):
    """Print this location's description to stdout."""
    description = self.description
    print(description)

Expand Down Expand Up @@ -52,6 +63,9 @@ def __init__(self):
def add_location(self, name, description):
    """Register a new Location under ``name``, replacing any existing entry."""
    location = Location(name, description)
    self.locations[name] = location

def get_locations(self):
    """Return the underlying name -> Location mapping (not a copy)."""
    mapping = self.locations
    return mapping

def get_location(self, name):
    """Look up a Location by name; returns None when the name is unknown."""
    return self.locations.get(name, None)

Expand Down
145 changes: 22 additions & 123 deletions game_simulation/main.py
Original file line number Diff line number Diff line change
@@ -1,143 +1,42 @@
import json
import networkx as nx
from agents.agent import Agent
from locations.locations import Locations
from utils.text_generation import summarize_simulation

# Set default value for prompt_meta if not defined elsewhere
prompt_meta = '### Instruction:\n{}\n### Response:'

# Initialize global time and simulation variables
global_time = 0
repeats = 5

log_locations = False
log_actions = True
log_plans = False
log_ratings = False
log_memories = False

print_locations = True
print_actions = True
print_plans = True
print_ratings = True
print_memories = False

use_openai=True

# Start simulation loop
whole_simulation_output = ""

# Load town areas and people from JSON file
with open('simulation_config.json', 'r') as f:
town_data = json.load(f)

town_people = town_data['town_people']
town_areas = town_data['town_areas']

# Create world_graph
world_graph = nx.Graph()
last_town_area = None
for town_area in town_areas.keys():
world_graph.add_node(town_area)
world_graph.add_edge(town_area, town_area) # Add an edge to itself
if last_town_area is not None:
world_graph.add_edge(town_area, last_town_area)
last_town_area = town_area

# Add the edge between the first and the last town areas to complete the cycle
world_graph.add_edge(list(town_areas.keys())[0], last_town_area)
from utils.text_generation import summarize_simulation
from utils.time import global_time
from utils.logs import log_output
from utils.states import locations, whole_simulation_output, town_areas, town_people, world_graph
from utils.configs import repeats, log_locations, print_locations, use_openai
from agents.agent import Agent

# Initialize agents and locations
agents = []
locations = Locations()

for name, description in town_areas.items():
locations.add_location(name, description)

for name, description in town_people.items():
starting_location = description['starting_location']
agents.append(Agent(name, description['description'], starting_location, world_graph, use_openai))

for name, description in town_areas.items():
locations.add_location(name, description)
agent = Agent(name, description['description'], starting_location, world_graph, use_openai)
locations.get_location(starting_location).add_people(agent)

for repeat in range(repeats):
#log_output for one repeat
log_output = ""

print(f"====================== REPEAT {repeat} ======================\n")
log_output += f"====================== REPEAT {repeat} ======================\n"
if log_locations:
log_output += f"=== LOCATIONS AT START OF REPEAT {repeat} ===\n"
log_output += str(locations) + "\n"
if print_locations:
print(f"=== LOCATIONS AT START OF REPEAT {repeat} ===")
print(str(locations) + "\n")

# Plan actions for each agent
for agent in agents:
agent.plan(global_time, prompt_meta)
if log_plans:
log_output += f"{agent.name} plans: {agent.plans}\n"
if print_plans:
print(f"{agent.name} plans: {agent.plans}")

# Execute planned actions and update memories
for agent in agents:
# Execute action
action = agent.execute_action(agents, locations.get_location(agent.location), global_time, town_areas, prompt_meta)
if log_actions:
log_output += f"{agent.name} action: {action}\n"
if print_actions:
print(f"{agent.name} action: {action}")

# Update memories
for other_agent in agents:
if other_agent != agent:
memory = f'[Time: {global_time}. Person: {agent.name}. Memory: {action}]'
other_agent.memories.append(memory)
if log_memories:
log_output += f"{other_agent.name} remembers: {memory}\n"
if print_memories:
print(f"{other_agent.name} remembers: {memory}")

# Compress and rate memories for each agent
for agent in agents:
agent.compress_memories(global_time)
agent.rate_memories(locations, global_time, prompt_meta)
if log_ratings:
log_output += f"{agent.name} memory ratings: {agent.memory_ratings}\n"
if print_ratings:
print(f"{agent.name} memory ratings: {agent.memory_ratings}")

# Rate locations and determine where agents will go next
for agent in agents:
place_ratings = agent.rate_locations(locations, global_time, prompt_meta)
if log_ratings:
log_output += f"=== UPDATED LOCATION RATINGS {global_time} FOR {agent.name}===\n"
log_output += f"{agent.name} location ratings: {place_ratings}\n"
if print_ratings:
print(f"=== UPDATED LOCATION RATINGS {global_time} FOR {agent.name}===\n")
print(f"{agent.name} location ratings: {place_ratings}\n")

old_location = agent.location

new_location_name = place_ratings[0][0]
agent.move(new_location_name)

if print_locations:
log_output += f"=== UPDATED LOCATIONS AT TIME {global_time} FOR {agent.name}===\n"
log_output += f"{agent.name} moved from {old_location} to {new_location_name}\n"
if print_ratings:
print(f"=== UPDATED LOCATIONS AT TIME {global_time} FOR {agent.name}===\n")
print(f"{agent.name} moved from {old_location} to {new_location_name}\n")

print(f"----------------------- SUMMARY FOR REPEAT {repeat} -----------------------")

print(summarize_simulation(log_output=log_output))

whole_simulation_output += log_output
# Plan actions for each location and agent
for location_key in locations.get_locations():
print(f"====================== {location_key} REPEAT {repeat} ======================\n")
log_output[location_key] = f"====================== {location_key} REPEAT {repeat} ======================\n"
location = locations.get_location(location_key)
for agent in location.get_people():
agent.tick()
print(log_output[location_key])
print(summarize_simulation(log_output=log_output[location_key]))

whole_simulation_output += log_output[location_key]
print(f"----------------------- SUMMARY FOR REPEAT {repeat} -----------------------")

# Increment time
global_time += 1

# Write log output to file
Expand Down
Loading