Commit: refactor and testing new action system
Jason committed Mar 2, 2024
1 parent 048e3fe commit 064000d
Showing 13 changed files with 277 additions and 26 deletions.
14 changes: 14 additions & 0 deletions README.md
@@ -222,3 +222,17 @@ THINKING ABOUT:
* make all cognitive modules flags work on a user basis
* normalize flag names
* improve convo when meeting strangers


# changes over time
* moving objects
* information spreading
* building stuff
* people dead/people born
* control world via some kind of discovery
* resource mining
* breakups
* people move
* things growing/shrinking
* things getting destroyed

3 changes: 1 addition & 2 deletions configs/configs.py
@@ -59,8 +59,7 @@
"Learn and share intriguing stories.",
"Help others in times of need."
]
#DEFAULT_ACTIONS = ["move", "talk", "stay", "continue"]
DEFAULT_ACTIONS = ["move", "talk","continue_to_destination","meta_cognize"]
DEFAULT_ACTIONS = ["move", "talk","continue_to_destination","meta_cognize","fine_move"]
DEFAULT_QUESTIONS = [
{ "who": "all", "question": "What have you learned so far?" },
{ "who": "all", "question": "What is your most cherished memory and why?" },
7 changes: 7 additions & 0 deletions prompts/general/decide.txt
@@ -31,6 +31,13 @@ Currently, {{agent}} can talk to: {{agents_available_to_talk}}
Currently, {{agent}} is at {{area}} {{location}} at {{time}} and can see the following: {{objects}}.
{{agent}} can only move to one of these places: {{spatial_memory}}

{% if perceived_directions|length > 0 %}
You can see
{% for pd in perceived_directions %}
{{pd['string']}}
{% endfor %}
{% endif %}

{% if agent.meta_questions %}
When choosing your action, take into consideration "{{agent.meta_questions|random}}"
{% endif %}
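For reference, a minimal sketch of how the new perceived_directions block could render, assuming the project feeds these prompts through a Jinja-style engine; the sample entry mirrors the dicts built in Agent.perceive later in this commit, but the values themselves are made up.

# Rendering sketch (assumption: prompts are rendered with a Jinja2-style engine).
from jinja2 import Template

fragment = Template(
    "{% if perceived_directions|length > 0 %}"
    "You can see\n"
    "{% for pd in perceived_directions %}{{ pd['string'] }}\n{% endfor %}"
    "{% endif %}"
)

# Sample data shaped like the dicts appended in Agent.perceive (values invented).
sample = [
    {"type": "agent", "name": "Anne", "at": (1, -1),
     "direction": "up-right", "string": "Anne is to your up-right"},
]
print(fragment.render(perceived_directions=sample))
# You can see
# Anne is to your up-right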
2 changes: 1 addition & 1 deletion prompts/general/talk.txt
@@ -1,4 +1,4 @@
roleplay
roleplay as {{agent}}
{{selfContext}}
{{relevant_memories}}

Binary file added src/actions/.fine_move_action.py.swp
Binary file not shown.
Binary file not shown.
9 changes: 9 additions & 0 deletions src/actions/action.py
@@ -0,0 +1,9 @@
class Action:
def __init__(self, name):
self.name = name

def execute(self):
raise NotImplementedError("Subclasses must implement the execute method")

    def example(self):
raise NotImplementedError("Subclasses must implement the example method")
150 changes: 150 additions & 0 deletions src/actions/decide.py
@@ -0,0 +1,150 @@
class DecideAction(Action):
    # Assumes Action, random, re, llm, unix_to_strftime, print_and_log,
    # find_most_similar, CONVERSATION_DEPTH and CONVERSATION_COOLDOWN are
    # provided by the surrounding project (imports are not shown in this diff).
    def execute(self, agent):
        if self.matrix.llm_action_flag == 1 and agent.kind != 'zombie':
            agent.llm_decide()
        else:
            agent.deterministic_decide()



    def llm_decide(self):
agent = self
unix_time = self.matrix.unix_time
if agent.status == "dead":
return agent

        # At the start of each day (unix_time divisible by 86400), make plans
if unix_time % 86400 == 0 and self.matrix.allow_plan_flag == 1:
agent.make_plans(unix_to_strftime(unix_time))

        # If the importance of recent memories exceeds the reflect threshold, reflect
if self.matrix.allow_reflect_flag == 1 and agent.recent_memories_importance() > self.matrix.reflect_threshold:
agent.reflect(unix_to_strftime(unix_time))

if self.matrix.allow_meta_flag == 1 and random.randint(0,100) < 50:
agent.evaluate_progress()

agent.conversation_cooldown -= 1

        # If the agent's conversation has reached CONVERSATION_DEPTH messages,
        # summarize it for both participants and reset their cooldowns
if agent.last_conversation is not None:
if len(agent.last_conversation.messages) >= CONVERSATION_DEPTH:
other_agent = agent.last_conversation.other_agent

agent.summarize_conversation(unix_to_strftime(unix_time))
other_agent.summarize_conversation(unix_to_strftime(unix_time))

agent.conversation_cooldown = CONVERSATION_COOLDOWN
other_agent.conversation_cooldown = CONVERSATION_COOLDOWN

        # If the agent is locked into a conversation, skip deciding and let them talk
if agent.is_locked_to_convo():
agent.talk({ "other_agents": [agent.last_conversation.other_agent], "timestamp": unix_to_strftime(unix_time) })
return agent

        perceived_agents, perceived_locations, perceived_areas, perceived_objects, perceived_directions = agent.perceive([a for a in self.matrix.agents if a != agent], self.matrix.environment, unix_to_strftime(unix_time))

relevant_memories = agent.getMemories(agent.goal, unix_to_strftime(unix_time))
relevant_memories_string = "\n".join(f"Memory {i + 1}:\n{memory}" for i, memory in enumerate(relevant_memories)) if relevant_memories else ""
current_location = self.matrix.environment.get_location_from_coordinates(agent.x, agent.y)
current_area = self.matrix.environment.get_area_from_coordinates(agent.x, agent.y)
if agent.last_conversation is not None:
relevant_memories_string += f"\n{agent} is currently in a conversation with {agent.last_conversation.other_agent}.\n"

other_agents = [a for a in perceived_agents if a.status != "dead" and a.kind != "zombie"]

#valid_actions = ["stay"]
#example_strings = "\n\nExplanation: George will stay because it is still too early to go outside.\nAnswer: stay"
valid_actions = []
example_strings = "\n\n"
agents_available_to_talk = []

if "move" in agent.actions and not agent.is_locked_to_convo() and self.matrix.allow_movement == 1:
# You can move, and have not decided where to move yet
valid_actions.append("move <location>")
example_strings = example_strings + "\n\nExplanation: George will move because he needs to be at the Park at 18:00.\nAnswer: move Park"

if "continue_to_destination" in agent.actions and agent.current_destination is not None and not agent.is_locked_to_convo() and self.matrix.allow_movement == 1:
# You can move, and have already decided where to move
valid_actions.append("continue_to_destination")
example_strings = example_strings + "\n\nExplanation: George will continue travelling to the Park because he wants to be there by 18:00.\nAnswer: continue_to_destination"
        if random.randint(0, 100) < 10 and self.matrix.allow_meta_flag == 1 and "meta_cognize" in agent.actions:
            valid_actions.append("meta_cognize")
            example_strings = example_strings + "\n\nExplanation: George will meta_cognize because he wants to improve his strategy towards his goal.\nAnswer: meta_cognize"

if "talk" in agent.actions and not agent.is_locked_to_convo() and agent.conversation_cooldown <= 0:
agents_available_to_talk = [a for a in other_agents if not a.is_locked_to_convo() and a.conversation_cooldown <= 0]
if len(agents_available_to_talk) > 0:
valid_actions.append("talk <person to talk to>")
example_strings = example_strings + "\n\nExplanation: George will talk to Anne because he is trying to make new friends.\nAnswer: talk Anne"

if "kill" in agent.actions and len(perceived_agents) > 0 and not agent.is_locked_to_convo():
valid_actions.append("kill <person to kill>")
example_strings = example_strings + "\n\nExplanation: George will kill Anne because no one else is around.\nAnswer: kill Anne"

        if len(valid_actions) == 0 and len(agent.destination_cache) > 0:
            interaction = f"{agent} is travelling to {self.matrix.environment.get_location_from_coordinates(agent.destination_cache[-1][0], agent.destination_cache[-1][1]).name}"
            print_and_log(interaction, f"{self.matrix.id}:events:{agent.name}")
            agent.move()
            return agent

        objects = [obj.name.lower() for obj in perceived_objects] + [a.name.lower() for a in perceived_agents if a.kind != "human"]

variables = {
"selfContext": agent.getSelfContext(),
"relevant_memories": relevant_memories_string,
"agent": agent,
"other_agents": [a.name for a in other_agents],
"agents_available_to_talk": [a.name for a in agents_available_to_talk],
'objects': objects,
'examples': example_strings,
'actions': valid_actions,
'location': current_location.name if current_location is not None else "",
'area': current_area if current_area is not None else "",
            'spatial_memory': [loc.name for loc in agent.spatial_memory],
            'perceived_directions': perceived_directions,
            'time': unix_to_strftime(unix_time)
}

msg = llm.prompt("decide", variables)
match = re.search(r"Answer:\s*(.+)", msg)
explanation_match = re.search(r"Explanation:\s*(.+)", msg)
explanation = explanation_match.group(1) if explanation_match else None

msg = match.group(1) if match else None

if msg is None:
return "stay", ""

        parts = msg.split(" ", 1)
        decision = parts[0]
        parameters = parts[1] if len(parts) > 1 else ""

if decision == "talk":
if len(agents_available_to_talk) > 0:
agent.talk({ "target": parameters, "other_agents": agents_available_to_talk, "timestamp": unix_to_strftime(unix_time) })
                self.matrix.conversation_counter += 1
elif decision == "move":
agent.move({ "target": parameters, "environment": self.matrix.environment })
elif decision == "continue_to_destination":
if agent.current_destination is not None:
agent.move({ "environment": self.environment })
elif decision == "meta_cognize":
agent.meta_cognize(unix_to_strftime(unix_time),True)
elif decision == "kill":
if len(other_agents) > 0:
target = find_most_similar(parameters, [a.name for a in other_agents])
for a in other_agents:
if target == a.name:
agent.kill(a, unix_to_strftime(unix_time))
if a.status == "dead":
witnesses = (set(perceived_agents) - {a})
for witness in witnesses:
                                witness.addMemory("perceived", f"{a} was murdered by {agent} at {self.matrix.environment.get_area_from_coordinates(a.x, a.y)} {self.matrix.environment.get_location_from_coordinates(a.x, a.y)}", unix_to_strftime(unix_time), 9)

memory = agent.addMemory("decision",f"I decided to {decision} because {explanation}",unix_to_strftime(unix_time),random.randint(1,4))
if memory and memory.importance >= 6:
agent.update_goals()
return agent
52 changes: 52 additions & 0 deletions src/actions/fine_move_action.py
@@ -0,0 +1,52 @@
import re
class FineMoveAction:
    # For fine_move, the agent needs to know who/what is nearby; perception
    # needs to be updated to provide that info, or it should be passed in here.
    @classmethod
    def description(cls):
        return "to control the direction to move in"

@classmethod
def example_usage(cls):
return "fine_move <up|up-left|up-right|down|down-left|down-right|left|right>"

@classmethod
def explanation(cls):
return "george will \"fine_move right\" to walk towards the car"

@classmethod
def llm_param_cleanup(cls):
# method to take output from cli and clean it up
print("this is a class method in finemoveaction.")

@classmethod
def act(cls,agent, pre_processed_direction):
current_x = agent.x
current_y = agent.y
pattern = r'\b(up-left|up-right|down-left|down-right|up|down|left|right)\b'
match = re.search(pattern, pre_processed_direction)
if match:
direction = match.group(1)
else:
direction = "current"
if direction == "up":
new_x, new_y = current_x, current_y - 1
elif direction == "down":
new_x, new_y = current_x, current_y + 1
elif direction == "left":
new_x, new_y = current_x - 1, current_y
elif direction == "right":
new_x, new_y = current_x + 1, current_y
elif direction == "up-left":
new_x, new_y = current_x - 1, current_y - 1
elif direction == "up-right":
new_x, new_y = current_x + 1, current_y - 1
elif direction == "down-left":
new_x, new_y = current_x - 1, current_y + 1
elif direction == "down-right":
new_x, new_y = current_x + 1, current_y + 1
elif direction == "current":
new_x, new_y = current_x, current_y
else:
new_x, new_y = current_x, current_y
agent.x = new_x
agent.y = new_y
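A minimal sketch of exercising the new fine_move action in isolation; the import path and the bare-bones StubAgent are assumptions for illustration, not part of this commit.

# Hypothetical usage sketch; import path and StubAgent are assumptions.
from fine_move_action import FineMoveAction

class StubAgent:
    def __init__(self, x, y):
        self.x = x
        self.y = y

agent = StubAgent(x=5, y=5)
# act() extracts the direction token from free-form LLM output via regex.
FineMoveAction.act(agent, "Answer: fine_move up-left")
print(agent.x, agent.y)  # 4 4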
46 changes: 28 additions & 18 deletions src/agents.py
@@ -12,6 +12,7 @@
import heapq
import re
from fuzzywuzzy import fuzz
#from FineMoveAction import FineMoveAction


class Agent:
@@ -455,12 +456,25 @@ def add_short_memory(self, content, timestamp, n=SHORT_MEMORY_CAPACITY):

self.short_memory.append(content)

def calculate_perceived_direction(self, dx, dy):
if dx == 0 and dy == 0:
return "current"
elif dy == -1:
return "up" if dx == 0 else "up-left" if dx == -1 else "up-right" if dx == 1 else "unknown"
elif dy == 1:
return "down" if dx == 0 else "down-left" if dx == -1 else "down-right" if dx == 1 else "unknown"
elif dy == 0:
return "left" if dx == -1 else "right" if dx == 1 else "unknown"
else:
return "unknown"

def perceive(self, other_agents, environment, timestamp):
perceived_objects = []
perceived_locations = []
perceived_agents = []
perceived_areas = []
perceived_directions = []
        if (self.matrix is not None and self.matrix.allow_observance_flag == 0) or (self.matrix is None and ALLOW_OBSERVE == 0):
            return perceived_agents, perceived_locations, perceived_areas, perceived_objects, perceived_directions

@@ -472,6 +486,8 @@ def perceive(self, other_agents, environment, timestamp):

# Vector Directions
directions = [(1, 0), (-1, 0), (0, 1), (0, -1), (0, 0), (1, 1), (-1, 1), (1, -1), (-1, -1)]
direction_names = ["right", "left", "down", "up", "current", "down-right", "up-right", "down-left", "up-left"]


for direction in directions:
dx, dy = direction
@@ -510,15 +526,12 @@ def perceive(self, other_agents, environment, timestamp):
interaction = f"{timestamp} - {self} saw {a.kind} at {location_name}"
self.addMemory("observation", interaction, timestamp, random.randint(6, 9))

#if a.name not in self.connections:
#self.connections.append(a.name)

print_and_log(interaction, f"{self.matrix.id}:events:{self.name}")

if a.name not in self.connections:
self.connections.append(a.name)
direction_vector = (a.x - self.x, a.y - self.y)
perceived_directions.append({"type": "agent", "name": a.name, "at": direction_vector, "direction": self.calculate_perceived_direction(*direction_vector),"string": f"{a.name} is to your {self.calculate_perceived_direction(*direction_vector)}"})

perceived_locations = []
perceived_areas = []
perceived_objects = []

for coordinate in perceived_coordinates:
loc = environment.get_location_from_coordinates(coordinate[0], coordinate[1])
@@ -532,18 +545,15 @@ def perceive(self, other_agents, environment, timestamp):
if self.kind != "zombie":
for obj in perceived_objects:
interaction = f"{timestamp} - {self} saw {obj.name.lower()} at {obj.area.name} of {obj.area.location.name}."
if self.matrix is not None:
print_and_log(interaction, f"{self.matrix.id}:events:{self.name}")
print_and_log(interaction, f"{self.matrix.id}:agent_conversations")

self.addMemory("observation", interaction, timestamp, random.randint(0, 2))
#direction_vector = (obj.x - self.x, obj.y - self.y)
#perceived_directions.append({"type": "object", "name": obj.name, "at": direction_vector, "direction": self.calculate_perceived_direction(*direction_vector)})


for loc in perceived_locations:
if loc not in self.spatial_memory:
interaction = f"{timestamp} - {self} discovered {loc.name}."
if self.matrix is not None:
print_and_log(interaction, f"{self.matrix.id}:events:{self.name}")
print_and_log(interaction, f"{self.matrix.id}:agent_conversations")

self.addMemory("observation", interaction, timestamp, random.randint(2, 5))
self.spatial_memory.append(loc)
Expand All @@ -552,7 +562,7 @@ def perceive(self, other_agents, environment, timestamp):
if self.matrix:
self.matrix.add_to_logs({"agent_id":self.mid,"step_type":"perceived","perceived_agents":perceived_agent_ids,"perceived_locations":[],"perceived_areas":[],"perceived_objects":[]})
#missing locations,areas,objects
return perceived_agents, perceived_locations, perceived_areas, perceived_objects
        return perceived_agents, perceived_locations, perceived_areas, perceived_objects, perceived_directions

def addMemory(self, kind, content, timestamp=None, score=None,embedding=None):
memory = None
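As a quick sanity check of the direction naming that perceive now produces, the (dx, dy) offsets map to names as sketched below; calling the method unbound with a None self only works because it never touches self, and the import path is an assumption.

# Hypothetical sanity-check sketch; import path is an assumption.
from agents import Agent

offsets = [(0, -1), (1, -1), (-1, 0), (1, 1), (0, 0)]
for dx, dy in offsets:
    print((dx, dy), Agent.calculate_perceived_direction(None, dx, dy))
# (0, -1) up
# (1, -1) up-right
# (-1, 0) left
# (1, 1) down-right
# (0, 0) current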