
Commit

better stranger support
Jason committed Feb 27, 2024
1 parent 4dce58f commit b174eec
Showing 5 changed files with 81 additions and 14 deletions.
6 changes: 5 additions & 1 deletion README.md
@@ -197,6 +197,10 @@ This should start a basic webserver that would allow us to view the game state a

3. Pass in to `engine.py` with `--environment` flag.

# engine
When working on the main engine, it is often useful to disable all LLM calls. You can do that with a command such as:
`LLM_IMPORTANCE=0 LLM_ACTION=0 python engine.py --scenario configs/zombie_situation.json --env configs/largev2.tmj`
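
How `engine.py` consumes these flags is not shown in this diff; as a rough sketch (the parsing below is an assumption, not the project's actual code), environment toggles like these are typically read once and used to short-circuit the expensive path:

```python
import os
import random

# Assumed convention: "0" disables the corresponding LLM call, anything else keeps it on.
LLM_IMPORTANCE = os.environ.get("LLM_IMPORTANCE", "1") != "0"
LLM_ACTION = os.environ.get("LLM_ACTION", "1") != "0"

def score_importance(memory_text: str) -> int:
    """Importance scoring with the LLM call short-circuited when disabled."""
    if not LLM_IMPORTANCE:
        # Cheap stand-in so the engine keeps running without any LLM backend.
        return random.randint(2, 5)
    return llm_score(memory_text)  # hypothetical LLM-backed scorer, not part of this commit

def llm_score(memory_text: str) -> int:
    # Placeholder for the real model call.
    return 5
```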



THINKING ABOUT:
@@ -215,4 +219,4 @@ THINKING ABOUT:
* nearby moving vs far away destinations
* make all cognitive modules flags work on a user basis
* normalize flag names
* see a stranger
* improve convo when meeting strangers
16 changes: 11 additions & 5 deletions prompts/general/talk.txt
@@ -5,10 +5,19 @@ prime your thoughts but dont mention: "{{primer}}".

{{relevant_memories}}


{% if other_agent == "stranger" %}
This is your first time talking to this stranger
{% else %}
Below is the current chat history between {{agent}} and {{other_agent}}.

{{previous_conversations}}

{% if convo_types %}
You can use one of the following response types that make the conversation flow more naturally:
{{ convo_types|join("\n") }}
{% endif %}
{% endif %}

{% if agent.meta_questions %}
Before answering, consider "{{agent.meta_questions|random}}"
@@ -19,13 +28,10 @@ DO NOT greet them again.
Do NOT use the word "Hey" too often.
Do not keep agreeing positively.

{% if convo_types %}
You can use one of the following response types that make the conversation flow more naturally:
{{ convo_types|join("\n") }}
{% endif %}

Avoid reiterating previous content.
keep responses to under 3 sentences.
Craft an informal spoken response.
Do not start responses with affirmations.
Only write the response from {agent} and nothing else.
Only write the response from {{agent}} and nothing else.
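
To see what the new stranger branch does to the prompt, here is a minimal sketch that renders a simplified excerpt of the template with plain `jinja2`; the project's own `llm.prompt(...)` wrapper and the full template are not reproduced here:

```python
from jinja2 import Template

# Simplified excerpt of the branch added above (prompts/general/talk.txt).
excerpt = """\
{% if other_agent == "stranger" %}
This is your first time talking to this stranger
{% else %}
Below is the current chat history between {{ agent }} and {{ other_agent }}.

{{ previous_conversations }}
{% endif %}"""

tmpl = Template(excerpt)

# agents.py now passes "stranger" when the target is not in self.connections:
print(tmpl.render(other_agent="stranger", agent="Viktor"))

# Once connected, the real name and chat history are used instead:
print(tmpl.render(other_agent="Natasha", agent="Viktor",
                  previous_conversations="Viktor: Hello again!"))
```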

22 changes: 16 additions & 6 deletions src/agents.py
@@ -364,6 +364,11 @@ def talk(self, opts={}):
"Transactional: Efficiently exchange information or complete tasks."
]
random.shuffle(all_convo_types)
if other_agent.name not in self.connections:
other_agent_name = "stranger"
else:
other_agent_name = other_agent.name

variables = {
'selfContext': self.getSelfContext(),
'relevant_memories': relevant_memories_string,
@@ -372,8 +377,8 @@
'connections': self.connections,
'meta_questions': self.meta_questions or "",
'primer': random.randint(1, 1000000),
'other_agent': other_agent,
"previous_conversations": f"Current Conversation:\n{self.name}\n{previous_conversations}" if previous_conversations else f"Initiate a conversation with {other_agent.name}.",
'other_agent': other_agent_name,
"previous_conversations": f"Current Conversation:\n{self.name}\n{previous_conversations}" if previous_conversations else f"Initiate a conversation with {other_agent_name}.",
}

msg = llm.prompt(prompt_name="talk", variables=variables)
@@ -384,6 +389,11 @@
msg = match.group(1).strip()
else:
msg = msg.split(": ", 1)[-1] if ": " in msg else msg
if other_agent.name not in self.connections:
if other_agent.kind == "human":
# TODO add back location, need current location!!
self.addMemory("observation", f"{timestamp} - {self.name} met {other_agent.name}", timestamp, random.randint(2, 5))
self.connections.append(other_agent.name)

interaction = f"{timestamp} - {self} said to {other_agent}: {msg}"
if self.matrix is not None:
@@ -497,10 +507,10 @@ def perceive(self, other_agents, environment, timestamp):
a_area = environment.get_area_from_coordinates(a.x, a.y)
a_loc = environment.get_location_from_coordinates(a.x, a.y)
location_name = f"{'' if a_area is None else a_area.name} {a_loc.name}"
if a.name not in self.connections:
if a.kind == "human":
self.addMemory("observation", f"{timestamp} - {self.name} met {a.name} at {location_name}", timestamp, random.randint(2, 5))
self.connections.append(a.name)
#if a.name not in self.connections:
# if a.kind == "human":
# self.addMemory("observation", f"{timestamp} - {self.name} met {a.name} at {location_name}", timestamp, random.randint(2, 5))
# self.connections.append(a.name)


if a.status == "dead":
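Putting the pieces together, the connection bookkeeping now happens in `talk()` rather than `perceive()`. The following is a condensed, self-contained sketch of that flow (the class below is a stand-in, not the project's `Agent`, and the LLM call is elided):

```python
from dataclasses import dataclass, field

@dataclass
class MiniAgent:
    name: str
    kind: str = "human"
    connections: list = field(default_factory=list)
    memories: list = field(default_factory=list)

    def talk_to(self, other: "MiniAgent", timestamp: str) -> str:
        # Unknown agents are presented to the prompt as "stranger".
        other_name = other.name if other.name in self.connections else "stranger"
        msg = f"(prompt rendered with other_agent={other_name!r})"  # LLM call elided
        # After the first exchange, remember the meeting and record the connection.
        if other.name not in self.connections and other.kind == "human":
            self.memories.append(f"{timestamp} - {self.name} met {other.name}")
            self.connections.append(other.name)
        return msg

viktor, natasha = MiniAgent("Viktor"), MiniAgent("Natasha")
print(viktor.talk_to(natasha, "2024-01-01 00:00:00"))  # treated as a stranger
print(viktor.talk_to(natasha, "2024-01-01 00:00:10"))  # now a known connection
```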
43 changes: 43 additions & 0 deletions test.py
@@ -229,6 +229,49 @@ def test_embeddings(self):
print(f"Current Memory: {mem}")
print(f"Relevance Score: {Memory.calculateRelevanceScore(mem.embedding, context_embedding)}")
self.assertTrue(len(sorted_memory) > 0)
def test_talk_stranger(self):
agent1_data = {
"name": "Viktor",
"description": "You love physics",
"goal": "Answer questions, think, be rational.",
}
agent2_data = {
"name": "Natasha",
"description": "You love art"
}
agent1 = Agent(agent1_data)
agent2 = Agent(agent2_data)
unix_time = 1704067200
for i in range(2):
timestamp = datetime.fromtimestamp(unix_time).strftime("%Y-%m-%d %H:%M:%S")
response = agent1.talk({ "target": agent2.name, "other_agents": [agent2], "timestamp": timestamp })
msg = f"{agent1} said to {agent2}: {response}"
print(msg)
response = agent2.talk({ "target": agent1.name, "other_agents": [agent1], "timestamp": timestamp })
msg = f"{agent2} said to {agent1}: {response}"
print(msg)
unix_time = unix_time + 10
agent1_data = {
"name": "John",
"description": "Scared of zombies",
}
agent2_data = {
"name": "Alucard",
"description": "shy and confused about what is happening"
}
agent1 = Agent(agent1_data)
agent2 = Agent(agent2_data)
unix_time = 1704067200
for i in range(2):
timestamp = datetime.fromtimestamp(unix_time).strftime("%Y-%m-%d %H:%M:%S")
response = agent1.talk({ "target": agent2.name, "other_agents": [agent2], "timestamp": timestamp })
msg = f"{agent1} said to {agent2}: {response}"
print(msg)
response = agent2.talk({ "target": agent1.name, "other_agents": [agent1], "timestamp": timestamp })
msg = f"{agent2} said to {agent1}: {response}"
print(msg)
unix_time = unix_time + 10


def test_talk(self):
agent1_data = {
8 changes: 6 additions & 2 deletions utils/utils.py
@@ -177,7 +177,7 @@ def generate(self, prompt, fallback="Llm Error"):
self.call_times.append(end_time - start_time)
if len(self.urls) > 1:
pd(f"current url {current_url}")
#print(f"INPUT:\n {prompt}")
print(f"INPUT:\n {prompt}")
#print(f"OUTPUT:\n {msg}")
pd(f"runtime: {end_time - start_time}")
return msg
@@ -265,7 +265,11 @@ def prompt(self, prompt_name, variables=None, fallback="Llm Error"):
'''
REDIS
'''
redis_connection = redis.Redis.from_url(REDIS_URL)
if "REDIS_URL" in os.environ:
redis_connection = redis.Redis.from_url(os.environ["REDIS_URL"])
else:
print("REDIS_URL environment variable is not set.")
redis_connection = None

def print_and_log(content, key):
return
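One consequence of the guarded setup above is that anything using `redis_connection` must now tolerate `None`. A small illustrative sketch (the helper name is hypothetical, not part of the diff):

```python
import os
import redis

# Mirrors the guarded connection in the diff.
redis_connection = (redis.Redis.from_url(os.environ["REDIS_URL"])
                    if "REDIS_URL" in os.environ else None)

def log_event(key: str, content: str) -> None:
    if redis_connection is None:
        return  # Redis disabled: silently skip logging
    redis_connection.lpush(key, content)
```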
