From 10412387b8dc2e44d93bc33ecc5fde23f1b71fa4 Mon Sep 17 00:00:00 2001
From: Jason
Date: Wed, 14 Feb 2024 11:58:57 -0800
Subject: [PATCH] fixed a bug, still refactoring

---
 README.md             |  1 +
 configs/defaults.json |  2 +-
 src/agents.py         | 27 +++++++++++++++++----------
 src/matrix.py         |  4 ++++
 src/memory.py         | 12 ++++++++++--
 utils/utils.py        | 10 ++++++----
 6 files changed, 39 insertions(+), 17 deletions(-)

diff --git a/README.md b/README.md
index eb563ce..817db67 100644
--- a/README.md
+++ b/README.md
@@ -198,6 +198,7 @@ This should start a basic webserver that would allow us to view the game state a
 
 THINKING ABOUT::
 
+* fix time, dont need to pass time everywhere!
 * finalize on logging, data vs flat
 * remove the llm calls in init
 * refactor add_agents bc of jumping
diff --git a/configs/defaults.json b/configs/defaults.json
index 2cf9faf..217740f 100644
--- a/configs/defaults.json
+++ b/configs/defaults.json
@@ -30,7 +30,7 @@
   "MEMORY_QUERY_COUNT": 20,
   "CONVERSATION_DEPTH": 5,
   "CONVERSATION_COOLDOWN": 5,
-  "LLM_IMPORTANCE": 0,
+  "LLM_IMPORTANCE": 1,
   "ALLOW_OBSERVANCE": 1,
   "DEFAULT_NAMES": [
     "Alice",
diff --git a/src/agents.py b/src/agents.py
index 2e7ea29..d934141 100644
--- a/src/agents.py
+++ b/src/agents.py
@@ -66,24 +66,28 @@ def update_goals(self):
         # this assumes 8+ importance is always worth changing /reevaluating goals
         # do i need to update my goals. if so, give me new goals
         relevant_memories = self.getMemories(None,unix_to_strftime(self.matrix.unix_time))
-        relevant_memories_string = "\n".join(f"Memory {i + 1}:\n{memory}" for i, memory in enumerate(relevant_memories)) if relevant_memories else ""
+        relevant_memories_string = "\n".join(f"Memory {i + 1}: \"{memory}\"" for i, memory in enumerate(relevant_memories)) if relevant_memories else ""
         prompt = f'''
-About {self}:
+you are a character in a world.
 {self.getSelfContext()}
 
-{self}'s goal is {self.goal}.
+goal:"{self.goal}".
 And {self}'s recent memories:
 {relevant_memories_string}
-Write out what my new overarching goal should be in a short sentence.
+Write out what my new goal should be in a short sentence given recent memories.
+Take into account what happened recently to help you focus.
 Write in the first person from the point of view of {self}.
-Write in natural short response style.
+Write out the goal only in json:
+{{"goal":"goal"}}
 '''
         msg = llm.generate(prompt)
         self.addMemory("observation",f"I updated my goal to be \"{msg}\"", unix_to_strftime(self.matrix.unix_time), random.randint(5, 8))
         self.goal = msg
 
+    def decide(self):
+        self.matrix.llm_action(self,self.matrix.unix_time)
+
     def ask_meta_questions(self, timestamp):
-        #relevant_memories = self.memory[-50:]
         relevant_memories = self.getMemories(None, timestamp)
         relevant_memories_string = "\n".join(f"Memory {i + 1}:\n{memory}" for i, memory in enumerate(relevant_memories)) if relevant_memories else ""
         vars = {"agent":self,"relevant_memories_string":relevant_memories_string}
@@ -93,7 +97,6 @@ def ask_meta_questions(self, timestamp):
         self.meta_questions.extend(x[1] for x in m if x[1] not in self.meta_questions)
 
     def evaluate_progress(self):
-        #relevant_memories = self.memory[-50:]
         relevant_memories = self.getMemories(self.goal, unix_to_strftime(self.matrix.unix_time))
         relevant_memories_string = "\n".join(f"Memory {i + 1}:\n{memory}" for i, memory in enumerate(relevant_memories)) if relevant_memories else ""
         prompt = f'''
@@ -121,7 +124,8 @@ def evaluate_progress(self):
         if score and explanation:
             self.addMemory("meta", explanation, unix_to_strftime(self.matrix.unix_time) , 10)
         if score and int(score) < 3:
-            self.meta_cognize(unix_to_strftime(self.matrix.unix_time), True)
+            #self.meta_cognize(unix_to_strftime(self.matrix.unix_time), True)
+            pass
 
     def meta_cognize(self,timestamp,force=False):
         #if not force and random.randint(0, 100) < 50:
@@ -549,8 +553,8 @@ def perceive(self, other_agents, environment, timestamp):
     def addMemory(self, kind, content, timestamp=None, score=None,embedding=None):
         memory = None
         if timestamp is None:
-            timestamp = datetime.now()
-            timestamp = timestamp.strftime("%A %B %d, %Y")
+            timestamp = datetime.utcfromtimestamp(self.matrix.unix_time).strftime("%A %B %d, %Y")
+
 
         if kind == "observation":
             if (self.matrix is not None and self.matrix.allow_observance_flag == 1):
@@ -669,6 +673,9 @@ def __init__(self, dictionary):
             min_relevancy_score = min(min_relevancy_score, relevancy_score)
             max_relevancy_score = max(max_relevancy_score, relevancy_score)
 
+            #print("PPPP")
+            #print(mem.content)
+            #print(mem.kind)
             recency_score = Memory.calculateRecencyScore(mem.last_accessed_at, timestamp)
             min_recency_score = min(min_recency_score, recency_score)
             max_recency_score = max(max_recency_score, recency_score)
diff --git a/src/matrix.py b/src/matrix.py
index 579bf3f..8f90627 100644
--- a/src/matrix.py
+++ b/src/matrix.py
@@ -25,6 +25,7 @@ def set_globals(config):
     for key, value in config.items():
         globals()[key] = value
 
+
 class Matrix:
     def __init__(self, config={}):
         set_globals(config)
@@ -49,6 +50,8 @@ def __init__(self, config={}):
         self.cur_step = 0
         self.current_substep = 0
         self.unix_time = DEFAULT_TIME
+        #print(f"PPPP {self.unix_time} {type(self.unix_time)}")
+        #print(f"PPPP {LLM_IMPORTANCE} OOOOOO")
         self.status = "init"
         self.conversation_counter = 0
         self.sim_start_time = None
@@ -62,6 +65,7 @@ def __init__(self, config={}):
         self.perception_range = PERCEPTION_RANGE
         self.allow_movement = ALLOW_MOVEMENT
         self.model = MODEL
+        self.replay = None
 
         self.add_to_logs({"step_type":"matrix_init","data":config})
         self.agent_locks = { agent: threading.Lock() for agent in self.agents }
diff --git a/src/memory.py b/src/memory.py
index bca17d8..d779e26 100644
--- a/src/memory.py
+++ b/src/memory.py
@@ -12,13 +12,19 @@ def __init__(self, kind, content, created_at, last_accessed_at, score=None,embed
         self.kind = kind
         self.content = content
         self.created_at = created_at
-        self.last_accessed_at = last_accessed_at
-        if not isinstance(self.last_accessed_at, str):
+        if not isinstance(last_accessed_at, str):
             print("last_accessed_at should be string timestamp format")
+            self.last_accessed_at = unix_to_strftime(last_accessed_at)
+            print(type(self.last_accessed_at))
+        else:
+            self.last_accessed_at = last_accessed_at
+        #print(f"WTF {content} {type(created_at)} {type(last_accessed_at)} ")
+        #print(f"{type(LLM_IMPORTANCE)} {LLM_IMPORTANCE}")
         if LLM_IMPORTANCE == 0:
             self.importance = score
         else:
             self.importance = Memory.calculateImportanceScore(self.content, memories)
+
         self.recency_score = 1
         self.relevancy_score = 1
         self.overall_score = 1
@@ -107,6 +113,8 @@ def calculateRelevanceScore(memory_embedding, context_embedding):
 
     def calculateRecencyScore(last_accessed_at, time, decay=0.99):
         try:
+            #print(f"laa {last_accessed_at} {type(last_accessed_at)}")
+            #print(f"time {time} {type(time)}")
             last_accessed_at = int(datetime.strptime(last_accessed_at, "%Y-%m-%d %H:%M:%S").timestamp())
             time = int(datetime.strptime(time, "%Y-%m-%d %H:%M:%S").timestamp())
             return float(decay ** int(time - last_accessed_at))
diff --git a/utils/utils.py b/utils/utils.py
index a277356..9e2a9e0 100644
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -1,4 +1,5 @@
 from configs.configs import *
+#TODO remove this
 import requests
 import time
 import os
@@ -133,6 +134,8 @@ def generate(self, prompt, fallback="Llm Error"):
             "prompt": prompt,
             "stream": False
         }
+        #"temperature": 0.4,
+        #print(data)
         current_url = self.urls[self.call_counter % len(self.urls)]
         try:
             if self.model == "powerinf":
@@ -162,9 +165,9 @@ def generate(self, prompt, fallback="Llm Error"):
             self.log_calls(prompt, msg, end_time - start_time)
             self.call_times.append(end_time - start_time)
             if len(self.urls) > 1:
-              pd(f"current url {current_url}")
-            pd(f"INPUT:\n {prompt}")
-            pd(f"OUTPUT:\n {msg}")
+                pd(f"current url {current_url}")
+            #print(f"INPUT:\n {prompt}")
+            #print(f"OUTPUT:\n {msg}")
             pd(f"runtime: {end_time - start_time}")
             return msg
 
@@ -247,7 +250,6 @@ def prompt(self, prompt_name, variables=None, fallback="Llm Error"):
         return output
 
 llm = Llm()
-
 '''
 REDIS
 '''