Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: add context to conversations #20

Merged
merged 4 commits into from
Jul 4, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
57 changes: 26 additions & 31 deletions app/bot/__init__.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
import random
import logging

import discord

import globalconf as _globalconf
from . import botconf as _botconf
from . import bothist as _bothist
from . import llm

logger = logging.getLogger(__name__)
Expand All @@ -16,12 +16,12 @@


def _is_command(text: str) -> bool:
return text.startswith(_botconf.botconfig.command_prefix)
return text.startswith(_botconf.bot_config.command_prefix)


def _is_greeting(message: discord.Message) -> bool:
has_greeting_prefix = False
for greeting in _botconf.botconfig.greetings:
for greeting in _botconf.bot_config.greetings:
if message.content.lower().startswith(greeting):
has_greeting_prefix = True
break
Expand Down Expand Up @@ -79,7 +79,7 @@ async def _handle_command(
message: discord.Message,
):
response = ""
pre = _botconf.botconfig.command_prefix
pre = _botconf.bot_config.command_prefix
match command.lower():
case "help" | "h":
response += (
Expand All @@ -90,10 +90,10 @@ async def _handle_command(
)

case "resources" | "r":
if len(_botconf.botconfig.resources) == 0:
if len(_botconf.bot_config.resources) == 0:
response += "There are currently no resources"
else:
for r in _botconf.botconfig.resources:
for r in _botconf.bot_config.resources:
response += "- " + str(r) + "\n"
case _:
response += "Unknown command. Type `" + pre+"help` for help"
Expand All @@ -110,58 +110,53 @@ async def on_ready():
async def on_message(message: discord.Message):
# Don't respond to messages from different guilds if it is enforced
if not _in_guild(message):
if _botconf.botconfig.enforce_guild:
if _botconf.bot_config.enforce_guild:
return

# Don't respond to this bot's own messages
if message.author == client.user:
return

if _is_greeting(message):
logger.info("received greeting")
greeting = random.choice(_botconf.botconfig.greetings)
logger.info(f"greeting: {greeting}")
response = await llm.generate_response(
message,
_botconf.botconfig.system_prompt +
" You will respond with a somewhat short greeting and mention their name." +
            f" You will incorporate the phrase \"{greeting}\" into your greeting." +
f" Their name is {message.author.name}.",
)
logger.info(f"response: `{response}`")
if response is not None:
await message.reply(response)
await client.change_presence(status=discord.Status.online)
else:
await client.change_presence(status=discord.Status.idle)
return

if _is_command(message.content):
logger.info("received command")
split_message = message.content.strip(" \t\n").split()
command = split_message[0][len(_botconf.botconfig.command_prefix):]
command = split_message[0][len(_botconf.bot_config.command_prefix):]
args = split_message[1:]

await _handle_command(command, args, message)
return

# Add message to history
_bothist.bot_history.add_message([message])

if client.user in message.mentions:
logger.info("received message")

response = await llm.generate_response(
message,
_botconf.botconfig.system_prompt +
f" The user's name is {message.author.name}",
_botconf.bot_config.system_prompt,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This doesn't make a lot of sense with how chat system prompts work, afaik. Essentially the first message in the chat history will be your system prompt, with a role of system, and the content of your system prompt.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

)
logger.info(f"response: `{response}`")
if response is not None:
if len(response) > 2000:
# Split message into <=2000 character chunks
message_chunks: list[discord.Message] = []
for response_chunk in _split_text(response):
await message.reply(response_chunk)
message_chunks.append(await message.reply(response_chunk))

_bothist.bot_history.add_message(
message_chunks,
is_bot=True,
)
else:
await message.reply(response)
# Reply to the message, and add the reply to the history
_bothist.bot_history.add_message(
[await message.reply(response)],
is_bot=True,
)

await client.change_presence(status=discord.Status.online)
else:
await client.change_presence(status=discord.Status.idle)
return
logger.info("message didn't mention the bot")
49 changes: 29 additions & 20 deletions app/bot/botconf.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ class BotConfig:
llm_enabled: bool
llm_model: str
auto_pull_model: bool
history_length: int
system_prompt: str

def __init__(self):
Expand All @@ -50,6 +51,7 @@ def __init__(self):
self.llm_enabled = False
self.llm_model = "llama2"
self.auto_pull_model = False
self.history_length = 30
self.system_prompt = ""

def load_from_file(self, f):
Expand All @@ -69,6 +71,7 @@ def load_from_file(self, f):
self.llm_enabled = merged_config["llm_enabled"]
self.llm_model = merged_config["llm_model"]
self.auto_pull_model = merged_config["auto_pull_model"]
self.history_length = merged_config["history_length"]
self.system_prompt = merged_config["system_prompt"]

logger.debug(f"Merged Config:\n{yaml.dump(merged_config)}")
Expand Down Expand Up @@ -185,35 +188,41 @@ def _merge_configs(user_config: Any, default_config: Any) -> dict[Any, Any]:
else:
merged_config["auto_pull_model"] = default_config["auto_pull_model"]

if not _has_key_of_type(default_config, "history_length", int):
raise Exception("default_config is missing a key `history_length`")
if _has_key_of_type(user_config, "history_length", int):
merged_config["history_length"] = user_config["history_length"]
else:
merged_config["history_length"] = default_config["history_length"]

other_prompt = (
# FIXME: This is a bad place to put the information about commands
" You can help people if they run the command" +
f" `{merged_config['command_prefix']}help`." +
" You can give information about resources if they type the" +
f" command `{merged_config['command_prefix']}resources`." +
" The users' messages will all be prefixed with" +
" (user_name user_id), where the user's name is user_name, and" +
" the user's id is user_id (the angle brackets are required)." +
" To mention someone, use their user_id with angle brackets and @."
)
name_prompt = (
"" if merged_config["bot_name"] == ""
else f" Your name is {merged_config['bot_name']}."
)

if not _has_key_of_type(default_config, "system_prompt", str):
raise Exception("default_config is missing a key `system_prompt`")
if _has_key_of_type(user_config, "system_prompt", str):
# FIXME: This is a bad place to put the information about commands
name_prompt = (
"" if merged_config["bot_name"] == ""
else f" Your name is {merged_config['bot_name']}."
)
merged_config["system_prompt"] = (
user_config["system_prompt"] + name_prompt +
" You can help people if they run the command" +
f" `{merged_config['command_prefix']}help`." +
" You can give information about resources if they type the" +
f" command `{merged_config['command_prefix']}resources`."
user_config["system_prompt"] + name_prompt + other_prompt
)
else:
name_prompt = (
"" if merged_config["bot_name"] == ""
else f" Your name is {merged_config['bot_name']}."
)
merged_config["system_prompt"] = (
default_config["system_prompt"] + name_prompt +
" You can help people if they run the command" +
f" `{merged_config['command_prefix']}help`." +
" You can give information about resources if they type the" +
f"command `{merged_config['command_prefix']}resources`."
default_config["system_prompt"] + name_prompt + other_prompt
)

return merged_config


botconfig = BotConfig()
bot_config = BotConfig()
66 changes: 66 additions & 0 deletions app/bot/bothist.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
import discord

from . import botconf as _botconf


class MessageHistory:
    """Per-channel chat history, stored as role/content dicts ready for the LLM API."""

    # Maps a Discord channel ID to that channel's ordered list of
    # {"role": ..., "content": ...} entries.
    message_histories: dict[int, list[dict[str, str]]]

    def __init__(self) -> None:
        self.message_histories = {}

    # NOTE: This method can add messages out of order
    def add_message(
        self,
        message_list: list[discord.Message],
        is_bot: bool = False
    ) -> None:
        """
        Merge a list of messages into one history entry and record it.

        The contents of `message_list` are joined (space-separated) into a
        single dict, which is appended to the history of the first
        message's channel.  That channel's history is then capped at the
        last `history_length` entries from the bot config.

        If `is_bot` is True the entry's role is "assistant"; otherwise it
        is "user" and the content is prefixed with
        `(user_name <@user_id>)`, built from the first message's author
        display name and mention string.
        """

        # Nothing to record for an empty batch.
        if not message_list:
            return

        first = message_list[0]
        joined = " ".join(m.content for m in message_list)
        if is_bot:
            role = "assistant"
            text = joined
        else:
            # Identify the speaker so the LLM can tell users apart.
            who = first.author
            role = "user"
            text = f"({who.display_name} {who.mention}) " + joined

        entry = {"role": role, "content": text}

        channel_id = first.channel.id
        existing = self.message_histories.get(channel_id)
        if existing is None:
            # First entry for this channel.
            self.message_histories[channel_id] = [entry]
        else:
            existing.append(entry)
            limit = _botconf.bot_config.history_length
            if len(existing) > limit:
                # Keep only the most recent `limit` entries.
                self.message_histories[channel_id] = existing[-limit:]


# Module-level singleton shared by the bot's event handlers and the LLM client.
bot_history = MessageHistory()
26 changes: 16 additions & 10 deletions app/bot/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@

import globalconf as _globalconf
from . import botconf as _botconf
from . import bothist as _bothist

import discord

Expand All @@ -16,9 +17,9 @@

async def generate_response(
message: discord.Message,
system_prompt: str = _botconf.botconfig.system_prompt,
system_prompt: str = _botconf.bot_config.system_prompt,
# TODO: Use this again
auto_pull_model: bool = _botconf.botconfig.auto_pull_model
auto_pull_model: bool = _botconf.bot_config.auto_pull_model
) -> str | None:
url = f"http://{_globalconf.LLM_HOST}:{_globalconf.LLM_PORT}/api/chat"
logger.info(f"url: {url}")
Expand All @@ -31,17 +32,22 @@ async def generate_response(
f"User prompt:\n{message.content}"
)

messages: list[dict[str, str]] = [
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hmm. Well, it's not the most efficient, but this does let you set the system prompt dynamically.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What would a more efficient way be? This is the exact same format that the api needs, so we would have to convert it to this eventually

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You're recalculating the message array. Could store the whole thing rather than concatenating the system prompt every time. Not a big deal though

{"role": "system", "content": system_prompt},
*_bothist.bot_history.message_histories[message.channel.id],
]

logger.info(", ".join([
f"{{{m['role'][0:1]}: '{m['content'][0:10]}'}}"
for m in messages
]))

async with aiohttp.ClientSession() as cs:
try:
async with cs.post(url, json={
"model": _botconf.botconfig.llm_model,
"model": _botconf.bot_config.llm_model,
"stream": False,
"messages": [
{"role": "system",
"content": system_prompt},
{"role": "user",
"content": message.content},
],
"messages": messages,
}) as res:
data = await res.json()
if "error" in data:
Expand All @@ -55,6 +61,6 @@ async def generate_response(
return data["message"]["content"]

except Exception as e:
logger.error(f"{e}\nType: {type(e)}")
logger.error(f"{type(e)}: {e}")

return None
4 changes: 4 additions & 0 deletions app/default-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -32,13 +32,17 @@ llm_model: llama2

# Whether to automatically pull the `llm_model` if it is
# not available on the Ollama server
# FIXME: Currently unused
auto_pull_model: false

# System prompt to pass to the LLM.
# Use this to customize the responses of the LLM.
# (e.g. "You are a robot built by the Raspberry Pi Club")
system_prompt: ""

# Maximum message history length.
history_length: 30


# List of resources to show when `!resources` is run.
# The `name` and `link` fields are mandatory, `desc` is optional.
Expand Down
3 changes: 2 additions & 1 deletion app/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,14 +85,15 @@
logger.info(f"LLM_HOST={globalconf.LLM_HOST}")
logger.info(f"LLM_PORT={globalconf.LLM_PORT}")


# -------- Main --------

# Import internal modules that depend on configuration after changes
import bot # TODO: Why can't I do `from . import bot`?
from bot import botconf

with open(globalconf.CONFIG_FILE, "r") as f:
botconf.botconfig.load_from_file(f)
botconf.bot_config.load_from_file(f)

# Run bot
try:
Expand Down