diff --git a/fai-rag-app/fai-backend/fai_backend/chat/__init__.py b/fai-rag-app/fai-backend/fai_backend/chat/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/fai-rag-app/fai-backend/fai_backend/chat/prompt.py b/fai-rag-app/fai-backend/fai_backend/chat/prompt.py new file mode 100644 index 00000000..6c450e0c --- /dev/null +++ b/fai-rag-app/fai-backend/fai_backend/chat/prompt.py @@ -0,0 +1,133 @@ +from typing import Literal, Optional + +from langstream.contrib import OpenAIChatMessage + +from fai_backend.chat.settings import OpenAIStreamSettings, default_settings + + +class MessageChatPrompt: + """ + Represents a chat prompt message. + + Args: + template (str): The content of the message template. + role (Optional[Literal['system', 'assistant', 'user', 'function']]): The role of the message. Defaults to None. + name (Optional[str]): The name of the message. Defaults to None. + """ + + def __init__(self, template: str, role: Optional[Literal['system', 'assistant', 'user', 'function']] = None, + name: Optional[str] = None, input_map_fn=lambda input: {}): + self.content = template + self.template = template + self.role = role + self.name = name + + +class UserChatPrompt(MessageChatPrompt): + """ + Represents a chat prompt message from the user. + + Args: + template (str): The content of the message template. + """ + + def __init__(self, template: str): + super().__init__(template, "user") + + +class SystemChatPrompt(MessageChatPrompt): + """ + Represents a chat prompt message from the system. + + Args: + template (str): The content of the message template. + """ + + def __init__(self, template: str): + super().__init__(template, "system") + + +class FunctionChatPrompt(MessageChatPrompt): + """ + Represents a chat prompt message from a function. + + Args: + template (str): The content of the message template. + name (str): The name of the function. + """ + + def __init__(self, template: str, name: str): + super().__init__(template, "function", name) + + +class AssistantChatPrompt(MessageChatPrompt): + """ + Represents a chat prompt message from the assistant. + + Args: + template (str): The content of the message template. + """ + + def __init__(self, template: str): + super().__init__(template, "assistant") + + +class ChatPrompt: + """ + Represents a chat prompt. + + Args: + name (str): The name of the chat prompt. + messages (list[MessageChatPrompt], optional): The list of chat prompt messages. Defaults to []. + settings (OpenAIStreamSettings, optional): The settings for the chat prompt. Defaults to default_settings. + """ + + def __init__(self, name: str, messages: list[MessageChatPrompt] = [], + settings: OpenAIStreamSettings = default_settings): + self.templates = messages + self.input_vars = {} + self.settings = settings + self.name = name + + def format_prompt(self, input_vars: dict[str, str]): + """ + Formats the chat prompt with the given input variables. + + Args: + input_vars (dict[str, str]): The input variables to format the chat prompt with. + """ + self.input_vars = input_vars + print(input_vars) + for template in self.templates: + template.content = template.template.format(**{**input_vars}) + return self + + def to_messages(self) -> list[OpenAIChatMessage]: + """ + Converts the chat prompt to a list of OpenAIChatMessage objects. + + Returns: + list[OpenAIChatMessage]: The list of OpenAIChatMessage objects. 
+ """ + return [ + OpenAIChatMessage(content=prompt.content, role=prompt.role, name=prompt.name) # type: ignore + for prompt in self.templates + ] + + # def to_prompt(self) -> Prompt: + # """ + # Converts the chat prompt to a Prompt object. + # + # Returns: + # Prompt: The Prompt object. + # """ + # return Prompt( + # provider=ChatOpenAI.id, + # inputs=self.input_vars, + # settings={**(self.settings if self.settings else {})}, + # messages=[ + # PromptMessage(template=prompt.template, formatted=prompt.content, role=prompt.role, name=prompt.name) + # # type: ignore + # for prompt in self.templates + # ], + # ) diff --git a/fai-rag-app/fai-backend/fai_backend/chat/settings.py b/fai-rag-app/fai-backend/fai_backend/chat/settings.py new file mode 100644 index 00000000..eae7d077 --- /dev/null +++ b/fai-rag-app/fai-backend/fai_backend/chat/settings.py @@ -0,0 +1,8 @@ +from typing import TypedDict + +OpenAIStreamSettings = TypedDict('OpenAIStreamSettings', { + 'model': str, + 'temperature': float +}) + +default_settings = OpenAIStreamSettings(model='gpt-3.5-turbo', temperature=0) \ No newline at end of file diff --git a/fai-rag-app/fai-backend/fai_backend/chat/stream.py b/fai-rag-app/fai-backend/fai_backend/chat/stream.py new file mode 100644 index 00000000..00588385 --- /dev/null +++ b/fai-rag-app/fai-backend/fai_backend/chat/stream.py @@ -0,0 +1,83 @@ +from typing import Tuple, TypeVar, Iterable, Callable + +from langstream.contrib import OpenAIChatStream, OpenAIChatMessage + +from fai_backend.chat.prompt import ChatPrompt +from fai_backend.chat.settings import OpenAIStreamSettings, default_settings + +T = TypeVar('T') +U = TypeVar('U') + + +def create_chat_prompt(prompt_args: dict) -> ChatPrompt: + """ + Create a ChatPrompt object. + + Args: + prompt_args (dict): Arguments for creating a ChatPrompt object. + + Returns: + ChatPrompt: The ChatPrompt object. + """ + return ChatPrompt(**prompt_args) + + +def create_chat_stream( + name: str, + messages_fn: Callable[[T], Iterable[OpenAIChatMessage]], + settings: OpenAIStreamSettings = default_settings, +) -> OpenAIChatStream[T, U]: + """ + Create a chat stream. + + Args: + name (str): The name of the chat stream. + settings (OpenAIStreamSettings): Settings for the chat stream. + input_map_fn (Callable[[T], U], optional): Function that maps input of type T to output of type U. Defaults to identity function. + + Returns: + OpenAIChatStream[T, U]: The chat stream. + """ + return OpenAIChatStream[T, U]( + name, + lambda delta: [*messages_fn(delta)], + **settings + ) + + +def create_chat_stream_from_prompt( + prompt_args: dict, +) -> Tuple[OpenAIChatStream[T, U], ChatPrompt]: + """ + Create a general chat stream with a prompt. + + Args: + prompt_args (dict): Arguments for creating a ChatPrompt object. + settings (OpenAIStreamSettings): Settings for the chat stream. + history (list[OpenAIChatMessage], optional): Chat history. Defaults to an empty list. + input_map_fn (Callable[[T], U], optional): Function that maps input of type T to output of type U. Defaults to identity function. + + Returns: + tuple[OpenAIChatStream[T, U], ChatPrompt]: A tuple containing the chat stream and prompt objects. 
+ """ + + input_map_fn = prompt_args["input_map_fn"] if "input_map_fn" in prompt_args else lambda x: x + + prompt = create_chat_prompt({ + "name": prompt_args["name"], + "messages": prompt_args["messages"], + "settings": prompt_args["settings"] if "settings" in prompt_args else default_settings, + }) + + def messages(p: T) -> Iterable[OpenAIChatMessage]: + prompt.format_prompt(input_map_fn(p)) + + return prompt.to_messages() + + chat_stream = create_chat_stream( + prompt.name, + messages, + prompt.settings + ) + + return chat_stream, prompt diff --git a/fai-rag-app/fai-backend/fai_backend/chat/template.py b/fai-rag-app/fai-backend/fai_backend/chat/template.py new file mode 100644 index 00000000..cd22671d --- /dev/null +++ b/fai-rag-app/fai-backend/fai_backend/chat/template.py @@ -0,0 +1,55 @@ +import os +from fai_backend.chat.prompt import UserChatPrompt, SystemChatPrompt + +CHAT_PROMPT_TEMPLATE_ARGS = { + "name": "ChatStream", + "messages": [ + SystemChatPrompt( + "You are a helpful AI assistant that helps people with answering questions about planning " + "permission.
If you can't find the answer in the search result below, just say (in Swedish) " + "\"Tyvärr kan jag inte svara på det.\" Don't try to make up an answer.
If the " + "question is not related to the context, politely respond that you are tuned to only " + "answer questions that are related to the context.
The questions are going to be " + "asked in Swedish. Your response must always be in Swedish." + ), + UserChatPrompt("{query}"), + UserChatPrompt("Here are the results of the search:\n\n {results}"), + ], + "input_map_fn": lambda input: { + "query": list(input)[0]['query'], + "results": ' | '.join([doc for doc, _ in list(input)[0]['results']]) + }, + "settings": { + "model": os.environ.get("GPT_4_MODEL_NAME", "gpt-4"), + "temperature": 0 + }, +} + +SCORING_PROMPT_TEMPLATE_ARGS = { + "name": "ScoringStream", + "messages": [ + SystemChatPrompt("You are a scoring systems that classifies documents from 0-100 based on how well they answer a query."), + UserChatPrompt("Query: {query}\n\nDocument: {document}"), + ], + "input_map_fn": lambda input: {**(input)}, + "settings": { + "model": "gpt-3.5-turbo", + "temperature": 0, + "functions": [ + { + "name": "score_document", + "description": "Scores the previous document according to the user query\n\n Parameters\n ----------\n score\n A number from 0-100 scoring how well does the document matches the query. The higher the score, the better match for the query\n ", + "parameters": { + "type": "object", + "properties": { + "score": { + "type": "number", + } + }, + "required": ["score"], + } + } + ], + "function_call": {"name": "score_document"}, + }, +} \ No newline at end of file diff --git a/fai-rag-app/fai-backend/fai_backend/llm/service.py b/fai-rag-app/fai-backend/fai_backend/llm/service.py index 7e537b79..1c6e7e4e 100644 --- a/fai-rag-app/fai-backend/fai_backend/llm/service.py +++ b/fai-rag-app/fai-backend/fai_backend/llm/service.py @@ -1,9 +1,13 @@ -import asyncio -from typing import AsyncGenerator, Iterable, List, Tuple +import json from langstream import Stream, join_final_output, as_async_generator from langstream.contrib import OpenAIChatStream, OpenAIChatMessage, OpenAIChatDelta +from fai_backend.chat.stream import create_chat_stream_from_prompt +from fai_backend.chat.template import CHAT_PROMPT_TEMPLATE_ARGS, SCORING_PROMPT_TEMPLATE_ARGS +from fai_backend.vector.service import VectorService +from fai_backend.vector.factory import vector_db + SYSTEM_TEMPLATE = "You are a helpful AI assistant that helps people with answering questions about planning " "permission.
If you can't find the answer in the search result below, just say (in Swedish) " "\"Tyvärr kan jag inte svara på det.\" Don't try to make up an answer.
If the " @@ -12,9 +16,24 @@ "asked in Swedish. Your response must always be in Swedish." +async def query_vector(vector_service, collection_name, query, n_results=10): + vector_result = await vector_service.query_from_collection( + collection_name=collection_name, + query_texts=[query], + n_results=n_results, + ) + + documents = vector_result['documents'][0] if 'documents' in vector_result and vector_result['documents'] else [] + + return Stream[None, str]( + "QueryVectorStream", + lambda _: as_async_generator(*documents) + ) + + async def ask_llm_question(question: str): llm_stream: Stream[str, str] = OpenAIChatStream[str, OpenAIChatDelta]( - "RecipeStream", + "AskLLMStream", lambda user_question: [ OpenAIChatMessage( role="system", @@ -32,50 +51,59 @@ async def ask_llm_question(question: str): return await join_final_output(llm_stream(question)) -async def ask_llm_raq_question(question: str): +async def ask_llm_raq_question(question: str, collection_name: str): add_document, list_documents = (lambda documents: ( lambda document: (documents.append(document), document)[1], lambda: [*documents] ))([]) - def retrieve_documents(query: str, n_results: int) -> AsyncGenerator[str, None]: - mock_results = [ - "Document 1", - "Document 2", - "Document 3" - ][0:n_results] + chat_stream, _ = create_chat_stream_from_prompt(CHAT_PROMPT_TEMPLATE_ARGS) + scoring_stream, _ = create_chat_stream_from_prompt(SCORING_PROMPT_TEMPLATE_ARGS) + vector_service = VectorService(vector_db=vector_db) - return as_async_generator(*mock_results) + def append_score_to_documents(scores): + return zip(list_documents(), [s[0] for s in scores]) + + def sort_and_slice_documents(scored_documents, slice_size: int): + first_element = list(scored_documents)[0] + sorted_scores = sorted(first_element, key=lambda x: x[1], reverse=True) + return sorted_scores[:slice_size] + + def create_query_document_pair(query, document): + return {"query": query, "document": document} + + vector_db_query_result = await query_vector( + vector_service=vector_service, + collection_name=collection_name, + query=question, + ) + + scoring_stream = scoring_stream.map( + lambda delta: json.loads(delta.content)['score'] + if delta.role == "function" and delta.name == "score_document" + else 0 + ) def stream(query): - return ( - Stream[str, str]( - "RetrieveDocumentsStream", - lambda query: retrieve_documents(query, n_results=3) - ) - .map(add_document) - # .map(lambda document: {"query": query, "document": document}) - # .gather() - # .and_then(lambda results: {"query": query, "results": results[0]}) - .and_then( - OpenAIChatStream[Iterable[List[Tuple[str, int]]], OpenAIChatDelta]( - "AnswerStream", - lambda results: [ - OpenAIChatMessage( - role="system", - content=SYSTEM_TEMPLATE, - ), - OpenAIChatMessage(role="user", content=query), - OpenAIChatMessage( - role="user", - # content=f"Here are the results of the search:\n\n {' | '.join([doc for doc, _ in list(results)[0]])}", - content=f"Here are the results of the search:\n\n Det behövs ingen bygglov för att sätta upp en flaggstång. Det är däremot viktigt att tänka på att flaggstången inte får vara högre än 12 meter. Om flaggstången är högre än 3 meter behöver du anmäla detta till kommunen. Det är också viktigt att tänka på att flaggstången inte får placeras så att den skymmer sikten för trafikanter eller för grannar. 
Om du är osäker på om du behöver anmäla flaggstången eller inte kan du kontakta kommunen för att få hjälp.", - ), - ], - model="gpt-4", - temperature=0, - ).map(lambda delta: delta.content) - ) - )(query) - - return await join_final_output(stream(question)) + try: + return ( + vector_db_query_result + .map(add_document) + .map(lambda document: create_query_document_pair(query, document)) + .map(scoring_stream) + .gather() + .and_then(append_score_to_documents) + .and_then(lambda scored_documents: sort_and_slice_documents(scored_documents, 6)) + .and_then(lambda results: {"query": query, "results": results[0]}) + .and_then(chat_stream) + .map(lambda delta: delta.content) + )(query) + except Exception as e: + print(f"Error processing query: {e}", {str(e)}) + raise e + + try: + return await join_final_output(stream(question)) + except Exception as e: + print(f"Error joining final output '{question}': {str(e)}") + raise e diff --git a/fai-rag-app/fai-backend/fai_backend/qaf/dependencies.py b/fai-rag-app/fai-backend/fai_backend/qaf/dependencies.py index 139468ef..0e132c7c 100644 --- a/fai-rag-app/fai-backend/fai_backend/qaf/dependencies.py +++ b/fai-rag-app/fai-backend/fai_backend/qaf/dependencies.py @@ -1,6 +1,8 @@ from fastapi import Depends from fai_backend.dependencies import get_project_user +from fai_backend.files.dependecies import get_file_upload_service +from fai_backend.files.service import FileUploadService from fai_backend.llm.service import ask_llm_raq_question from fai_backend.logger.console import console from fai_backend.qaf.schema import ( @@ -32,9 +34,16 @@ async def submit_question_request( async def submit_question_and_generate_answer_request( question: QuestionDetails = Depends(submit_question_request), service: QAFService = Depends(QAFService.factory), + file_service: FileUploadService = Depends(get_file_upload_service), user: ProjectUser = Depends(get_project_user), ) -> QuestionDetails: - response = await ask_llm_raq_question(question.question.content) + latest_upload_path = file_service.get_latest_upload_path(user.project_id) + if not latest_upload_path: + raise Exception('No upload path found') + + directory_name = latest_upload_path.split('/')[-1] + + response = await ask_llm_raq_question(question=question.question.content, collection_name=directory_name) await service.add_message( user, GenerateAnswerPayload(question_id=question.id, answer=response) diff --git a/fai-rag-app/fai-backend/fai_backend/qaf/routes.py b/fai-rag-app/fai-backend/fai_backend/qaf/routes.py index 03962ff5..fbaac78b 100644 --- a/fai-rag-app/fai-backend/fai_backend/qaf/routes.py +++ b/fai-rag-app/fai-backend/fai_backend/qaf/routes.py @@ -46,9 +46,12 @@ async def llm_question_endpoint(question: str): @router.get('/llm-raq-question', response_model=Any) -async def llm_raq_question_endpoint(question: str): +async def llm_raq_question_endpoint( + question: str, + vector_collection_name: str, +): try: - response = await ask_llm_raq_question(question) + response = await ask_llm_raq_question(question=question, collection_name=vector_collection_name) return {'response': response} except Exception as exception: raise HTTPException(status_code=500, detail=str(exception))
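A minimal usage sketch of the refactored RAG flow introduced in this patch (not part of the diff itself). It assumes OPENAI_API_KEY is set, the vector store behind fai_backend.vector.factory.vector_db already holds the named collection, and the collection name below is only a placeholder for an actual upload directory name:

import asyncio

from fai_backend.llm.service import ask_llm_raq_question


async def main() -> None:
    # collection_name is expected to match the directory name of the latest upload
    # (see submit_question_and_generate_answer_request in qaf/dependencies.py).
    answer = await ask_llm_raq_question(
        question="Behöver jag bygglov för en flaggstång?",
        collection_name="example-upload-dir",  # placeholder collection name
    )
    print(answer)


if __name__ == "__main__":
    asyncio.run(main())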