diff --git a/example-apps/chatbot-rag-app/api/chat.py b/example-apps/chatbot-rag-app/api/chat.py
index 4f2e84d7..87db3c32 100644
--- a/example-apps/chatbot-rag-app/api/chat.py
+++ b/example-apps/chatbot-rag-app/api/chat.py
@@ -38,17 +38,21 @@ def ask_question(question, session_id):
         condense_question_prompt = render_template(
             'condense_question_prompt.txt', question=question,
             chat_history=chat_history.messages)
-        question = get_llm().invoke(condense_question_prompt).content
+        condensed_question = get_llm().invoke(condense_question_prompt).content
+    else:
+        condensed_question = question
 
+    current_app.logger.debug('Condensed question: %s', condensed_question)
     current_app.logger.debug('Question: %s', question)
 
-    docs = store.as_retriever().invoke(question)
+    docs = store.as_retriever().invoke(condensed_question)
     for doc in docs:
         doc_source = {**doc.metadata, 'page_content': doc.page_content}
         current_app.logger.debug('Retrieved document passage from: %s', doc.metadata['name'])
         yield f'data: {SOURCE_TAG} {json.dumps(doc_source)}\n\n'
 
-    qa_prompt = render_template('rag_prompt.txt', question=question, docs=docs)
+    qa_prompt = render_template('rag_prompt.txt', question=question, docs=docs,
+                                chat_history=chat_history.messages)
 
     answer = ''
     for chunk in get_llm().stream(qa_prompt):
diff --git a/example-apps/chatbot-rag-app/api/templates/rag_prompt.txt b/example-apps/chatbot-rag-app/api/templates/rag_prompt.txt
index df8ee3ba..0935116a 100644
--- a/example-apps/chatbot-rag-app/api/templates/rag_prompt.txt
+++ b/example-apps/chatbot-rag-app/api/templates/rag_prompt.txt
@@ -1,4 +1,4 @@
-Use the following passages to answer the user's question.
+Use the following passages and chat history to answer the user's question.
 Each passage has a NAME which is the title of the document. After your answer, leave a blank line and then give the source name of the passages you answered from. Put them in a comma separated list, prefixed with SOURCES:.
 
 Example:
@@ -22,5 +22,10 @@ PASSAGE:
 
 {% endfor -%}
 ----
+Chat history:
+{% for dialogue_turn in chat_history -%}
+{% if dialogue_turn.type == 'human' %}Question: {{ dialogue_turn.content }}{% elif dialogue_turn.type == 'ai' %}Response: {{ dialogue_turn.content }}{% endif %}
+{% endfor -%}
+
 Question: {{ question }}
 Response:
diff --git a/example-apps/chatbot-rag-app/data/index_data.py b/example-apps/chatbot-rag-app/data/index_data.py
index 181e0831..f0407b16 100644
--- a/example-apps/chatbot-rag-app/data/index_data.py
+++ b/example-apps/chatbot-rag-app/data/index_data.py
@@ -91,6 +91,9 @@ def main():
         es_connection=elasticsearch_client,
         index_name=INDEX,
         strategy=ElasticsearchStore.SparseVectorRetrievalStrategy(model_id=ELSER_MODEL),
+        bulk_kwargs={
+            'request_timeout': 60,
+        },
     )
 
 
diff --git a/example-apps/chatbot-rag-app/frontend/src/store/provider.tsx b/example-apps/chatbot-rag-app/frontend/src/store/provider.tsx
index 40e318a5..b3637540 100644
--- a/example-apps/chatbot-rag-app/frontend/src/store/provider.tsx
+++ b/example-apps/chatbot-rag-app/frontend/src/store/provider.tsx
@@ -238,7 +238,7 @@ export const thunkActions = {
       dispatch(
         actions.updateMessage({
           id: conversationId,
-          content: message.replace(/SOURCES: (.+)+/, ''),
+          content: message.replace(/SOURCES:(.+)*/, ''),
         })
       )
     }
@@ -300,8 +300,8 @@ const parseSources = (
   message: string
 ) => {
   message = message.replaceAll("\"", "");
-  const match = message.match(/SOURCES: (.+)+/)
-  if (match) {
+  const match = message.match(/SOURCES:(.+)*/)
+  if (match && match[1]) {
     return match[1].split(',').map(element => {
       return element.trim();
     });
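
Note on the provider.tsx change: the old pattern /SOURCES: (.+)+/ only matches when at least one character follows "SOURCES: ", so a streamed answer that ends at the bare tag is neither parsed nor stripped from the displayed text. The relaxed pattern /SOURCES:(.+)*/ also matches the bare tag, but then the capture group is empty, which is why parseSources now guards on match[1]. A minimal standalone TypeScript sketch of the two behaviors (the example strings are made up for illustration, not taken from this repo):

// Sketch of the regex edge case (hypothetical inputs, not repo code).
const oldPattern = /SOURCES: (.+)+/;
const newPattern = /SOURCES:(.+)*/;

// Mid-stream, the model may have emitted the tag but no source list yet.
const bare = 'The answer is 42.\n\nSOURCES:';
console.log(bare.match(oldPattern));   // null: old pattern needs a space plus content
const partial = bare.match(newPattern);
console.log(partial && partial[1]);    // undefined: tag matched, but group is empty

// Once the list arrives, the guarded path parses it as before.
const full = 'The answer is 42.\n\nSOURCES: Doc A, Doc B';
const match = full.match(newPattern);
if (match && match[1]) {
  console.log(match[1].split(',').map(s => s.trim())); // [ 'Doc A', 'Doc B' ]
}

The same relaxed pattern in updateMessage means the tag is stripped from the rendered message even when no source list follows it.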