From ba14c760c00573187ef50709bb7b5d73d25b6ab5 Mon Sep 17 00:00:00 2001
From: pramitchoudhary
Date: Wed, 14 Feb 2024 12:02:48 -0800
Subject: [PATCH 1/6] Update pending blocking func calls for Q generation #44

---
 sidekick/utils.py | 12 ++++++------
 ui/app.py         | 31 ++++++++++++++++++++++++++++++-
 2 files changed, 36 insertions(+), 7 deletions(-)

diff --git a/sidekick/utils.py b/sidekick/utils.py
index ee9e87f..148133b 100644
--- a/sidekick/utils.py
+++ b/sidekick/utils.py
@@ -629,7 +629,7 @@ def check_vulnerability(input_query: str):
 
 
 def generate_suggestions(remote_url, client_key:str, column_names: list, n_qs: int=10):
-    results = []
+    text_completion = []
     column_info = ','.join(column_names)
     _system_prompt = f"Act as a data analyst, based on below data schema help answer the question"
 
@@ -640,12 +640,13 @@ def generate_suggestions(remote_url, client_key:str, column_names: list, n_qs: i
     if "h2ogpt-" in recommender_model:
         try:
             client = H2OGPTE(address=remote_url, api_key=client_key)
-            text_completion = client.answer_question(
+            response = client.answer_question(
                 system_prompt=_system_prompt,
                 text_context_list=[],
                 question=_user_prompt,
                 llm=recommender_model
             )
+            text_completion = response.content.split("\n")[2:]
         except Exception as e:
             remote_url = os.getenv("H2OGPT_BASE_URL", None)
             client_key = os.getenv("H2OGPT_BASE_API_TOKEN", None)
@@ -660,7 +661,7 @@
                 max_tokens=512,
                 temperature=0.5,
                 seed=42)
-            text_completion = completion.choices[0].message
+            text_completion = completion.choices[0].message.content.split("\n")[2:]
     elif 'gpt-3.5' in recommender_model.lower() or 'gpt-4' in recommender_model.lower():
         # Check if the API key is set, else inform user
         logger.info(f"Using OpenAI model: {recommender_model}")
@@ -674,9 +675,8 @@
             seed=42,
             temperature=0.7
         )
-        text_completion = completion.choices[0].message
+        text_completion = completion.choices[0].message.content.split("\n")
     else:
         raise Exception("Model url or key is missing.")
-    _res = text_completion.content.split("\n")[2:]
-    results = "\n".join(_res)
+    results = "\n".join(text_completion)
     return results
diff --git a/ui/app.py b/ui/app.py
index be587bc..623717a 100644
--- a/ui/app.py
+++ b/ui/app.py
@@ -1,8 +1,10 @@
+import asyncio
 import concurrent.futures
 import gc
 import hashlib
 import json
 import os
+import time
 from pathlib import Path
 from typing import List, Optional
@@ -218,6 +220,7 @@ async def chat(q: Q):
         ui.form_card(
             box=ui.box("vertical", height="120px"),
             items=[
+                ui.progress(name='progress', label='Thinking ...', value=0),
                 ui.buttons(
                     [
                         ui.button(
@@ -269,6 +272,30 @@
     logging.info(f"Chatbot response: {q.args.chatbot}")
 
 
+async def update_ui(q: Q, value: int):
+    q.page['additional_actions'].progress.value = value
+    await q.page.save()
+
+
+def _execute_suggestions(q: Q, loop: asyncio.AbstractEventLoop):
+    count = 0
+    time_out = 50
+    future = executor = None
+    result = task = None
+    while count < time_out:
+        if not task:
+            executor = concurrent.futures.ThreadPoolExecutor()
+            task = executor.submit(recommend_suggestions, cache_path=q.client.table_info_path, table_name=q.client.table_name)
+        time.sleep(1)
+        count += 1
+        if not future or future.done():
+            future = asyncio.ensure_future(update_ui(q, count / time_out), loop=loop)
+        if task.done():
+            result = task.result(timeout=1)
+            break
+    return result
+
+
 @on("chatbot")
 async def chatbot(q: Q):
q.page["sidebar"].value = "#chat" @@ -308,8 +335,10 @@ async def chatbot(q: Q): n_cols = len(_response_df.columns) llm_response = f"The selected dataset has total number of {n_cols} columns.\nBelow is quick preview:\n{df_markdown}" elif q.args.chatbot and (q.args.chatbot.lower() == "recommend questions" or q.args.chatbot.lower() == "recommend qs"): + await q.page.save() + loop = asyncio.get_event_loop() with concurrent.futures.ThreadPoolExecutor() as pool: - llm_response = await q.exec(pool, recommend_suggestions, cache_path=q.client.table_info_path, table_name=q.client.table_name) + llm_response = await q.exec(pool, _execute_suggestions, q, loop=loop) if not llm_response: llm_response = "Something went wrong, check the API Keys provided." logging.info(f"Recommended Questions:\n{llm_response}") From 57824452d55137e1fe82eccbf43153b804a87445 Mon Sep 17 00:00:00 2001 From: pramitchoudhary Date: Wed, 14 Feb 2024 22:20:09 -0800 Subject: [PATCH 2/6] Logical correctness, restructuring and clean-up #44 --- ui/app.py | 154 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 92 insertions(+), 62 deletions(-) diff --git a/ui/app.py b/ui/app.py index 623717a..8413a06 100644 --- a/ui/app.py +++ b/ui/app.py @@ -83,6 +83,12 @@ async def user_variable(q: Q): async def client_variable(q: Q): q.client.query = None + _gretting_msg = ["""Welcome to the SQL Sidekick!\nI am an AI assistant, i am here to help you find answers to questions on structured data. + To get started, please select a table from the dropdown and ask your question. + One could start by learning about the dataset by asking questions like: + - Describe data. + - Preview.""", True] + q.client.chat_buffer = [_gretting_msg] # Reference: https://wave.h2o.ai/docs/examples/table-markdown-pandas/ def make_markdown_row(values): @@ -113,6 +119,52 @@ def clear_cards(q, ignore: Optional[List[str]] = []) -> None: del q.page[name] q.client.cards.remove(name) +async def draw_additional_actions(q: Q): + add_card( + q, + "additional_actions", + ui.form_card( + box=ui.box("vertical", height="120px"), + items=[ + ui.buttons( + [ + ui.button( + name="suggest", + icon="", + caption="Suggests possible questions one could start with", + label="Discover", + ), + ui.button( + name="regenerate", + icon="RepeatOne", + caption="Attempts regeneration of the last response", + label="Try Again", + primary=True, + ), + ui.button( + name="regenerate_with_options", + icon="RepeatAll", + caption="Regenerates with options", + label="Try Harder", + ), + ui.button( + name="save_conversation", + caption="Saves the conversation in the history for future reference to improve response", + label="Accept", + icon="Emoji2", + ), + ui.button( + name="save_rejected_conversation", + caption="Saves the disappointed conversation to improve response.", + label="Reject", + icon="EmojiDisappointed", + ), + ], + justify="center", + ) + ], + ), +) @on("#chat") async def chat(q: Q): @@ -209,77 +261,24 @@ async def chat(q: Q): ui.chatbot_card( box=ui.box("vertical", height="500px"), name="chatbot", - data=data(fields="content from_user", t="list", size=-50), + placeholder = "Type your question here, happy to help!", + data=data(fields="content from_user", t="list", size=-50, rows=q.client.chat_buffer), commands=chat_card_command_items, - events=["scroll"], + events=["scroll_up"], ), ), - add_card( - q, - "additional_actions", - ui.form_card( - box=ui.box("vertical", height="120px"), - items=[ - ui.progress(name='progress', label='Thinking ...', value=0), - ui.buttons( - [ - 
ui.button( - name="suggest", - icon="", - caption="Suggests possible questions one could start with", - label="Discover", - ), - ui.button( - name="regenerate", - icon="RepeatOne", - caption="Attempts regeneration of the last response", - label="Try Again", - primary=True, - ), - ui.button( - name="regenerate_with_options", - icon="RepeatAll", - caption="Regenerates with options", - label="Try Harder", - ), - ui.button( - name="save_conversation", - caption="Saves the conversation in the history for future reference to improve response", - label="Accept", - icon="Emoji2", - ), - ui.button( - name="save_rejected_conversation", - caption="Saves the disappointed conversation to improve response.", - label="Reject", - icon="EmojiDisappointed", - ), - ], - justify="center", - ) - ], - ), - ) - if q.args.chatbot is None or q.args.chatbot.strip() == "": - _msg = """Welcome to the SQL Sidekick!\nI am an AI assistant, i am here to help you find answers to questions on structured data. -To get started, please select a table from the dropdown and ask your question. -One could start by learning about the dataset by asking questions like: -- Describe data. -- Preview.""" - q.args.chatbot = _msg - q.page["chat_card"].data += [q.args.chatbot, False] - logging.info(f"Chatbot response: {q.args.chatbot}") + # additional actions + await draw_additional_actions(q) async def update_ui(q: Q, value: int): - q.page['additional_actions'].progress.value = value + q.page['chat_card_progress'].progress.value = value await q.page.save() -def _execute_suggestions(q: Q, loop: asyncio.AbstractEventLoop): +def _execute_suggestions(q: Q, loop: asyncio.AbstractEventLoop, time_out=50): count = 0 - time_out = 50 future = executor = None result = task = None while count < time_out: @@ -301,10 +300,12 @@ async def chatbot(q: Q): q.page["sidebar"].value = "#chat" # Append user message. + q.client.chat_buffer.append([q.args.chatbot, True]) q.page["chat_card"].data += [q.args.chatbot, True] if q.page["select_tables"].table_dropdown.value is None or q.client.table_name is None: q.page["chat_card"].data += ["Please select a table to continue!", False] + q.client.chat_buffer.append([q.args.chatbot, False]) return if ( @@ -336,12 +337,42 @@ async def chatbot(q: Q): llm_response = f"The selected dataset has total number of {n_cols} columns.\nBelow is quick preview:\n{df_markdown}" elif q.args.chatbot and (q.args.chatbot.lower() == "recommend questions" or q.args.chatbot.lower() == "recommend qs"): await q.page.save() + #_rows = q.page["chat_card"].data.rows + #import pdb; pdb.set_trace() + del q.page["chat_card"] + del q.page["additional_actions"] + add_card( + q, + "chat_card_progress", + ui.form_card( + box=ui.box("vertical", height="500px"), items=[ui.progress(name='progress', label='Thinking ...', value=0)])) + await draw_additional_actions(q) loop = asyncio.get_event_loop() with concurrent.futures.ThreadPoolExecutor() as pool: llm_response = await q.exec(pool, _execute_suggestions, q, loop=loop) if not llm_response: llm_response = "Something went wrong, check the API Keys provided." 
logging.info(f"Recommended Questions:\n{llm_response}") + # Re-draw the chat-card + del q.page["chat_card_progress"] + del q.page["additional_actions"] + chat_card_command_items = [ + ui.command(name="download_accept", label="Download QnA history", icon="Download"), + ui.command(name="download_reject", label="Download in-correct QnA history", icon="Download"), + ] + _chat_history = q.client.chat_buffer + add_card( + q, + "chat_card", + ui.chatbot_card( + box=ui.box("vertical", height="500px"), + name="chatbot", + placeholder = "Type your question here, happy to help!", + data=data(fields="content from_user", t="list", size=-50, rows=_chat_history), + commands=chat_card_command_items, + events=["scroll_up"], + )), + await draw_additional_actions(q) q.args.chatbot = None elif q.args.chatbot and q.args.chatbot.lower() == "db setup": llm_response, err = db_setup( @@ -415,7 +446,7 @@ async def chatbot(q: Q): torch.cuda.empty_cache() llm_response = "Something went wrong, try executing the query again!" q.client.llm_response = llm_response - q.page["chat_card"].data += [llm_response, False] + q.client.chat_buffer.append([llm_response, False]) @on("submit_url_keys") @@ -752,7 +783,6 @@ async def init(q: Q) -> None: ], ) - # Connect to LLM openai.api_key = "" await user_variable(q) From c32568b24855cc25aa943cc2490d7aae32751410 Mon Sep 17 00:00:00 2001 From: pramitchoudhary Date: Thu, 15 Feb 2024 14:13:48 -0800 Subject: [PATCH 3/6] Fix chatbox re-draw issue #44 --- ui/app.py | 37 ++++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/ui/app.py b/ui/app.py index 8413a06..f1fc611 100644 --- a/ui/app.py +++ b/ui/app.py @@ -254,19 +254,18 @@ async def chat(q: Q): ], ), ), - if not q.args.chatbot: - add_card( - q, - "chat_card", - ui.chatbot_card( - box=ui.box("vertical", height="500px"), - name="chatbot", - placeholder = "Type your question here, happy to help!", - data=data(fields="content from_user", t="list", size=-50, rows=q.client.chat_buffer), - commands=chat_card_command_items, - events=["scroll_up"], - ), + add_card( + q, + "chat_card", + ui.chatbot_card( + box=ui.box("vertical", height="500px"), + name="chatbot", + placeholder = "Type your question here, happy to help!", + data=data(fields="content from_user", t="list", size=-50, rows=q.client.chat_buffer), + commands=chat_card_command_items, + events=["scroll_up"], ), + ), # additional actions await draw_additional_actions(q) @@ -298,6 +297,7 @@ def _execute_suggestions(q: Q, loop: asyncio.AbstractEventLoop, time_out=50): @on("chatbot") async def chatbot(q: Q): q.page["sidebar"].value = "#chat" + q.args.re_drawn = False # Append user message. 
     q.client.chat_buffer.append([q.args.chatbot, True])
@@ -337,8 +337,6 @@ async def chatbot(q: Q):
             llm_response = f"The selected dataset has total number of {n_cols} columns.\nBelow is quick preview:\n{df_markdown}"
         elif q.args.chatbot and (q.args.chatbot.lower() == "recommend questions" or q.args.chatbot.lower() == "recommend qs"):
             await q.page.save()
-            #_rows = q.page["chat_card"].data.rows
-            #import pdb; pdb.set_trace()
             del q.page["chat_card"]
             del q.page["additional_actions"]
             add_card(
@@ -373,7 +371,7 @@ async def chatbot(q: Q):
                     events=["scroll_up"],
                 )),
             await draw_additional_actions(q)
-            q.args.chatbot = None
+            q.args.re_drawn = True
         elif q.args.chatbot and q.args.chatbot.lower() == "db setup":
             llm_response, err = db_setup(
                 db_name=q.client.db_name,
@@ -447,6 +445,8 @@ async def chatbot(q: Q):
             llm_response = "Something went wrong, try executing the query again!"
         q.client.llm_response = llm_response
         q.client.chat_buffer.append([llm_response, False])
+        if not q.args.re_drawn:
+            q.page["chat_card"].data += [llm_response, False]
 
 
 @on("submit_url_keys")
@@ -851,12 +851,15 @@ async def on_event(q: Q):
 
     if q.args.regenerate_with_options:
         q.args.chatbot = "try harder"
+        q.client.chat_buffer.append([q.args.chatbot, True])
     elif q.args.regenerate:
         q.args.chatbot = "regenerate"
+        q.client.chat_buffer.append([q.args.chatbot, True])
     q.client.eval_mode = False
 
     if q.args.suggest:
         q.args.chatbot = "Recommend questions"
+        q.client.chat_buffer.append([q.args.chatbot, True])
         await chatbot(q)
         event_handled = True
     if q.args.eval_mode:
@@ -868,6 +871,7 @@ async def on_event(q: Q):
         logging.info(f"User selected table: {q.args.table_dropdown}")
         await submit_table(q)
         q.args.chatbot = f"Table {q.args.table_dropdown} selected"
+        q.client.chat_buffer.append([q.args.chatbot, True])
        # Refresh response is triggered when user selects a table via dropdown
         event_handled = True
     if (
@@ -878,6 +882,7 @@ async def on_event(q: Q):
         q.client.model_choice_dropdown = q.args.model_choice_dropdown
         q.page["select_tables"].model_choice_dropdown.value = q.client.model_choice_dropdown
         q.args.chatbot = f"Model {q.client.model_choice_dropdown} selected"
+        q.client.chat_buffer.append([q.args.chatbot, True])
         # Refresh response is triggered when user selects a table via dropdown
         q.args.model_choice_dropdown = None
         event_handled = True
@@ -886,6 +891,7 @@ async def on_event(q: Q):
         q.client.task_dropdown = q.args.task_dropdown
         q.page["task_choice"].task_dropdown.value = q.client.task_dropdown
         q.args.chatbot = f"'{TASK_CHOICE[q.client.task_dropdown]}' mode selected"
+        q.client.chat_buffer.append([q.args.chatbot, True])
         q.args.task_dropdown = None
         # Refresh response is triggered when user selects a table via dropdown
         event_handled = True
@@ -975,6 +981,7 @@ async def on_event(q: Q):
         q.args.chatbot = (
             f"Demo mode is enabled.\nTry below example questions for the selected data to get started,\n{sample_qs}"
         )
+        q.client.chat_buffer.append([q.args.chatbot, True])
         q.page["chat_card"].data += [q.args.chatbot, False]
         q.args.table_dropdown = None
         q.args.model_choice_dropdown = None

From 3014f0e157784953afc6bbf523bb4a85169635c1 Mon Sep 17 00:00:00 2001
From: pramitchoudhary
Date: Thu, 15 Feb 2024 15:03:02 -0800
Subject: [PATCH 4/6] Update fix for pending blocking calls (func: ask) #44

---
 ui/app.py | 119 +++++++++++++++++++++++++++++++-----------------------
 1 file changed, 69 insertions(+), 50 deletions(-)

diff --git a/ui/app.py b/ui/app.py
index f1fc611..f3df32a 100644
--- a/ui/app.py
+++ b/ui/app.py
@@ -276,14 +276,29 @@ async def update_ui(q: Q, value: int):
     await q.page.save()
 
 
-def _execute_suggestions(q: Q, loop: asyncio.AbstractEventLoop, time_out=50):
+def _execute(q: Q, func_type: str, loop: asyncio.AbstractEventLoop, time_out=50, **kwargs):
     count = 0
     future = executor = None
     result = task = None
+    cache_path = q.client.table_info_path
+    table_name = q.client.table_name
     while count < time_out:
         if not task:
             executor = concurrent.futures.ThreadPoolExecutor()
-            task = executor.submit(recommend_suggestions, cache_path=q.client.table_info_path, table_name=q.client.table_name)
+            if func_type == "suggest":
+                task = executor.submit(recommend_suggestions, cache_path=cache_path, table_name=table_name)
+            elif func_type == "ask":
+                task = executor.submit(ask, question=q.client.query,
+                    sample_queries_path=q.client.sample_qna_path,
+                    table_info_path=q.client.table_info_path,
+                    table_name=q.client.table_name,
+                    model_name=q.client.model_choice_dropdown,
+                    is_regenerate=kwargs.get('is_regenerate', False),
+                    is_regen_with_options=kwargs.get('is_regen_with_options', False),
+                    debug_mode=kwargs.get('debug_mode', False)
+                )
+            else:
+                raise ValueError(f"Invalid function type: {func_type}")
         time.sleep(1)
         count += 1
         if not future or future.done():
@@ -294,6 +309,41 @@ def _execute_suggestions(q: Q, loop: asyncio.AbstractEventLoop, time_out=50):
     return result
 
 
+async def _update_before_job_start(q: Q):
+    await q.page.save()
+    del q.page["chat_card"]
+    del q.page["additional_actions"]
+    add_card(
+        q,
+        "chat_card_progress",
+        ui.form_card(
+            box=ui.box("vertical", height="500px"), items=[ui.progress(name='progress', label='Thinking ...', value=0)]))
+    await draw_additional_actions(q)
+
+
+async def _update_after_job_end(q: Q):
+    del q.page["chat_card_progress"]
+    del q.page["additional_actions"]
+    chat_card_command_items = [
+        ui.command(name="download_accept", label="Download QnA history", icon="Download"),
+        ui.command(name="download_reject", label="Download incorrect QnA history", icon="Download"),
+    ]
+    _chat_history = q.client.chat_buffer
+    add_card(
+        q,
+        "chat_card",
+        ui.chatbot_card(
+            box=ui.box("vertical", height="500px"),
+            name="chatbot",
+            placeholder = "Type your question here, happy to help!",
+            data=data(fields="content from_user", t="list", size=-50, rows=_chat_history),
+            commands=chat_card_command_items,
+            events=["scroll_up"],
+        )),
+    await draw_additional_actions(q)
+    q.args.re_drawn = True
+
+
 @on("chatbot")
 async def chatbot(q: Q):
     q.page["sidebar"].value = "#chat"
@@ -328,6 +378,7 @@ async def chatbot(q: Q):
     # 2. "Try harder mode (THM)" Slow approach by using the diverse beam search
     llm_response = None
     try:
+        loop = asyncio.get_event_loop()
         if q.args.chatbot and ("preview data" in q.args.chatbot.lower() or "data preview" in q.args.chatbot.lower() or "preview" in q.args.chatbot.lower()) or f"preview {q.client.table_name}" in q.args.chatbot.lower():
             _response_df = data_preview(q.client.table_name)
             # Format as markdown table
@@ -336,42 +387,14 @@ async def chatbot(q: Q):
             n_cols = len(_response_df.columns)
             llm_response = f"The selected dataset has total number of {n_cols} columns.\nBelow is quick preview:\n{df_markdown}"
         elif q.args.chatbot and (q.args.chatbot.lower() == "recommend questions" or q.args.chatbot.lower() == "recommend qs"):
-            await q.page.save()
-            del q.page["chat_card"]
-            del q.page["additional_actions"]
-            add_card(
-                q,
-                "chat_card_progress",
-                ui.form_card(
-                    box=ui.box("vertical", height="500px"), items=[ui.progress(name='progress', label='Thinking ...', value=0)]))
-            await draw_additional_actions(q)
-            loop = asyncio.get_event_loop()
+            await _update_before_job_start(q)
             with concurrent.futures.ThreadPoolExecutor() as pool:
-                llm_response = await q.exec(pool, _execute_suggestions, q, loop=loop)
+                llm_response = await q.exec(pool, _execute, q, "suggest", loop=loop)
             if not llm_response:
                 llm_response = "Something went wrong, check the API Keys provided."
             logging.info(f"Recommended Questions:\n{llm_response}")
-            # Re-draw the chat-card
-            del q.page["chat_card_progress"]
-            del q.page["additional_actions"]
-            chat_card_command_items = [
-                ui.command(name="download_accept", label="Download QnA history", icon="Download"),
-                ui.command(name="download_reject", label="Download incorrect QnA history", icon="Download"),
-            ]
-            _chat_history = q.client.chat_buffer
-            add_card(
-                q,
-                "chat_card",
-                ui.chatbot_card(
-                    box=ui.box("vertical", height="500px"),
-                    name="chatbot",
-                    placeholder = "Type your question here, happy to help!",
-                    data=data(fields="content from_user", t="list", size=-50, rows=_chat_history),
-                    commands=chat_card_command_items,
-                    events=["scroll_up"],
-                )),
-            await draw_additional_actions(q)
-            q.args.re_drawn = True
+            # Update
+            await _update_after_job_end(q)
         elif q.args.chatbot and q.args.chatbot.lower() == "db setup":
             llm_response, err = db_setup(
                 db_name=q.client.db_name,
@@ -387,16 +410,15 @@ async def chatbot(q: Q):
             # Attempts to regenerate response on the last supplied query
             logging.info(f"Attempt for regeneration")
             if q.client.query is not None and q.client.query.strip() != "":
+                await _update_before_job_start(q)
                 with concurrent.futures.ThreadPoolExecutor() as pool:
-                    llm_response, alt_response, err = await q.exec(pool, ask, question=q.client.query,
-                        sample_queries_path=q.client.sample_qna_path,
-                        table_info_path=q.client.table_info_path,
-                        table_name=q.client.table_name,
-                        model_name=q.client.model_choice_dropdown,
+                    llm_response, alt_response, _ = await q.exec(pool, _execute, q, "ask",
+                        loop=loop,
                         is_regenerate=True,
                         is_regen_with_options=False
                     )
                 llm_response = "\n".join(llm_response)
+                await _update_after_job_end(q)
             else:
                 llm_response = (
                     "Sure, I can generate a new response for you. "
@@ -406,17 +428,15 @@ async def chatbot(q: Q):
             # Attempts to regenerate response on the last supplied query
             logging.info(f"Attempt for regeneration with options.")
             if q.client.query is not None and q.client.query.strip() != "":
+                await _update_before_job_start(q)
                 with concurrent.futures.ThreadPoolExecutor() as pool:
-                    llm_response, alt_response, err = await q.exec(pool, ask,
-                        question=q.client.query,
-                        sample_queries_path=q.client.sample_qna_path,
-                        table_info_path=q.client.table_info_path,
-                        table_name=q.client.table_name,
-                        model_name=q.client.model_choice_dropdown,
+                    llm_response, alt_response, _ = await q.exec(pool, _execute, q, "ask",
+                        loop=loop,
                         is_regenerate=False,
                         is_regen_with_options=True
                     )
                 response = "\n".join(llm_response)
+                await _update_after_job_end(q)
             if alt_response:
                 llm_response = response + "\n\n" + "**Alternate options:**\n" + "\n".join(alt_response)
             logging.info(f"Regenerate response: {llm_response}")
@@ -429,15 +449,14 @@ async def chatbot(q: Q):
             else:
                 q.client.query = question
+                await _update_before_job_start(q)
                 with concurrent.futures.ThreadPoolExecutor() as pool:
-                    llm_response, alt_response, err = await q.exec(pool, ask, question=q.client.query,
-                        sample_queries_path=q.client.sample_qna_path,
-                        table_info_path=q.client.table_info_path,
-                        table_name=q.client.table_name,
-                        model_name=q.client.model_choice_dropdown,
+                    llm_response, alt_response, err = await q.exec(pool, _execute, q, "ask",
+                        loop=loop,
                         debug_mode=q.args.debug_mode
                     )
                 llm_response = "\n".join(llm_response)
+                await _update_after_job_end(q)
         except (MemoryError, RuntimeError) as e:
             logging.error(f"Something went wrong while generating response: {e}")
             gc.collect()

From 8146eb9a62cf480b0339d2add29123b2e99e2fa3 Mon Sep 17 00:00:00 2001
From: pramitchoudhary
Date: Fri, 16 Feb 2024 08:55:36 -0800
Subject: [PATCH 5/6] Update UI cosmetics (progress meter) #44

---
 app.toml             | 2 +-
 pyproject.toml       | 2 +-
 sidekick/prompter.py | 2 +-
 ui/app.py            | 5 ++++-
 4 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/app.toml b/app.toml
index 4149155..d4a6d00 100644
--- a/app.toml
+++ b/app.toml
@@ -5,7 +5,7 @@ Description = "QnA with tabular data using NLQ"
 LongDescription = "about.md"
 InstanceLifecycle = "MANAGED"
 Tags = ["DATA_SCIENCE", "MACHINE_LEARNING", "NLP", "GENERATIVE_AI"]
-Version = "0.2.3"
+Version = "0.2.4"
 
 [Runtime]
 MemoryLimit = "64Gi"
diff --git a/pyproject.toml b/pyproject.toml
index dcba8fd..1ae8731 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "sql-sidekick"
-version = "0.2.3"
+version = "0.2.4"
 license = "Apache-2.0 license"
 description = "An AI assistant for SQL generation"
 authors = [
diff --git a/sidekick/prompter.py b/sidekick/prompter.py
index 372eb66..22b62cf 100644
--- a/sidekick/prompter.py
+++ b/sidekick/prompter.py
@@ -24,7 +24,7 @@
                       execute_query_pd, extract_table_names,
                       generate_suggestions, save_query, setup_dir)
 
-__version__ = "0.2.3"
+__version__ = "0.2.4"
 
 # Load the config file and initialize required paths
 app_base_path = (Path(__file__).parent / "../").resolve()
diff --git a/ui/app.py b/ui/app.py
index f3df32a..3e8b116 100644
--- a/ui/app.py
+++ b/ui/app.py
@@ -317,7 +317,9 @@ async def _update_before_job_start(q: Q):
         q,
         "chat_card_progress",
         ui.form_card(
-            box=ui.box("vertical", height="500px"), items=[ui.progress(name='progress', label='Thinking ...', value=0)]))
+            box=ui.box("vertical", height="500px"), items=[ui.inline([ui.image(title='', path=q.user.progress_icon_path)], justify='center'),
+            ui.progress(name='progress', label='Thinking ...', value=0)
+            ]))
     await draw_additional_actions(q)
 
 
@@ -725,6 +727,7 @@ async def init(q: Q) -> None:
     q.client.timezone = "UTC"
     q.args.demo_mode = False
     q.app.toml = toml.load("app.toml")
+    q.user.progress_icon_path, = await q.site.upload(['./ui/assets/Loading_icon.gif'])
 
     username, profile_pic = q.auth.username, q.app.persona_path
     q.page["meta"] = ui.meta_card(

From 74a4b70fdcb6bb159381c4e4e00b8f610d54dd62 Mon Sep 17 00:00:00 2001
From: pramitchoudhary
Date: Fri, 16 Feb 2024 15:28:23 -0800
Subject: [PATCH 6/6] Fix timeout exception #44

---
 ui/app.py | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

diff --git a/ui/app.py b/ui/app.py
index 3e8b116..8acffe7 100644
--- a/ui/app.py
+++ b/ui/app.py
@@ -276,7 +276,7 @@ async def update_ui(q: Q, value: int):
     await q.page.save()
 
 
-def _execute(q: Q, func_type: str, loop: asyncio.AbstractEventLoop, time_out=50, **kwargs):
+def _execute(q: Q, func_type: str, loop: asyncio.AbstractEventLoop, time_out=100, **kwargs):
     count = 0
     future = executor = None
     result = task = None
@@ -304,8 +304,10 @@ def _execute(q: Q, func_type: str, loop: asyncio.AbstractEventLoop, time_out=50, **kwargs):
         if not future or future.done():
             future = asyncio.ensure_future(update_ui(q, count / time_out), loop=loop)
         if task.done():
-            result = task.result(timeout=1)
+            result = task.result()
             break
+    if not result:
+        result = ("Something went wrong, couldn't compile the response. Try again!", None)
     return result
@@ -378,7 +380,7 @@ async def chatbot(q: Q):
     # For regeneration, currently there are 2 modes
     # 1. Quick fast approach by throttling the temperature
     # 2. "Try harder mode (THM)" Slow approach by using the diverse beam search
-    llm_response = None
+    llm_response = []
     try:
         loop = asyncio.get_event_loop()
         if q.args.chatbot and ("preview data" in q.args.chatbot.lower() or "data preview" in q.args.chatbot.lower() or "preview" in q.args.chatbot.lower()) or f"preview {q.client.table_name}" in q.args.chatbot.lower():
@@ -414,11 +416,14 @@ async def chatbot(q: Q):
             if q.client.query is not None and q.client.query.strip() != "":
                 await _update_before_job_start(q)
                 with concurrent.futures.ThreadPoolExecutor() as pool:
-                    llm_response, alt_response, _ = await q.exec(pool, _execute, q, "ask",
+                    result = await q.exec(pool, _execute, q, "ask",
                         loop=loop,
                         is_regenerate=True,
                         is_regen_with_options=False
                     )
+                if result and len(result) > 2:
+                    llm_response = result[0]
+                    alt_response = result[1]
                 llm_response = "\n".join(llm_response)
                 await _update_after_job_end(q)
             else:
@@ -435,11 +440,14 @@ async def chatbot(q: Q):
             if q.client.query is not None and q.client.query.strip() != "":
                 await _update_before_job_start(q)
                 with concurrent.futures.ThreadPoolExecutor() as pool:
-                    llm_response, alt_response, _ = await q.exec(pool, _execute, q, "ask",
+                    result = await q.exec(pool, _execute, q, "ask",
                         loop=loop,
                         is_regenerate=False,
                         is_regen_with_options=True
                     )
+                if result and len(result) > 2:
+                    llm_response = result[0]
+                    alt_response = result[1]
                 response = "\n".join(llm_response)
                 await _update_after_job_end(q)
             if alt_response:
@@ -459,10 +467,13 @@ async def chatbot(q: Q):
                 q.client.query = question
                 await _update_before_job_start(q)
                 with concurrent.futures.ThreadPoolExecutor() as pool:
-                    llm_response, alt_response, err = await q.exec(pool, _execute, q, "ask",
+                    result = await q.exec(pool, _execute, q, "ask",
                         loop=loop,
                         debug_mode=q.args.debug_mode
                     )
+                if result and len(result) > 2:
+                    llm_response = result[0]
+                    alt_response = result[1]
                 llm_response = "\n".join(llm_response)
                 await _update_after_job_end(q)
         except (MemoryError, RuntimeError) as e:
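
The pattern these six commits converge on — submit the blocking call (recommend_suggestions or ask) to a ThreadPoolExecutor, poll it once per second, and push progress repaints back onto the Wave event loop until the task finishes or times out — can be reduced to a small standalone sketch. The sketch below is illustrative only: fake_blocking_job and run_with_progress are hypothetical stand-ins, not functions from this codebase, and the card/component names ('chat_card_progress', 'progress') merely mirror what the patches create. One assumption worth flagging: asyncio.run_coroutine_threadsafe is the documented thread-safe way to schedule a coroutine on a running loop from a worker thread, whereas asyncio.ensure_future(..., loop=loop) as used in _execute is not guaranteed safe when called from outside the loop's thread.

import asyncio
import concurrent.futures
import time


def fake_blocking_job() -> str:
    # Stand-in for recommend_suggestions/ask: any slow, synchronous call.
    time.sleep(5)
    return "done"


def run_with_progress(q, loop: asyncio.AbstractEventLoop, time_out: int = 100):
    async def paint(value: float):
        # Repaint the progress bar; assumes a 'chat_card_progress' form card
        # holding a ui.progress named 'progress', as the patches set up.
        q.page["chat_card_progress"].progress.value = value
        await q.page.save()

    with concurrent.futures.ThreadPoolExecutor() as executor:
        task = executor.submit(fake_blocking_job)
        for count in range(1, time_out + 1):
            if task.done():
                return task.result()
            # Hand the repaint coroutine to the event loop from this worker
            # thread; run_coroutine_threadsafe is safe for cross-thread use.
            asyncio.run_coroutine_threadsafe(paint(count / time_out), loop)
            time.sleep(1)
    return None  # timed out; caller substitutes a fallback message

As in the patches, the caller would run this helper off the event loop (e.g. via q.exec with a pool) and treat None as the "Something went wrong" case. Note that exiting the executor's with-block still waits for the worker to finish, so the timeout ends the progress loop rather than killing the underlying job.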