diff --git a/examples/genai-rag-multimodal/multimodal_rag_langchain.ipynb b/examples/genai-rag-multimodal/multimodal_rag_langchain.ipynb
index 7e1cea33..4c138eed 100644
--- a/examples/genai-rag-multimodal/multimodal_rag_langchain.ipynb
+++ b/examples/genai-rag-multimodal/multimodal_rag_langchain.ipynb
@@ -599,7 +599,7 @@
     "\n",
     "\n",
     "# Image summaries\n",
-    "img_base64_list, image_summaries = generate_img_summaries(\".\")"
+    "img_base64_list, image_summaries = generate_img_summaries(\"./intro_multimodal_rag_old_version\")"
    ]
   },
   {
@@ -824,8 +824,17 @@
     "    for i, s in enumerate(text_summaries + table_summaries + image_summaries)\n",
     "]\n",
     "\n",
-    "retriever_multi_vector_img.docstore.mset(list(zip(doc_ids, doc_contents)))\n",
+    "list_of_docs = list(zip(doc_ids, doc_contents))\n",
     "\n",
+    "retriever_multi_vector_img.docstore.mset(list_of_docs)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "# If using Vertex AI Vector Search, this will take a while to complete.\n",
     "# You can cancel this cell and continue later.\n",
     "retriever_multi_vector_img.vectorstore.add_documents(summary_docs)"
@@ -1000,6 +1009,166 @@
     "Markdown(result)"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### RAGAS Evaluation\n",
+    "\n",
+    "In the cells below, we use RAGAS to evaluate the RAG pipeline using text-based context."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install ragas"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "questions = [\n",
+    "    \"How did COVID-19 initially impact Google's advertising revenue in 2020?\",\n",
+    "    \"How did Google's advertising revenue recover from the initial COVID-19 impact?\",\n",
+    "    \"What was the primary driver of Google's operating cash flow in 2020?\",\n",
+    "    \"How did Google's share repurchases compare to the previous year in 2020?\"\n",
+    "]\n",
+    "\n",
+    "golden_answers = [\n",
+    "    \"COVID-19 initially impacted Google's advertising revenue in 2020 in two ways: users searched for less commercially-driven topics, reducing the relevance and value of ads displayed, and businesses cut back on advertising budgets due to the economic downturn caused by the pandemic.\",\n",
+    "    \"Google's advertising revenue recovered from the initial COVID-19 impact through a combination of factors: user search activity shifted back to more commercially-driven topics, increasing the effectiveness of advertising, and as the economic climate improved, businesses began to invest more heavily in advertising again.\",\n",
+    "    \"The primary driver of Google's operating cash flow in 2020 was revenue generated from its advertising products, totaling $91.7 billion.\",\n",
+    "    \"Google's share repurchases in 2020 were $50.3 billion, reflecting a significant increase of 62% compared to the prior year.\"\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def summarize_image_context(doc_base64):\n",
+    "    prompt = \"\"\"You are an assistant tasked with summarizing images for retrieval. \\\n",
+    "    These summaries will be embedded and used to retrieve the raw image. \\\n",
+    "    Give a concise summary of the image that is well optimized for retrieval.\n",
+    "    If it's a table, extract all elements of the table.\n",
+    "    If it's a graph, explain the findings in the graph.\n",
+    "    Do not include any numbers that are not mentioned in the image.\n",
+    "    \"\"\"\n",
+    "    return image_summarize(doc_base64, prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "data_samples = {\n",
+    "    \"contexts\": [],\n",
+    "    \"question\": [],\n",
+    "    \"answer\": [],\n",
+    "    \"ground_truth\": []\n",
+    "    }\n",
+    "\n",
+    "for i, question in enumerate(questions):\n",
+    "    docs = retriever_multi_vector_img.get_relevant_documents(question, limit=10)\n",
+    "    image_contexts = []\n",
+    "\n",
+    "    source_docs = split_image_text_types(docs)\n",
+    "\n",
+    "    if len(source_docs[\"images\"]) > 0:\n",
+    "        for image in source_docs[\"images\"]:\n",
+    "            image_contexts.append(summarize_image_context(image))\n",
+    "\n",
+    "    text_context = source_docs[\"texts\"]\n",
+    "\n",
+    "    data_samples[\"contexts\"].append(text_context + image_contexts)\n",
+    "    data_samples[\"question\"].append(question)\n",
+    "    data_samples[\"answer\"].append(chain_multimodal_rag.invoke(question))\n",
+    "    data_samples[\"ground_truth\"].append(golden_answers[i])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from datasets import Dataset\n",
+    "\n",
+    "dataset = Dataset.from_dict(data_samples)\n",
+    "\n",
+    "\n",
+    "from ragas.metrics import (\n",
+    "    context_precision,\n",
+    "    answer_relevancy,\n",
+    "    faithfulness,\n",
+    "    context_recall,\n",
+    "    answer_similarity,\n",
+    "    answer_correctness,\n",
+    ")\n",
+    "from ragas.metrics.critique import harmfulness\n",
+    "\n",
+    "# list of metrics we're going to use\n",
+    "metrics = [\n",
+    "    faithfulness,\n",
+    "    answer_relevancy,\n",
+    "    context_recall,\n",
+    "    context_precision,\n",
+    "    harmfulness,\n",
+    "    answer_similarity,\n",
+    "    answer_correctness,\n",
+    "]\n",
+    "\n",
+    "from langchain_google_vertexai import ChatVertexAI, VertexAIEmbeddings\n",
+    "\n",
+    "config = {\n",
+    "    \"chat_model_id\": \"gemini-1.0-pro-002\",\n",
+    "    \"embedding_model_id\": \"textembedding-gecko\",\n",
+    "}\n",
+    "\n",
+    "\n",
+    "vertextai_llm = ChatVertexAI(model_name=config[\"chat_model_id\"])\n",
+    "vertextai_embeddings = VertexAIEmbeddings(model_name=config[\"embedding_model_id\"])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "dataset.to_pandas()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "from ragas import evaluate\n",
+    "\n",
+    "result = evaluate(\n",
+    "    dataset,  # using 1 as example due to quota constraints\n",
+    "    metrics=metrics,\n",
+    "    llm=vertextai_llm,\n",
+    "    embeddings=vertextai_embeddings,\n",
+    ")\n",
+    "\n",
+    "result.to_pandas()"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {