From 09f3b2541fe6237294185064e9039946e0a9789e Mon Sep 17 00:00:00 2001 From: Assaf Elovic Date: Wed, 4 Sep 2024 20:34:19 +0300 Subject: [PATCH] Deploy website - based on 6ac04b28493542e6f1027809c68865d43fa878c3 --- 404.html | 4 ++-- assets/js/6331da57.84ddc2e6.js | 1 - assets/js/6331da57.9caf083f.js | 1 + ...runtime~main.0ade560e.js => runtime~main.e5fe446d.js} | 2 +- blog.html | 4 ++-- blog/archive.html | 4 ++-- blog/building-gpt-researcher.html | 4 ++-- blog/building-openai-assistant.html | 4 ++-- blog/gptr-langgraph.html | 4 ++-- blog/tags.html | 4 ++-- blog/tags/assistant-api.html | 4 ++-- blog/tags/autonomous-agent.html | 4 ++-- blog/tags/github.html | 4 ++-- blog/tags/gpt-researcher.html | 4 ++-- blog/tags/langchain.html | 4 ++-- blog/tags/langgraph.html | 4 ++-- blog/tags/multi-agents.html | 4 ++-- blog/tags/openai.html | 4 ++-- blog/tags/opensource.html | 4 ++-- blog/tags/search-api.html | 4 ++-- blog/tags/tavily.html | 4 ++-- docs/contribute.html | 4 ++-- docs/examples/examples.html | 4 ++-- docs/faq.html | 4 ++-- docs/gpt-researcher/config.html | 4 ++-- docs/gpt-researcher/example.html | 4 ++-- docs/gpt-researcher/frontend.html | 4 ++-- docs/gpt-researcher/getting-started.html | 4 ++-- docs/gpt-researcher/introduction.html | 4 ++-- docs/gpt-researcher/langgraph.html | 4 ++-- docs/gpt-researcher/llms.html | 4 ++-- docs/gpt-researcher/pip-package.html | 4 ++-- docs/gpt-researcher/retrievers.html | 4 ++-- docs/gpt-researcher/roadmap.html | 4 ++-- docs/gpt-researcher/tailored-research.html | 9 +++++---- docs/gpt-researcher/troubleshooting.html | 4 ++-- docs/gpt-researcher/vector-stores.html | 4 ++-- docs/reference/config/config.html | 4 ++-- docs/reference/config/singleton.html | 4 ++-- docs/reference/processing/html.html | 4 ++-- docs/reference/processing/text.html | 4 ++-- docs/welcome.html | 4 ++-- index.html | 4 ++-- search-index.json | 2 +- search.html | 4 ++-- 45 files changed, 88 insertions(+), 87 deletions(-) delete mode 100644 assets/js/6331da57.84ddc2e6.js create mode 100644 assets/js/6331da57.9caf083f.js rename assets/js/{runtime~main.0ade560e.js => runtime~main.e5fe446d.js} (98%) diff --git a/404.html b/404.html index 86cf3c0b7..da13ec9ae 100644 --- a/404.html +++ b/404.html @@ -7,13 +7,13 @@ Page Not Found | GPT Researcher - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

[assets/js/6331da57.84ddc2e6.js deleted; assets/js/6331da57.9caf083f.js created — the minified docs chunk was regenerated for the updated "Tailored Research" page: its "Research on Specific Sources" example now passes the sources as source_urls with the "static" report_source (instead of a report_type), and its local-documents example drops the explicit report_type argument.]

[assets/js/runtime~main.0ade560e.js renamed to assets/js/runtime~main.e5fe446d.js — the chunk-hash map now points at 6331da57.9caf083f.js.]

diff --git a/blog.html b/blog.html
Blog | GPT Researcher

· 10 min read
Assaf Elovic


Introducing the GPT Researcher Multi-Agent Assistant

Learn how to build an autonomous research assistant using LangGraph with a team of specialized AI agents

It has only been a year since the initial release of GPT Researcher, but methods for building, testing, and deploying AI agents have already evolved significantly. That’s just the nature and speed of current AI progress. What started as simple zero-shot or few-shot prompting has quickly evolved into agent function calling, RAG, and now, finally, agentic workflows (aka “flow engineering”).

Andrew Ng has recently stated, “I think AI agent workflows will drive massive AI progress this year — perhaps even more than the next generation of foundation models. This is an important trend, and I urge everyone who works in AI to pay attention to it.”

In this article you will learn why multi-agent workflows are the current best standard and how to build the optimal autonomous research multi-agent assistant using LangGraph.

To skip this tutorial, feel free to check out the Github repo of GPT Researcher x LangGraph.

Introducing LangGraph

LangGraph is an extension of LangChain aimed at creating agent and multi-agent flows. It adds the ability to create cyclical flows and comes with memory built in — both important attributes for creating agents.

LangGraph provides developers with a high degree of controllability, which is important for creating custom agents and flows. Nearly all agents in production are customized towards the specific use case they are trying to solve. LangGraph gives you the flexibility to create arbitrary customized agents, while providing an intuitive developer experience for doing so.
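To make “cyclical flows” concrete, here is a minimal, self-contained sketch (the node name and stop condition are illustrative) of a graph that loops until a condition is met:

from typing import TypedDict
from langgraph.graph import StateGraph, END

class CounterState(TypedDict):
    count: int

def work(state: CounterState) -> dict:
    # A simple node: increments the counter in the shared state
    return {"count": state["count"] + 1}

workflow = StateGraph(CounterState)
workflow.add_node("work", work)
workflow.set_entry_point("work")
# A conditional edge can route back into the same node, forming a cycle
workflow.add_conditional_edges("work",
                               lambda s: "done" if s["count"] >= 3 else "again",
                               {"done": END, "again": "work"})

app = workflow.compile()
print(app.invoke({"count": 0}))  # -> {'count': 3}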

Enough with the smalltalk, let’s start building!

Building the Ultimate Autonomous Research Agent

By leveraging LangGraph, the research process can be significantly improved in depth and quality through multiple agents with specialized skills. Having every agent focus on and specialize in a single skill allows for better separation of concerns, customizability, and further development at scale as the project grows.

Inspired by the recent STORM paper, this example showcases how a team of AI agents can work together to conduct research on a given topic, from planning to publication. This example will also leverage the leading autonomous research agent GPT Researcher.

The Research Agent Team

The research team consists of seven LLM agents:

  • Chief Editor — Oversees the research process and manages the team. This is the “master” agent that coordinates the other agents using LangGraph. This agent acts as the main LangGraph interface.
  • GPT Researcher — A specialized autonomous agent that conducts in depth research on a given topic.
  • Editor — Responsible for planning the research outline and structure.
  • Reviewer — Validates the correctness of the research results given a set of criteria.
  • Reviser — Revises the research results based on the feedback from the reviewer.
  • Writer — Responsible for compiling and writing the final report.
  • Publisher — Responsible for publishing the final report in various formats.

Architecture

As seen below, the automation process is based on the following stages: planning the research, data collection and analysis, review and revision, writing the report, and finally publication:

[Architecture diagram]

More specifically the process is as follows:

  • Browser (gpt-researcher) — Browses the internet for initial research based on the given research task. This step is crucial for LLMs to plan the research process based on up-to-date and relevant information, rather than relying solely on pre-trained data for a given task or topic.

  • Editor — Plans the report outline and structure based on the initial research. The Editor is also responsible for triggering the parallel research tasks based on the planned outline.

  • For each outline topic (in parallel):

    • Researcher (gpt-researcher) — Runs in-depth research on the subtopics and writes a draft. This agent leverages the GPT Researcher Python package under the hood for optimized, in-depth, and factual research reports.
    • Reviewer — Validates the correctness of the draft given a set of guidelines and provides feedback to the reviser (if any).
    • Reviser — Revises the draft until it is satisfactory based on the reviewer feedback.
  • Writer — Compiles and writes the final report including an introduction, conclusion and references section from the given research findings.

  • Publisher — Publishes the final report in multiple formats such as PDF, Docx, and Markdown.

We will not dive into all the code since there’s a lot of it, but will focus mostly on the interesting parts I’ve found valuable to share.

Define the Graph State

One of my favorite features with LangGraph is state management. States in LangGraph are facilitated through a structured approach where developers define a GraphState that encapsulates the entire state of the application. Each node in the graph can modify this state, allowing for dynamic responses based on the evolving context of the interaction.

As at the start of any technical design, considering the data schema throughout the application is key. In this case we’ll define a ResearchState like so:

class ResearchState(TypedDict):
    task: dict
    initial_research: str
    sections: List[str]
    research_data: List[dict]
    # Report layout
    title: str
    headers: dict
    date: str
    table_of_contents: str
    introduction: str
    conclusion: str
    sources: List[str]
    report: str

As seen above, the state is divided into two main areas: the research task and the report layout content. As data circulates through the graph agents, each agent will, in turn, generate new data based on the existing state and update it for subsequent processing further down the graph with other agents.
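In practice, each node is simply a function (sync or async) that receives the current state and returns only the keys it wants to update. A minimal, hypothetical node might look like this (the real planning logic lives in the Editor agent; this only illustrates the contract):

async def plan_outline(state: ResearchState) -> dict:
    # Read whatever is needed from the incoming state, then return a dict
    # containing only the keys to merge back into the shared ResearchState
    sections = ["Background", "Current Trends", "Outlook"]  # illustrative output
    return {"sections": sections}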

We can then initialize the graph with the following:

from langgraph.graph import StateGraph
workflow = StateGraph(ResearchState)

As stated above, one of the great things about multi-agent development is building each agent with specialized and scoped skills. Let’s take the Researcher agent as an example, using the GPT Researcher Python package:

from gpt_researcher import GPTResearcher

class ResearchAgent:
    def __init__(self):
        pass

    async def research(self, query: str, report_type: str = "research_report", parent_query: str = ""):
        # Initialize the researcher
        researcher = GPTResearcher(parent_query=parent_query, query=query, report_type=report_type, config_path=None)
        # Conduct research on the given query
        await researcher.conduct_research()
        # Write the report
        report = await researcher.write_report()

        return report

As you can see above, we’ve created an instance of the Research agent. Now let’s assume we’ve done the same for each of the team’s agents. After creating all of the agents, we’d initialize the graph with LangGraph:

def init_research_team(self):
    # Initialize agents
    editor_agent = EditorAgent(self.task)
    research_agent = ResearchAgent()
    writer_agent = WriterAgent()
    publisher_agent = PublisherAgent(self.output_dir)

    # Define a LangGraph StateGraph with the ResearchState
    workflow = StateGraph(ResearchState)

    # Add nodes for each agent
    workflow.add_node("browser", research_agent.run_initial_research)
    workflow.add_node("planner", editor_agent.plan_research)
    workflow.add_node("researcher", editor_agent.run_parallel_research)
    workflow.add_node("writer", writer_agent.run)
    workflow.add_node("publisher", publisher_agent.run)

    workflow.add_edge('browser', 'planner')
    workflow.add_edge('planner', 'researcher')
    workflow.add_edge('researcher', 'writer')
    workflow.add_edge('writer', 'publisher')

    # Set up start and end nodes
    workflow.set_entry_point("browser")
    workflow.add_edge('publisher', END)

    return workflow

As seen above, creating the LangGraph graph is very straightforward and consists of three main functions: add_node, add_edge and set_entry_point. With these main functions you can first add the nodes to the graph, connect the edges and finally set the starting point.
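Note that these functions only define the graph; to actually execute the team, the workflow still has to be compiled into a runnable. A minimal sketch (assuming init_research_team lives on a chief-editor style class that holds the task, as above):

async def run_research_task(self):
    workflow = self.init_research_team()
    # Compile the graph into a runnable and invoke it with the initial state
    chain = workflow.compile()
    result = await chain.ainvoke({"task": self.task})
    return result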

Focus check: If you’ve been following the code and architecture properly, you’ll notice that the Reviewer and Reviser agents are missing in the initialization above. Let’s dive into it!

A Graph within a Graph to Support Stateful Parallelization

This was the most exciting part of my experience working with LangGraph! One exciting feature of this autonomous assistant is that each research task runs in parallel, and each draft is then reviewed and revised based on a set of predefined guidelines.

Knowing how to leverage parallel work within a process is key for optimizing speed. But how would you trigger parallel agent work if all agents report to the same state? This can cause race conditions and inconsistencies in the final data report. To solve this, you can create a sub-graph that is triggered from the main LangGraph instance. This sub-graph holds its own state for each parallel run, which resolves the issues raised above.

As we’ve done before, let’s define the LangGraph state and its agents. Since this sub-graph basically reviews and revises a research draft, we’ll define the state with draft information:

class DraftState(TypedDict):
    task: dict
    topic: str
    draft: dict
    review: str
    revision_notes: str
As seen in the DraftState, we mostly care about the topic discussed and the review and revision notes, as the reviewer and reviser communicate with each other to finalize the subtopic research report. To create the cyclic condition, we’ll take advantage of the last important piece of LangGraph: conditional edges:

async def run_parallel_research(self, research_state: dict):
    # research_agent, reviewer_agent and reviser_agent are assumed to be
    # initialized elsewhere, as we did for the main research team
    workflow = StateGraph(DraftState)

    workflow.add_node("researcher", research_agent.run_depth_research)
    workflow.add_node("reviewer", reviewer_agent.run)
    workflow.add_node("reviser", reviser_agent.run)

    # Set up edges: researcher -> reviewer -> reviser -> reviewer ...
    workflow.set_entry_point("researcher")
    workflow.add_edge('researcher', 'reviewer')
    workflow.add_edge('reviser', 'reviewer')
    workflow.add_conditional_edges('reviewer',
                                   (lambda draft: "accept" if draft['review'] is None else "revise"),
                                   {"accept": END, "revise": "reviser"})

By defining the conditional edges, the graph directs to the reviser if review notes exist from the reviewer, or ends the cycle with the final draft. If you go back to the main graph we’ve built, you’ll see that this parallel work happens under a node named “researcher”, called by the ChiefEditor agent.
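The snippet above stops at the graph definition. To complete the picture, here is a hedged sketch of how the sub-graph could be compiled and run once per outline topic in parallel (the exact orchestration in the repository may differ):

# Hypothetical continuation inside run_parallel_research, after the workflow above is defined
chain = workflow.compile()

# Launch one sub-graph instance per planned section, each holding its own DraftState
queries = research_state.get("sections", [])
research_results = await asyncio.gather(
    *[chain.ainvoke({"task": research_state.get("task"), "topic": query}) for query in queries]
)
return {"research_data": research_results}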

Running the Research Assistant

After finalizing the agents, states and graphs, it’s time to run our research assistant! To make it easier to customize, the assistant runs with a given task.json file:

{
  "query": "Is AI in a hype cycle?",
  "max_sections": 3,
  "publish_formats": {
    "markdown": true,
    "pdf": true,
    "docx": true
  },
  "follow_guidelines": false,
  "model": "gpt-4-turbo",
  "guidelines": [
    "The report MUST be written in APA format",
    "Each sub section MUST include supporting sources using hyperlinks. If none exist, erase the sub section or rewrite it to be a part of the previous section",
    "The report MUST be written in spanish"
  ]
}

The task object is pretty self-explanatory; however, note that setting follow_guidelines to false causes the graph to skip the revision step and ignore the defined guidelines. Also, the max_sections field defines how many subheaders to research. Fewer sections will generate a shorter report.

Running the assistant will result in a final research report in formats such as Markdown, PDF and Docx.
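Putting it all together, a minimal, hypothetical entry point could look like the following (the module path and class name here are illustrative; see the repository for the exact API):

import asyncio
import json

from multi_agents.agents import ChiefEditorAgent  # illustrative import path

async def main():
    # Load the research task described above
    with open("task.json", "r") as f:
        task = json.load(f)

    chief_editor = ChiefEditorAgent(task)
    report = await chief_editor.run_research_task()
    print(report)

if __name__ == "__main__":
    asyncio.run(main())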

To download and run the example check out the GPT Researcher x LangGraph open source page.

What’s Next?

Going forward, there are super exciting things to think about. Human in the loop is key for optimized AI experiences. Having a human help the assistant revise and focus on just the right research plan, topics and outline would enhance the overall quality and experience. Relying on human intervention throughout the AI flow also ensures correctness, a sense of control, and more deterministic results. Happy to see that LangGraph already supports this out of the box, as seen here.
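For example, here is a sketch of pausing the main graph before the planner node so a human can inspect or edit the research plan (this assumes LangGraph’s checkpointing API; details may vary by version):

from langgraph.checkpoint.memory import MemorySaver

# Compile with a checkpointer and pause before "planner" runs, so a human can
# review or edit the state before the outline is produced
chain = workflow.compile(checkpointer=MemorySaver(), interrupt_before=["planner"])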

In addition, having support for research about both web and local data would be key for many types of business and personal use cases.

Lastly, more effort can be invested in improving the quality of retrieved sources and in making sure the final report follows the optimal storyline.

A step forward for LangGraph and multi-agent collaboration as a whole would be assistants that can plan and generate graphs dynamically based on given tasks. This vision would allow assistants to choose only a subset of agents for a given task and plan their strategy based on the graph fundamentals presented in this article, opening a whole new world of possibilities. Given the pace of innovation in the AI space, it won’t be long before a new disruptive version of GPT Researcher is launched. Looking forward to what the future brings!

To keep track of this project’s ongoing progress and updates please join our Discord community. And as always, if you have any feedback or further questions, please comment below!

· 6 min read
Assaf Elovic

OpenAI has done it again with a groundbreaking DevDay showcasing some of the latest improvements to the OpenAI suite of tools, products and services. One major release was the new Assistants API that makes it easier for developers to build their own assistive AI apps that have goals and can call models and tools.

The new Assistants API currently supports three types of tools: Code Interpreter, Retrieval, and Function calling. Although you might expect the Retrieval tool to support online information retrieval (as search APIs or ChatGPT plugins do), it currently only supports raw data such as text or CSV files.

This blog will demonstrate how to leverage the latest Assistants API with online information using the function calling tool.

To skip the tutorial below, feel free to check out the full Github Gist here.

At a high level, a typical integration of the Assistants API has the following steps:

  • Create an Assistant in the API by defining its custom instructions and picking a model. If helpful, enable tools like Code Interpreter, Retrieval, and Function calling.
  • Create a Thread when a user starts a conversation.
  • Add Messages to the Thread as the user asks questions.
  • Run the Assistant on the Thread to trigger responses. This automatically calls the relevant tools.

As you can see below, an Assistant object includes Threads for storing and handling conversation sessions between the assistant and users, and Runs for invoking an Assistant on a Thread.

[OpenAI Assistant object diagram]

Let’s go ahead and implement these steps one by one! For the example, we will build a finance GPT that can provide insights about financial questions. We will use the OpenAI Python SDK v1.2 and Tavily Search API.

First things first, let’s define the assistant’s instructions:

assistant_prompt_instruction = """You are a finance expert. 
Your goal is to provide answers based on information from the internet.
You must use the provided Tavily search API function to find relevant online information.
You should never use your own knowledge to answer questions.
Please include relevant url sources in the end of your answers.
"""

Next, let’s finalize step 1 and create an assistant using the latest GPT-4 Turbo model (128K context), with a function tool that calls the Tavily web search API:

# Create an assistant
assistant = client.beta.assistants.create(
    instructions=assistant_prompt_instruction,
    model="gpt-4-1106-preview",
    tools=[{
        "type": "function",
        "function": {
            "name": "tavily_search",
            "description": "Get information on recent events from the web.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "The search query to use. For example: 'Latest news on Nvidia stock performance'"},
                },
                "required": ["query"]
            }
        }
    }]
)

Steps 2 and 3 are quite straightforward: we’ll initiate a new thread and update it with a user message:

thread = client.beta.threads.create()
user_input = input("You: ")
message = client.beta.threads.messages.create(
    thread_id=thread.id,
    role="user",
    content=user_input,
)

Finally, we’ll run the assistant on the thread to trigger the function call and get the response:

run = client.beta.threads.runs.create(
    thread_id=thread.id,
    assistant_id=assistant_id,
)

So far so good! But this is where it gets a bit messy. Unlike the regular GPT APIs, the Assistants API doesn’t return a synchronous response; it returns a status instead. This allows for asynchronous operations across assistants, but requires more overhead for fetching statuses and handling each one manually.

[Run status lifecycle diagram]

To manage this status lifecycle, let’s build a reusable function that handles waiting for the various statuses (such as ‘requires_action’):

# Function to wait for a run to complete
def wait_for_run_completion(thread_id, run_id):
    while True:
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        print(f"Current run status: {run.status}")
        if run.status in ['completed', 'failed', 'requires_action']:
            return run

This function polls once a second and keeps sleeping until the run reaches a terminal state, such as being completed, failed, or requiring an action from a function call.

We’re almost there! Lastly, let’s handle the case where the assistant wants to call the web search API:

# Function to handle tool output submission
def submit_tool_outputs(thread_id, run_id, tools_to_call):
    tool_output_array = []
    for tool in tools_to_call:
        output = None
        tool_call_id = tool.id
        function_name = tool.function.name
        function_args = tool.function.arguments

        if function_name == "tavily_search":
            output = tavily_search(query=json.loads(function_args)["query"])

        if output:
            tool_output_array.append({"tool_call_id": tool_call_id, "output": output})

    return client.beta.threads.runs.submit_tool_outputs(
        thread_id=thread_id,
        run_id=run_id,
        tool_outputs=tool_output_array
    )

As seen above, if the assistant has decided that a function call should be triggered, we extract the required function parameters and pass the outputs back to the running thread. We catch this status and call our functions as seen below:

if run.status == 'requires_action':
    run = submit_tool_outputs(thread.id, run.id, run.required_action.submit_tool_outputs.tool_calls)
    run = wait_for_run_completion(thread.id, run.id)

That’s it! We now have a working OpenAI Assistant that can be used to answer financial questions using real-time online information. Below is the full runnable code:

import os
import json
import time
from openai import OpenAI
from tavily import TavilyClient

# Initialize clients with API keys
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])

assistant_prompt_instruction = """You are a finance expert.
Your goal is to provide answers based on information from the internet.
You must use the provided Tavily search API function to find relevant online information.
You should never use your own knowledge to answer questions.
Please include relevant url sources in the end of your answers.
"""

# Function to perform a Tavily search
def tavily_search(query):
    search_result = tavily_client.get_search_context(query, search_depth="advanced", max_tokens=8000)
    return search_result

# Function to wait for a run to complete
def wait_for_run_completion(thread_id, run_id):
    while True:
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        print(f"Current run status: {run.status}")
        if run.status in ['completed', 'failed', 'requires_action']:
            return run

# Function to handle tool output submission
def submit_tool_outputs(thread_id, run_id, tools_to_call):
    tool_output_array = []
    for tool in tools_to_call:
        output = None
        tool_call_id = tool.id
        function_name = tool.function.name
        function_args = tool.function.arguments

        if function_name == "tavily_search":
            output = tavily_search(query=json.loads(function_args)["query"])

        if output:
            tool_output_array.append({"tool_call_id": tool_call_id, "output": output})

    return client.beta.threads.runs.submit_tool_outputs(
        thread_id=thread_id,
        run_id=run_id,
        tool_outputs=tool_output_array
    )

# Function to print messages from a thread
def print_messages_from_thread(thread_id):
    messages = client.beta.threads.messages.list(thread_id=thread_id)
    for msg in messages:
        print(f"{msg.role}: {msg.content[0].text.value}")

# Create an assistant
assistant = client.beta.assistants.create(
    instructions=assistant_prompt_instruction,
    model="gpt-4-1106-preview",
    tools=[{
        "type": "function",
        "function": {
            "name": "tavily_search",
            "description": "Get information on recent events from the web.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "The search query to use. For example: 'Latest news on Nvidia stock performance'"},
                },
                "required": ["query"]
            }
        }
    }]
)
assistant_id = assistant.id
print(f"Assistant ID: {assistant_id}")

# Create a thread
thread = client.beta.threads.create()
print(f"Thread: {thread}")

# Ongoing conversation loop
while True:
    user_input = input("You: ")
    if user_input.lower() == 'exit':
        break

    # Create a message
    message = client.beta.threads.messages.create(
        thread_id=thread.id,
        role="user",
        content=user_input,
    )

    # Create a run
    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant_id,
    )
    print(f"Run ID: {run.id}")

    # Wait for run to complete
    run = wait_for_run_completion(thread.id, run.id)

    if run.status == 'failed':
        print(run.last_error)  # the Run object exposes failure details on last_error
        continue
    elif run.status == 'requires_action':
        run = submit_tool_outputs(thread.id, run.id, run.required_action.submit_tool_outputs.tool_calls)
        run = wait_for_run_completion(thread.id, run.id)

    # Print messages from the thread
    print_messages_from_thread(thread.id)

The assistant can be further customized and improved using additional retrieval information, OpenAI’s Code Interpreter, and more. Also, you can go ahead and add more function tools to make the assistant even smarter.
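For instance, adding a second function tool is just another entry in the tools list plus a matching branch in submit_tool_outputs. The sketch below is purely illustrative: get_stock_price and its schema are hypothetical, a helper you would implement yourself:

# Hypothetical extra tool definition, appended to the assistant's tools list
stock_price_tool = {
    "type": "function",
    "function": {
        "name": "get_stock_price",
        "description": "Get the latest closing price for a stock ticker.",
        "parameters": {
            "type": "object",
            "properties": {
                "ticker": {"type": "string", "description": "Stock ticker symbol, e.g. 'NVDA'"}
            },
            "required": ["ticker"]
        }
    }
}

# And in submit_tool_outputs, dispatch on the new function name as well:
# if function_name == "get_stock_price":
#     output = get_stock_price(**json.loads(function_args))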

Feel free to drop a comment below if you have any further questions!


diff --git a/blog/archive.html b/blog/archive.html
index d171af573..230d9a672 100644
--- a/blog/archive.html
+++ b/blog/archive.html
@@ -7,13 +7,13 @@ Archive | GPT Researcher

diff --git a/blog/building-gpt-researcher.html b/blog/building-gpt-researcher.html
index db975fcee..f8fabbd71 100644
--- a/blog/building-gpt-researcher.html
+++ b/blog/building-gpt-researcher.html
@@ -7,13 +7,13 @@ How we built GPT Researcher | GPT Researcher

How we built GPT Researcher

· 7 min read
Assaf Elovic

After AutoGPT was published, we immediately took it for a spin. The first use case that came to mind was autonomous online research. Forming objective conclusions for manual research tasks can take time, sometimes weeks, to find the right resources and information. Seeing how well AutoGPT created tasks and executed them got me thinking about the great potential of using AI to conduct comprehensive research and what it meant for the future of online research.

But the problem with AutoGPT was that it usually ran into never-ending loops, required human interference for almost every step, constantly lost track of its progress, and almost never actually completed the task.

Moreover, the information and context gathered during the research task were often lost (for example, sources were not tracked), and results were sometimes hallucinated.

My passion for leveraging AI for online research, together with the limitations I found, put me on a mission to solve them while sharing my work with the world. This is when I created GPT Researcher — an open source autonomous agent for online comprehensive research.

In this article, I will share the steps that guided me toward the proposed solution.

Moving from infinite loops to deterministic results

The first step in solving these issues was to seek a more deterministic solution that could ultimately guarantee completing any research task within a fixed time frame, without human interference.

This is when we stumbled upon the recent paper Plan and Solve. The paper aims to provide a better solution for the challenges stated above. The idea is quite simple and consists of two components: first, devising a plan to divide the entire task into smaller subtasks and then carrying out the subtasks according to the plan.

Planner-Executor model

As it relates to research, first create an outline of questions to research related to the task, and then deterministically execute an agent for every outline item. This approach eliminates the uncertainty in task completion by breaking the agent steps into a deterministic finite set of tasks. Once all tasks are completed, the agent concludes the research.
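
As a rough sketch of this plan-and-execute flow (the helper names here are illustrative, not GPT Researcher’s actual internals):

async def run_research(task: str) -> str:
    # Plan: derive a fixed outline of research questions for the task
    questions = await generate_outline_questions(task)  # illustrative planner helper
    # Execute: run one research agent per outline item (a finite, deterministic set of steps)
    summaries = [await research_question(task, q) for q in questions]
    # Conclude the research once every planned sub-task has completed
    return await write_report(task, summaries)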

Following this strategy improved the reliability of completing research tasks to 100%. The remaining challenge was how to improve quality and speed.

Aiming for objective and unbiased results

The biggest challenge with LLMs is the lack of factuality and of unbiased responses, caused by hallucinations and out-of-date training sets (GPT is currently trained on datasets from 2021). The irony is that research tasks must optimize for exactly these two criteria: factuality and objectivity.

To tackle these challenges, we assumed the following:

  • Law of large numbers — More content will lead to less biased results. Especially if gathered properly.
  • Leveraging LLMs for the summarization of factual information can significantly improve the overall factuality of results.

After experimenting with LLMs for quite some time, we can say that the areas where foundation models excel are in the summarization and rewriting of given content. So, in theory, if LLMs only review given content and summarize and rewrite it, potentially it would reduce hallucinations significantly.

In addition, assuming the given content is unbiased, or at least holds opinions and information from all sides of a topic, the rewritten result would also be unbiased. So how can content be unbiased? The law of large numbers. In other words, if enough sites that hold relevant information are scraped, the possibility of biased information reduces greatly. So the idea would be to scrape just enough sites together to form an objective opinion on any topic.
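
As a minimal sketch of such a content-grounded summarization step (the function and prompt wording are illustrative, assuming the OpenAI chat completions API):

def summarize_grounded(client, url: str, content: str, query: str) -> str:
    # Constrain the model to the scraped content only (no outside knowledge)
    prompt = (
        f'Source ({url}):\n"""{content}"""\n\n'
        f'Summarize only the information above that is relevant to: "{query}". '
        "Do not add any information that does not appear in the source."
    )
    response = client.chat.completions.create(
        model="gpt-4", messages=[{"role": "user", "content": prompt}]
    )
    return response.choices[0].message.content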

Great! Sounds like, for now, we have an idea for how to create deterministic, factual, and unbiased results. But what about the speed problem?

Speeding up the research process

Another issue with AutoGPT is that it works synchronously. The main idea is to create a list of tasks and then execute them one by one. So if, say, a research task requires visiting 20 sites, and each site takes around one minute to scrape and summarize, the overall research task would take at least 20 minutes. And that’s assuming it ever stops. But what if we could parallelize agent work?

By leveraging Python libraries such as asyncio, the agent tasks have been optimized to work in parallel, thus significantly reducing the time to research.

# Create a list to hold the coroutine agent tasks
tasks = [async_browse(url, query, self.websocket) for url in await new_search_urls]

# Gather the results as they become available
responses = await asyncio.gather(*tasks, return_exceptions=True)

In the example above, we trigger scraping for all URLs in parallel, and only once all is done, continue with the task. Based on many tests, an average research task takes around three minutes (!!). That’s 85% faster than AutoGPT.

Finalizing the research report

Finally, after aggregating as much information as possible about a given research task, the challenge is to write a comprehensive report about it.

After experimenting with several OpenAI models, and even open-source ones, I’ve concluded that the best results are currently achieved with GPT-4. The task is straightforward: provide GPT-4 with all the aggregated information as context, and ask it to write a detailed report given the original research task.

The prompt is as follows:

"{research_summary}" Using the above information, answer the following question or topic: "{question}" in a detailed report — The report should focus on the answer to the question, should be well structured, informative, in depth, with facts and numbers if available, a minimum of 1,200 words and with markdown syntax and apa format. Write all source urls at the end of the report in apa format. You should write your report only based on the given information and nothing else.

The results are quite impressive, with some minor hallucinations in very few samples, but it’s fair to assume that as GPT improves over time, results will only get better.

The final architecture

Now that we’ve reviewed the necessary steps of GPT Researcher, let’s break down the final architecture, as shown below:

More specifically:

  • Generate an outline of research questions that form an objective opinion on any given task.
  • For each research question, trigger a crawler agent that scrapes online resources for information relevant to the given task.
  • For each scraped resource, keep track, filter, and summarize only if it includes relevant information.
  • Finally, aggregate all summarized sources and generate a final research report.

Going forward

The future of online research automation is heading toward a major disruption. As AI continues to improve, it is only a matter of time before AI agents can perform comprehensive research tasks for any of our day-to-day needs. AI research can disrupt areas of finance, legal, academia, health, and retail, reducing the time spent on each research task by 95% while optimizing for factual and unbiased reports amid an ever-growing overload of online information.

Imagine if an AI can eventually understand and analyze any form of online content — videos, images, graphs, tables, reviews, text, audio. And imagine if it could support and analyze hundreds of thousands of words of aggregated information within a single prompt. Even imagine that AI can eventually improve in reasoning and analysis, making it much more suitable for reaching new and innovative research conclusions. And that it can do all that in minutes, if not seconds.

It’s all a matter of time, and that is what GPT Researcher is all about.

diff --git a/blog/building-openai-assistant.html b/blog/building-openai-assistant.html
index cce10a8cf..b6f041d1f 100644
--- a/blog/building-openai-assistant.html
+++ b/blog/building-openai-assistant.html
@@ -7,13 +7,13 @@ How to build an OpenAI Assistant with Internet access | GPT Researcher

How to build an OpenAI Assistant with Internet access

· 6 min read
Assaf Elovic

OpenAI has done it again with a groundbreaking DevDay showcasing some of the latest improvements to the OpenAI suite of tools, products and services. One major release was the new Assistants API that makes it easier for developers to build their own assistive AI apps that have goals and can call models and tools.

The new Assistants API currently supports three types of tools: Code Interpreter, Retrieval, and Function calling. Although you might expect the Retrieval tool to support online information retrieval (such as search APIs, the way ChatGPT plugins do), it currently supports only raw data such as text or CSV files.

This blog will demonstrate how to leverage the latest Assistants API with online information using the function calling tool.

To skip the tutorial below, feel free to check out the full Github Gist here.

At a high level, a typical integration of the Assistants API has the following steps:

  • Create an Assistant in the API by defining its custom instructions and picking a model. If helpful, enable tools like Code Interpreter, Retrieval, and Function calling.
  • Create a Thread when a user starts a conversation.
  • Add Messages to the Thread as the user asks questions.
  • Run the Assistant on the Thread to trigger responses. This automatically calls the relevant tools.

As you can see below, an Assistant object includes Threads for storing and handling conversation sessions between the assistant and users, and Runs for invoking an Assistant on a Thread.

OpenAI Assistant Object

Let’s go ahead and implement these steps one by one! For the example, we will build a finance GPT that can provide insights about financial questions. We will use the OpenAI Python SDK v1.2 and Tavily Search API.
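
The snippets below assume both SDK clients are already initialized (this setup also appears in the full listing at the end):

import os
from openai import OpenAI
from tavily import TavilyClient

# Initialize clients with API keys
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])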

First things first, let’s define the assistant’s instructions:

assistant_prompt_instruction = """You are a finance expert. 
Your goal is to provide answers based on information from the internet.
You must use the provided Tavily search API function to find relevant online information.
You should never use your own knowledge to answer questions.
Please include relevant url sources in the end of your answers.
"""

Next, let’s finalize step 1 and create an assistant using the latest GPT-4 Turbo model (128K context), along with a function tool that calls the Tavily web search API:

# Create an assistant
assistant = client.beta.assistants.create(
    instructions=assistant_prompt_instruction,
    model="gpt-4-1106-preview",
    tools=[{
        "type": "function",
        "function": {
            "name": "tavily_search",
            "description": "Get information on recent events from the web.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "The search query to use. For example: 'Latest news on Nvidia stock performance'"},
                },
                "required": ["query"]
            }
        }
    }]
)

Steps 2 and 3 are quite straightforward: we’ll initiate a new thread and update it with a user message:

thread = client.beta.threads.create()
user_input = input("You: ")
message = client.beta.threads.messages.create(
    thread_id=thread.id,
    role="user",
    content=user_input,
)

Finally, we’ll run the assistant on the thread to trigger the function call and get the response:

run = client.beta.threads.runs.create(
    thread_id=thread.id,
    assistant_id=assistant_id,
)

So far so good! But this is where it gets a bit messy. Unlike with the regular GPT APIs, the Assistants API doesn’t return a synchronous response, but returns a status. This allows for asynchronous operations across assistants, but requires more overhead for fetching statuses and dealing with each manually.

Status Diagram

To manage this status lifecycle, let’s build a function that can be reused and handles waiting for various statuses (such as ‘requires_action’):

# Function to wait for a run to complete
def wait_for_run_completion(thread_id, run_id):
    while True:
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        print(f"Current run status: {run.status}")
        if run.status in ['completed', 'failed', 'requires_action']:
            return run

This function polls once per second until the run is finalized, such as when it has completed or requires an action from a function call.

We’re almost there! Lastly, let’s take care of when the assistant wants to call the web search API:

# Function to handle tool output submission
def submit_tool_outputs(thread_id, run_id, tools_to_call):
    tool_output_array = []
    for tool in tools_to_call:
        output = None
        tool_call_id = tool.id
        function_name = tool.function.name
        function_args = tool.function.arguments

        if function_name == "tavily_search":
            output = tavily_search(query=json.loads(function_args)["query"])

        if output:
            tool_output_array.append({"tool_call_id": tool_call_id, "output": output})

    return client.beta.threads.runs.submit_tool_outputs(
        thread_id=thread_id,
        run_id=run_id,
        tool_outputs=tool_output_array
    )

As seen above, if the assistant has decided that a function call should be triggered, we extract the required function parameters and submit the tool outputs back to the running thread. We catch this status and call our functions as seen below:

if run.status == 'requires_action':
    run = submit_tool_outputs(thread.id, run.id, run.required_action.submit_tool_outputs.tool_calls)
    run = wait_for_run_completion(thread.id, run.id)

That’s it! We now have a working OpenAI Assistant that can answer financial questions using real-time online information. Below is the full runnable code:

import os
import json
import time
from openai import OpenAI
from tavily import TavilyClient

# Initialize clients with API keys
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])

assistant_prompt_instruction = """You are a finance expert.
Your goal is to provide answers based on information from the internet.
You must use the provided Tavily search API function to find relevant online information.
You should never use your own knowledge to answer questions.
Please include relevant url sources in the end of your answers.
"""

# Function to perform a Tavily search
def tavily_search(query):
    search_result = tavily_client.get_search_context(query, search_depth="advanced", max_tokens=8000)
    return search_result

# Function to wait for a run to complete
def wait_for_run_completion(thread_id, run_id):
    while True:
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        print(f"Current run status: {run.status}")
        if run.status in ['completed', 'failed', 'requires_action']:
            return run

# Function to handle tool output submission
def submit_tool_outputs(thread_id, run_id, tools_to_call):
    tool_output_array = []
    for tool in tools_to_call:
        output = None
        tool_call_id = tool.id
        function_name = tool.function.name
        function_args = tool.function.arguments

        if function_name == "tavily_search":
            output = tavily_search(query=json.loads(function_args)["query"])

        if output:
            tool_output_array.append({"tool_call_id": tool_call_id, "output": output})

    return client.beta.threads.runs.submit_tool_outputs(
        thread_id=thread_id,
        run_id=run_id,
        tool_outputs=tool_output_array
    )

# Function to print messages from a thread
def print_messages_from_thread(thread_id):
    messages = client.beta.threads.messages.list(thread_id=thread_id)
    for msg in messages:
        print(f"{msg.role}: {msg.content[0].text.value}")

# Create an assistant
assistant = client.beta.assistants.create(
    instructions=assistant_prompt_instruction,
    model="gpt-4-1106-preview",
    tools=[{
        "type": "function",
        "function": {
            "name": "tavily_search",
            "description": "Get information on recent events from the web.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "The search query to use. For example: 'Latest news on Nvidia stock performance'"},
                },
                "required": ["query"]
            }
        }
    }]
)
assistant_id = assistant.id
print(f"Assistant ID: {assistant_id}")

# Create a thread
thread = client.beta.threads.create()
print(f"Thread: {thread}")

# Ongoing conversation loop
while True:
    user_input = input("You: ")
    if user_input.lower() == 'exit':
        break

    # Create a message
    message = client.beta.threads.messages.create(
        thread_id=thread.id,
        role="user",
        content=user_input,
    )

    # Create a run
    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant_id,
    )
    print(f"Run ID: {run.id}")

    # Wait for run to complete
    run = wait_for_run_completion(thread.id, run.id)

    if run.status == 'failed':
        print(run.last_error)  # failures are exposed on the run's last_error field
        continue
    elif run.status == 'requires_action':
        run = submit_tool_outputs(thread.id, run.id, run.required_action.submit_tool_outputs.tool_calls)
        run = wait_for_run_completion(thread.id, run.id)

    # Print messages from the thread
    print_messages_from_thread(thread.id)

The assistant can be further customized and improved with additional retrieval data, OpenAI’s Code Interpreter, and more. You can also add more function tools to make the assistant even smarter.

Feel free to drop a comment below if you have any further questions!

diff --git a/blog/gptr-langgraph.html b/blog/gptr-langgraph.html
index 4f837926e..dbc491e4f 100644
--- a/blog/gptr-langgraph.html
+++ b/blog/gptr-langgraph.html
@@ -7,7 +7,7 @@ How to Build the Ultimate Research Multi-Agent Assistant | GPT Researcher
@@ -15,7 +15,7 @@

How to Build the Ultimate Research Multi-Agent Assistant

· 10 min read
Assaf Elovic


Introducing the GPT Researcher Multi-Agent Assistant

Learn how to build an autonomous research assistant using LangGraph with a team of specialized AI agents

It has only been a year since the initial release of GPT Researcher, but methods for building, testing, and deploying AI agents have already evolved significantly. That’s just the nature and speed of current AI progress. What started as simple zero-shot or few-shot prompting has quickly evolved into agent function calling, RAG, and now agentic workflows (aka “flow engineering”).

Andrew Ng has recently stated, “I think AI agent workflows will drive massive AI progress this year — perhaps even more than the next generation of foundation models. This is an important trend, and I urge everyone who works in AI to pay attention to it.”

In this article you will learn why multi-agent workflows are the current best standard and how to build the optimal autonomous research multi-agent assistant using LangGraph.

To skip this tutorial, feel free to check out the Github repo of GPT Researcher x LangGraph.

Introducing LangGraph

LangGraph is an extension of LangChain aimed at creating agent and multi-agent flows. It adds the ability to create cyclical flows and comes with memory built in — both important attributes for creating agents.

LangGraph provides developers with a high degree of controllability, which is important for creating custom agents and flows. Nearly all agents in production are customized toward the specific use case they are trying to solve. LangGraph gives you the flexibility to create arbitrary customized agents, while providing an intuitive developer experience for doing so.

Enough with the smalltalk, let’s start building!

Building the Ultimate Autonomous Research Agent

By leveraging LangGraph, the research process can be significantly improved in depth and quality through multiple agents with specialized skills. Having every agent focus on and specialize in a single skill allows for better separation of concerns, customizability, and further development at scale as the project grows.

Inspired by the recent STORM paper, this example showcases how a team of AI agents can work together to conduct research on a given topic, from planning to publication. This example will also leverage the leading autonomous research agent GPT Researcher.

The Research Agent Team

The research team consists of seven LLM agents:

  • Chief Editor — Oversees the research process and manages the team. This is the “master” agent that coordinates the other agents using LangGraph. This agent acts as the main LangGraph interface.
  • GPT Researcher — A specialized autonomous agent that conducts in depth research on a given topic.
  • Editor — Responsible for planning the research outline and structure.
  • Reviewer — Validates the correctness of the research results given a set of criteria.
  • Reviser — Revises the research results based on the feedback from the reviewer.
  • Writer — Responsible for compiling and writing the final report.
  • Publisher — Responsible for publishing the final report in various formats.

Architecture

As seen below, the automation process is based on the following stages: planning the research, data collection and analysis, review and revision, writing the report, and finally publication:

Architecture

More specifically the process is as follows:

  • Browser (gpt-researcher) — Browses the internet for initial research based on the given research task. This step is crucial for LLMs to plan the research process based on up-to-date and relevant information, and not rely solely on pre-trained data for a given task or topic.

  • Editor — Plans the report outline and structure based on the initial research. The Editor is also responsible for triggering the parallel research tasks based on the planned outline.

  • For each outline topic (in parallel):

    • Researcher (gpt-researcher) — Runs in-depth research on the subtopics and writes a draft. This agent leverages the GPT Researcher Python package under the hood for an optimized, in-depth, and factual research report.
    • Reviewer — Validates the correctness of the draft given a set of guidelines and provides feedback to the reviser (if any).
    • Reviser — Revises the draft until it is satisfactory based on the reviewer feedback.
  • Writer — Compiles and writes the final report including an introduction, conclusion and references section from the given research findings.

  • Publisher — Publishes the final report in multiple formats such as PDF, Docx, Markdown, etc.

We will not dive into all of the code since there is a lot of it, but will focus mostly on the interesting parts I’ve found valuable to share.

Define the Graph State

One of my favorite features with LangGraph is state management. States in LangGraph are facilitated through a structured approach where developers define a GraphState that encapsulates the entire state of the application. Each node in the graph can modify this state, allowing for dynamic responses based on the evolving context of the interaction.

As with the start of every technical design, considering the data schema throughout the application is key. In this case, we’ll define a ResearchState like so:

from typing import TypedDict, List

class ResearchState(TypedDict):
    task: dict
    initial_research: str
    sections: List[str]
    research_data: List[dict]
    # Report layout
    title: str
    headers: dict
    date: str
    table_of_contents: str
    introduction: str
    conclusion: str
    sources: List[str]
    report: str

As seen above, the state is divided into two main areas: the research task and the report layout content. As data circulates through the graph agents, each agent will, in turn, generate new data based on the existing state and update it for subsequent processing further down the graph with other agents.
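
As a sketch of that pattern (a hypothetical node method, not the actual GPT Researcher code), each node receives the current state and returns only the fields it updates, which LangGraph merges back into the shared state:

class EditorAgent:
    async def plan_research(self, state: ResearchState) -> dict:
        # Read whatever earlier nodes placed on the state...
        outline = await self.generate_outline(state["task"], state["initial_research"])  # hypothetical helper
        # ...and return only the updated fields for LangGraph to merge
        return {"sections": outline}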

We can then initialize the graph with the following:

from langgraph.graph import StateGraph
workflow = StateGraph(ResearchState)

Initializing the graph with LangGraph

As stated above, one of the great things about multi-agent development is building each agent to have specialized and scoped skills. Let’s take the Researcher agent as an example, using the GPT Researcher Python package:

from gpt_researcher import GPTResearcher

class ResearchAgent:
    def __init__(self):
        pass

    async def research(self, query: str):
        # Initialize the researcher for the given query
        researcher = GPTResearcher(query=query, report_type="research_report", config_path=None)
        # Conduct research on the given query
        await researcher.conduct_research()
        # Write the report
        report = await researcher.write_report()

        return report

As you can see above, we’ve created an instance of the Research agent. Now let’s assume we’ve done the same for each of the team’s agents. After creating all of the agents, we’d initialize the graph with LangGraph:

def init_research_team(self):
    # Initialize agents
    editor_agent = EditorAgent(self.task)
    research_agent = ResearchAgent()
    writer_agent = WriterAgent()
    publisher_agent = PublisherAgent(self.output_dir)

    # Define a LangGraph StateGraph with the ResearchState
    workflow = StateGraph(ResearchState)

    # Add nodes for each agent
    workflow.add_node("browser", research_agent.run_initial_research)
    workflow.add_node("planner", editor_agent.plan_research)
    workflow.add_node("researcher", editor_agent.run_parallel_research)
    workflow.add_node("writer", writer_agent.run)
    workflow.add_node("publisher", publisher_agent.run)

    # Connect the agents in order
    workflow.add_edge('browser', 'planner')
    workflow.add_edge('planner', 'researcher')
    workflow.add_edge('researcher', 'writer')
    workflow.add_edge('writer', 'publisher')

    # Set up start and end nodes (END is imported from langgraph.graph)
    workflow.set_entry_point("browser")
    workflow.add_edge('publisher', END)

    return workflow

As seen above, creating the LangGraph graph is straightforward and consists of three main functions: add_node, add_edge, and set_entry_point. With these you first add the nodes to the graph, then connect the edges, and finally set the starting point.
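
To actually execute the graph, the workflow is compiled into a runnable and invoked with an initial state; a minimal sketch, assuming the standard LangGraph API:

chain = workflow.compile()
result = await chain.ainvoke({"task": task})  # `task` holds the research task dict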

Focus check: If you’ve been following the code and architecture properly, you’ll notice that the Reviewer and Reviser agents are missing in the initialization above. Let’s dive into it!

A Graph within a Graph to support stateful Parallelization

This was the most exciting part of my experience working with LangGraph! A standout feature of this autonomous assistant is that each research task runs in parallel and is reviewed and revised based on a set of predefined guidelines.

Knowing how to leverage parallel work within a process is key for optimizing speed. But how would you trigger parallel agent work if all agents report to the same state? This can cause race conditions and inconsistencies in the final data report. To solve this, you can create a sub-graph that is triggered from the main LangGraph instance. This sub-graph holds its own state for each parallel run, which solves the problem.

As we’ve done before, let’s define the LangGraph state and its agents. Since this sub graph basically reviews and revises a research draft, we’ll define the state with draft information:

class DraftState(TypedDict):
    task: dict
    topic: str
    draft: dict
    review: str
    revision_notes: str

As seen in the DraftState, we mostly care about the topic being discussed, and about the review and revision notes through which the reviewer and reviser communicate to finalize the subtopic research report. To create this review cycle, we’ll take advantage of the last important piece of LangGraph: conditional edges:

async def run_parallel_research(self, research_state: dict):
    workflow = StateGraph(DraftState)

    workflow.add_node("researcher", research_agent.run_depth_research)
    workflow.add_node("reviewer", reviewer_agent.run)
    workflow.add_node("reviser", reviser_agent.run)

    # Set up edges: researcher -> reviewer -> reviser -> reviewer ...
    workflow.set_entry_point("researcher")
    workflow.add_edge('researcher', 'reviewer')
    workflow.add_edge('reviser', 'reviewer')
    workflow.add_conditional_edges('reviewer',
                                   (lambda draft: "accept" if draft['review'] is None else "revise"),
                                   {"accept": END, "revise": "reviser"})

By defining the conditional edges, the graph routes to the reviser if the reviewer produced review notes, or ends the cycle with the final draft. If you go back to the main graph we built, you’ll see that this parallel work happens under a node named “researcher”, called by the ChiefEditor agent.
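
To complete the picture, the same run_parallel_research function might then compile the sub-graph and run one instance (each with its own DraftState) per planned section in parallel (a sketch; the exact invocation in the repo may differ, and it assumes asyncio is imported):

    # ...continuing run_parallel_research from above
    chain = workflow.compile()
    drafts = await asyncio.gather(*[
        chain.ainvoke({"task": research_state["task"], "topic": topic})
        for topic in research_state["sections"]
    ])
    return {"research_data": drafts}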

Running the Research Assistant

After finalizing the agents, states, and graphs, it’s time to run our research assistant! To make it easier to customize, the assistant runs with a given task.json file:

{
    "query": "Is AI in a hype cycle?",
    "max_sections": 3,
    "publish_formats": {
        "markdown": true,
        "pdf": true,
        "docx": true
    },
    "follow_guidelines": false,
    "model": "gpt-4-turbo",
    "guidelines": [
        "The report MUST be written in APA format",
        "Each sub section MUST include supporting sources using hyperlinks. If none exist, erase the sub section or rewrite it to be a part of the previous section",
        "The report MUST be written in spanish"
    ]
}

The task object is pretty self-explanatory; however, note that if follow_guidelines is false, the graph will ignore the revision step and the defined guidelines. Also, the max_sections field defines how many subheaders to research; fewer sections will generate a shorter report.

Running the assistant will result in a final research report in formats such as Markdown, PDF and Docx.
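
As a sketch of a minimal entry point (the ChiefEditorAgent name and its methods are assumptions here, not necessarily the repo’s exact API):

import asyncio
import json

async def main():
    with open("task.json") as f:
        task = json.load(f)
    chief_editor = ChiefEditorAgent(task)   # hypothetical master-agent class
    await chief_editor.run_research_task()  # builds the graph and runs it end to end

asyncio.run(main())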

To download and run the example, check out the GPT Researcher x LangGraph open source page.

What’s Next?

Going forward, there are super exciting things to think about. Human-in-the-loop is key for optimized AI experiences. Having a human help the assistant revise and focus on just the right research plan, topics, and outline would enhance the overall quality and experience. More generally, relying on human intervention throughout the AI flow ensures correctness, a sense of control, and deterministic results. I’m happy to see that LangGraph already supports this out of the box, as seen here.

In addition, having support for research about both web and local data would be key for many types of business and personal use cases.

Lastly, more can be done to improve the quality of retrieved sources and to make sure the final report follows the optimal storyline.

A step forward for LangGraph and multi-agent collaboration as a whole would be assistants that can plan and generate graphs dynamically based on given tasks. This would allow assistants to choose only a subset of agents for a given task and plan their strategy based on the graph fundamentals presented in this article, opening a whole new world of possibilities. Given the pace of innovation in the AI space, it won’t be long before a new disruptive version of GPT Researcher is launched. Looking forward to what the future brings!

To keep track of this project’s ongoing progress and updates please join our Discord community. And as always, if you have any feedback or further questions, please comment below!

diff --git a/blog/tags.html b/blog/tags.html
index 909e459f5..1851d623a 100644
--- a/blog/tags.html
+++ b/blog/tags.html
@@ -7,13 +7,13 @@ Tags | GPT Researcher

diff --git a/blog/tags/assistant-api.html b/blog/tags/assistant-api.html
index c4ff51683..9fe3cf8d4 100644
--- a/blog/tags/assistant-api.html
+++ b/blog/tags/assistant-api.html
@@ -7,13 +7,13 @@ One post tagged with "assistant-api" | GPT Researcher

One post tagged with "assistant-api"


diff --git a/blog/tags/autonomous-agent.html b/blog/tags/autonomous-agent.html
index 43089a757..43377a83d 100644
--- a/blog/tags/autonomous-agent.html
+++ b/blog/tags/autonomous-agent.html
@@ -7,13 +7,13 @@ One post tagged with "autonomous-agent" | GPT Researcher

One post tagged with "autonomous-agent"


diff --git a/blog/tags/github.html b/blog/tags/github.html
index 5785a1925..f511fc8be 100644
--- a/blog/tags/github.html
+++ b/blog/tags/github.html
@@ -7,13 +7,13 @@ One post tagged with "github" | GPT Researcher

One post tagged with "github"

View All Tags

· 7 min read
Assaf Elovic

After AutoGPT was published, we immediately took it for a spin. The first use case that came to mind was autonomous online research. Forming objective conclusions for manual research tasks can take time, sometimes weeks, to find the right resources and information. Seeing how well AutoGPT created tasks and executed them got me thinking about the great potential of using AI to conduct comprehensive research and what it meant for the future of online research.

But the problem with AutoGPT was that it usually ran into never-ending loops, required human interference for almost every step, constantly lost track of its progress, and almost never actually completed the task.

Nonetheless, the information and context gathered during the research task were lost (such as keeping track of sources), and sometimes hallucinated.

The passion for leveraging AI for online research and the limitations I found put me on a mission to try and solve it while sharing my work with the world. This is when I created GPT Researcher — an open source autonomous agent for online comprehensive research.

In this article, we will share the steps that guided me toward the proposed solution.

Moving from infinite loops to deterministic results

The first step in solving these issues was to seek a more deterministic solution that could ultimately guarantee completing any research task within a fixed time frame, without human interference.

This is when we stumbled upon the recent paper Plan and Solve. The paper aims to provide a better solution for the challenges stated above. The idea is quite simple and consists of two components: first, devising a plan to divide the entire task into smaller subtasks and then carrying out the subtasks according to the plan.

Planner-Excutor-Model

As it relates to research, first create an outline of questions to research related to the task, and then deterministically execute an agent for every outline item. This approach eliminates the uncertainty in task completion by breaking the agent steps into a deterministic finite set of tasks. Once all tasks are completed, the agent concludes the research.

Following this strategy has improved the reliability of completing research tasks to 100%. Now the challenge is, how to improve quality and speed?

Aiming for objective and unbiased results

The biggest challenge with LLMs is the lack of factuality and unbiased responses caused by hallucinations and out-of-date training sets (GPT is currently trained on datasets from 2021). But the irony is that for research tasks, it is crucial to optimize for these exact two criteria: factuality and bias.

To tackle this challenges, we assumed the following:

  • Law of large numbers — More content will lead to less biased results. Especially if gathered properly.
  • Leveraging LLMs for the summarization of factual information can significantly improve the overall better factuality of results.

After experimenting with LLMs for quite some time, we can say that the areas where foundation models excel are in the summarization and rewriting of given content. So, in theory, if LLMs only review given content and summarize and rewrite it, potentially it would reduce hallucinations significantly.

In addition, assuming the given content is unbiased, or at least holds opinions and information from all sides of a topic, the rewritten result would also be unbiased. So how can content be unbiased? The law of large numbers. In other words, if enough sites that hold relevant information are scraped, the possibility of biased information reduces greatly. So the idea would be to scrape just enough sites together to form an objective opinion on any topic.

Great! Sounds like, for now, we have an idea for how to create both deterministic, factual, and unbiased results. But what about the speed problem?

Speeding up the research process

Another issue with AutoGPT is that it works synchronously. The main idea of it is to create a list of tasks and then execute them one by one. So if, let’s say, a research task requires visiting 20 sites, and each site takes around one minute to scrape and summarize, the overall research task would take a minimum of +20 minutes. That’s assuming it ever stops. But what if we could parallelize agent work?

By levering Python libraries such as asyncio, the agent tasks have been optimized to work in parallel, thus significantly reducing the time to research.

# Create a list to hold the coroutine agent tasks
tasks = [async_browse(url, query, self.websocket) for url in await new_search_urls]

# Gather the results as they become available
responses = await asyncio.gather(*tasks, return_exceptions=True)

In the example above, we trigger scraping for all URLs in parallel, and only once all is done, continue with the task. Based on many tests, an average research task takes around three minutes (!!). That’s 85% faster than AutoGPT.

Finalizing the research report

Finally, after aggregating as much information as possible about a given research task, the challenge is to write a comprehensive report about it.

After experimenting with several OpenAI models and even open source, I’ve concluded that the best results are currently achieved with GPT-4. The task is straightforward — provide GPT-4 as context with all the aggregated information, and ask it to write a detailed report about it given the original research task.

The prompt is as follows:

"{research_summary}" Using the above information, answer the following question or topic: "{question}" in a detailed report — The report should focus on the answer to the question, should be well structured, informative, in depth, with facts and numbers if available, a minimum of 1,200 words and with markdown syntax and apa format. Write all source urls at the end of the report in apa format. You should write your report only based on the given information and nothing else.

The results are quite impressive, with some minor hallucinations in very few samples, but it’s fair to assume that as GPT improves over time, results will only get better.

The final architecture

Now that we’ve reviewed the necessary steps of GPT Researcher, let’s break down the final architecture, as shown below:

More specifically:

  • Generate an outline of research questions that form an objective opinion on any given task.
  • For each research question, trigger a crawler agent that scrapes online resources for information relevant to the given task.
  • For each scraped resource, keep track, filter, and summarize only if it includes relevant information.
  • Finally, aggregate all summarized sources and generate a final research report.

Going forward

The future of online research automation is heading toward a major disruption. As AI continues to improve, it is only a matter of time before AI agents can perform comprehensive research tasks for any of our day-to-day needs. AI research can disrupt areas of finance, legal, academia, health, and retail, reducing our time for each research by 95% while optimizing for factual and unbiased reports within an influx and overload of ever-growing online information.

Imagine if an AI can eventually understand and analyze any form of online content — videos, images, graphs, tables, reviews, text, audio. And imagine if it could support and analyze hundreds of thousands of words of aggregated information within a single prompt. Even imagine that AI can eventually improve in reasoning and analysis, making it much more suitable for reaching new and innovative research conclusions. And that it can do all that in minutes, if not seconds.

It’s all a matter of time and what GPT Researcher is all about.

- + \ No newline at end of file diff --git a/blog/tags/gpt-researcher.html b/blog/tags/gpt-researcher.html index 025893876..95accf9c3 100644 --- a/blog/tags/gpt-researcher.html +++ b/blog/tags/gpt-researcher.html @@ -7,7 +7,7 @@ 2 posts tagged with "gpt-researcher" | GPT Researcher - + @@ -15,7 +15,7 @@

2 posts tagged with "gpt-researcher"

View All Tags

· 10 min read
Assaf Elovic


Introducing the GPT Researcher Multi-Agent Assistant

Learn how to build an autonomous research assistant using LangGraph with a team of specialized AI agents

It has only been a year since the initial release of GPT Researcher, but the methods for building, testing, and deploying AI agents have already evolved significantly. That’s just the nature and speed of current AI progress. What started as simple zero-shot or few-shot prompting has quickly evolved into agent function calling, RAG, and now, finally, agentic workflows (aka “flow engineering”).

Andrew Ng has recently stated, “I think AI agent workflows will drive massive AI progress this year — perhaps even more than the next generation of foundation models. This is an important trend, and I urge everyone who works in AI to pay attention to it.”

In this article, you will learn why multi-agent workflows are currently the best standard, and how to build the optimal autonomous research multi-agent assistant using LangGraph.

To skip this tutorial, feel free to check out the GitHub repo of GPT Researcher x LangGraph.

Introducing LangGraph

LangGraph is an extension of LangChain aimed at creating agent and multi-agent flows. It adds the ability to create cyclical flows and comes with memory built in, both important attributes for creating agents.

LangGraph provides developers with a high degree of controllability, which is important for creating custom agents and flows. Nearly all agents in production are customized toward the specific use case they are trying to solve. LangGraph gives you the flexibility to create arbitrary customized agents, while providing an intuitive developer experience for doing so.

Enough with the small talk, let’s start building!

Building the Ultimate Autonomous Research Agent

With LangGraph, the research process can be significantly improved in depth and quality by leveraging multiple agents with specialized skills. Having every agent focus on a single specialized skill allows for better separation of concerns, customizability, and further development at scale as the project grows.

Inspired by the recent STORM paper, this example showcases how a team of AI agents can work together to conduct research on a given topic, from planning to publication. This example will also leverage the leading autonomous research agent GPT Researcher.

The Research Agent Team

The research team consists of seven LLM agents:

  • Chief Editor — Oversees the research process and manages the team. This is the “master” agent that coordinates the other agents and acts as the main LangGraph interface.
  • GPT Researcher — A specialized autonomous agent that conducts in-depth research on a given topic.
  • Editor — Responsible for planning the research outline and structure.
  • Reviewer — Validates the correctness of the research results given a set of criteria.
  • Reviser — Revises the research results based on the feedback from the reviewer.
  • Writer — Responsible for compiling and writing the final report.
  • Publisher — Responsible for publishing the final report in various formats.

Architecture

As seen below, the automation process is based on the following stages: planning the research, data collection and analysis, review and revision, writing the report, and finally publication:

(Architecture diagram)

More specifically, the process is as follows:

  • Browser (gpt-researcher) — Browses the internet for initial research based on the given research task. This step is crucial for the LLMs to plan the research process based on up-to-date and relevant information, rather than relying solely on pre-trained data for a given task or topic.

  • Editor — Plans the report outline and structure based on the initial research. The Editor is also responsible for triggering the parallel research tasks based on the planned outline.

  • For each outline topic (in parallel):

    • Researcher (gpt-researcher) — Runs in-depth research on the subtopics and writes a draft. This agent leverages the GPT Researcher Python package under the hood for optimized, in-depth, and factual research reports.
    • Reviewer — Validates the correctness of the draft given a set of guidelines and provides feedback to the reviser (if any).
    • Reviser — Revises the draft until it is satisfactory based on the reviewer feedback.
  • Writer — Compiles and writes the final report, including an introduction, conclusion, and references section, based on the given research findings.

  • Publisher — Publishes the final report in multiple formats such as PDF, Docx, and Markdown.

We will not dive into all the code since there’s a lot of it, but focus mostly on the interesting parts I’ve found valuable to share.

Define the Graph State

One of my favorite features of LangGraph is state management. State in LangGraph is facilitated through a structured approach where developers define a GraphState that encapsulates the entire state of the application. Each node in the graph can modify this state, allowing for dynamic responses based on the evolving context of the interaction.

As at the start of every technical design, considering the data schema throughout the application is key. In this case, we’ll define a ResearchState like so:

from typing import TypedDict, List

class ResearchState(TypedDict):
    task: dict
    initial_research: str
    sections: List[str]
    research_data: List[dict]
    # Report layout
    title: str
    headers: dict
    date: str
    table_of_contents: str
    introduction: str
    conclusion: str
    sources: List[str]
    report: str

As seen above, the state is divided into two main areas: the research task and the report layout content. As data circulates through the graph agents, each agent will, in turn, generate new data based on the existing state and update it for subsequent processing by other agents further down the graph.
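In practice, each LangGraph node receives the current state and returns a partial update, which LangGraph merges back into the shared state. A minimal sketch of what an agent node could look like (the function body and the generate_outline helper are illustrative assumptions):

async def plan_research(self, research_state: ResearchState) -> dict:
    # A node returns only the keys it wants to update in the shared state
    outline = await self.generate_outline(research_state["initial_research"])  # assumed helper
    return {"sections": outline}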

We can then initialize the graph with the following:

from langgraph.graph import StateGraph
workflow = StateGraph(ResearchState)

Initializing the graph with LangGraph

As stated above, one of the great things about multi-agent development is building each agent with specialized and scoped skills. Let’s take the example of the Researcher agent, which uses the GPT Researcher Python package:

from gpt_researcher import GPTResearcher

class ResearchAgent:
    def __init__(self):
        pass

    async def research(self, query: str, parent_query: str = ""):
        # Initialize the researcher for the given (sub)query
        researcher = GPTResearcher(parent_query=parent_query, query=query,
                                   report_type="research_report", config_path=None)
        # Conduct research on the given query
        await researcher.conduct_research()
        # Write the report based on the gathered context
        report = await researcher.write_report()

        return report

As you can see above, we’ve created an instance of the Research agent. Now let’s assume we’ve done the same for each of the team’s agents. After creating all of the agents, we can initialize the graph with LangGraph:

from langgraph.graph import StateGraph, END

def init_research_team(self):
    # Initialize agents
    editor_agent = EditorAgent(self.task)
    research_agent = ResearchAgent()
    writer_agent = WriterAgent()
    publisher_agent = PublisherAgent(self.output_dir)

    # Define a LangGraph StateGraph with the ResearchState
    workflow = StateGraph(ResearchState)

    # Add nodes for each agent
    workflow.add_node("browser", research_agent.run_initial_research)
    workflow.add_node("planner", editor_agent.plan_research)
    workflow.add_node("researcher", editor_agent.run_parallel_research)
    workflow.add_node("writer", writer_agent.run)
    workflow.add_node("publisher", publisher_agent.run)

    # Connect the edges in execution order
    workflow.add_edge('browser', 'planner')
    workflow.add_edge('planner', 'researcher')
    workflow.add_edge('researcher', 'writer')
    workflow.add_edge('writer', 'publisher')

    # Set up start and end nodes
    workflow.set_entry_point("browser")
    workflow.add_edge('publisher', END)

    return workflow

As seen above, creating the LangGraph graph is very straightforward and consists of three main functions: add_node, add_edge, and set_entry_point. With these you first add the nodes to the graph, then connect the edges, and finally set the starting point.
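Once the workflow is returned, running it is just a matter of compiling the graph and invoking it with an initial state. A short sketch (the ChiefEditorAgent wrapper and the task dict shape are assumptions based on the architecture above; compile() and ainvoke() are standard LangGraph calls):

task = {"query": "Is AI in a hype cycle?", "max_sections": 3}  # assumed task shape
chief_editor = ChiefEditorAgent(task)            # hypothetical wrapper around init_research_team
research_team = chief_editor.init_research_team()
chain = research_team.compile()                  # compile the StateGraph into a runnable
result = await chain.ainvoke({"task": task})     # (inside an async function) starts at "browser"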

Focus check: if you’ve been following the code and architecture closely, you’ll notice that the Reviewer and Reviser agents are missing from the initialization above. Let’s dive into it!

A Graph within a Graph to Support Stateful Parallelization

This was the most exciting part of my experience working with LangGraph! A key feature of this autonomous assistant is running each research task in parallel, with each draft reviewed and revised based on a set of predefined guidelines.

Knowing how to leverage parallel work within a process is key for optimizing speed. But how would you trigger parallel agent work if all agents report to the same state? This can cause race conditions and inconsistencies in the final data report. To solve this, you can create a subgraph that is triggered from the main LangGraph instance. This subgraph holds its own state for each parallel run, which resolves the issues raised above.

As we’ve done before, let’s define the LangGraph state and its agents. Since this subgraph essentially reviews and revises a research draft, we’ll define the state with draft information:

class DraftState(TypedDict):
    task: dict
    topic: str
    draft: dict
    review: str
    revision_notes: str

As seen in the DraftState, we mostly care about the topic discussed and the review and revision notes, which the reviewer and reviser exchange with each other to finalize the subtopic research report. To create this cyclical condition, we’ll take advantage of the last important piece of LangGraph: conditional edges:

async def run_parallel_research(self, research_state: dict):
    workflow = StateGraph(DraftState)

    workflow.add_node("researcher", research_agent.run_depth_research)
    workflow.add_node("reviewer", reviewer_agent.run)
    workflow.add_node("reviser", reviser_agent.run)

    # set up edges researcher->reviewer->reviser->reviewer...
    workflow.set_entry_point("researcher")
    workflow.add_edge('researcher', 'reviewer')
    workflow.add_edge('reviser', 'reviewer')
    workflow.add_conditional_edges('reviewer',
                                   (lambda draft: "accept" if draft['review'] is None else "revise"),
                                   {"accept": END, "revise": "reviser"})

With the conditional edges defined, the graph directs to the reviser if the reviewer left review notes, or ends the cycle with the final draft. If you go back to the main graph we’ve built, you’ll see that this parallel work happens under a node named “researcher”, called by the ChiefEditor agent.
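To make the parallel fan-out concrete, one way to run the compiled subgraph once per planned section is plain asyncio. This is a sketch under the assumption that the planner stored the section list under research_state["sections"]:

import asyncio

chain = workflow.compile()
# Launch one review-and-revise subgraph per planned section, all in parallel
jobs = [chain.ainvoke({"task": research_state.get("task"), "topic": topic})
        for topic in research_state.get("sections", [])]
research_data = await asyncio.gather(*jobs)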

Running the Research Assistant

After finalizing the agents, states and graphs, it’s time to run our research assistant! To make it easier to customize, the assistant runs with a given task.json file:

{
  "query": "Is AI in a hype cycle?",
  "max_sections": 3,
  "publish_formats": {
    "markdown": true,
    "pdf": true,
    "docx": true
  },
  "follow_guidelines": false,
  "model": "gpt-4-turbo",
  "guidelines": [
    "The report MUST be written in APA format",
    "Each sub section MUST include supporting sources using hyperlinks. If none exist, erase the sub section or rewrite it to be a part of the previous section",
    "The report MUST be written in spanish"
  ]
}

The task object is pretty self-explanatory; however, note that setting follow_guidelines to false causes the graph to skip the revision step and ignore the defined guidelines. Also, the max_sections field defines how many subheaders to research. Fewer sections will result in a shorter report.
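Loading the task file before kicking off the assistant is then standard Python (the file name matches the example above):

import json

with open("task.json", "r") as f:
    task = json.load(f)
assert task.get("query"), "a research query is required"  # basic sanity check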

Running the assistant will result in a final research report in formats such as Markdown, PDF and Docx.

To download and run the example check out the GPT Researcher x LangGraph open source page.

What’s Next?

Going forward, there are super exciting things to think about. Human-in-the-loop is key for optimized AI experiences. Having a human help the assistant revise and focus on just the right research plan, topics, and outline would enhance the overall quality and experience. More generally, allowing human intervention throughout the AI flow ensures correctness, a sense of control, and more deterministic results. I’m happy to see that LangGraph already supports this out of the box, as seen here.

In addition, having support for researching both web and local data would be key for many types of business and personal use cases.

Lastly, more can be done to improve the quality of retrieved sources and to make sure the final report follows the optimal storyline.

A step forward for LangGraph and multi-agent collaboration as a whole would be assistants that can plan and generate graphs dynamically based on given tasks. This vision would allow assistants to choose only a subset of agents for a given task and to plan their strategy based on the graph fundamentals presented in this article, opening up a whole new world of possibilities. Given the pace of innovation in the AI space, it won’t be long before a new disruptive version of GPT Researcher is launched. Looking forward to what the future brings!

To keep track of this project’s ongoing progress and updates, please join our Discord community. And as always, if you have any feedback or further questions, please comment below!

· 7 min read
Assaf Elovic

After AutoGPT was published, we immediately took it for a spin. The first use case that came to mind was autonomous online research. Forming objective conclusions for manual research tasks can take time, sometimes weeks, to find the right resources and information. Seeing how well AutoGPT created tasks and executed them got me thinking about the great potential of using AI to conduct comprehensive research and what it meant for the future of online research.

But the problem with AutoGPT was that it usually ran into never-ending loops, required human interference for almost every step, constantly lost track of its progress, and almost never actually completed the task.

Nonetheless, the information and context gathered during the research task were lost (such as keeping track of sources), and sometimes hallucinated.

The passion for leveraging AI for online research and the limitations I found put me on a mission to try and solve it while sharing my work with the world. This is when I created GPT Researcher — an open source autonomous agent for online comprehensive research.

In this article, we will share the steps that guided me toward the proposed solution.

Moving from infinite loops to deterministic results

The first step in solving these issues was to seek a more deterministic solution that could ultimately guarantee completing any research task within a fixed time frame, without human interference.

This is when we stumbled upon the recent paper Plan and Solve. The paper aims to provide a better solution for the challenges stated above. The idea is quite simple and consists of two components: first, devising a plan to divide the entire task into smaller subtasks and then carrying out the subtasks according to the plan.

Planner-Excutor-Model

As it relates to research, first create an outline of questions to research related to the task, and then deterministically execute an agent for every outline item. This approach eliminates the uncertainty in task completion by breaking the agent steps into a deterministic finite set of tasks. Once all tasks are completed, the agent concludes the research.

Following this strategy has improved the reliability of completing research tasks to 100%. Now the challenge is, how to improve quality and speed?

Aiming for objective and unbiased results

The biggest challenge with LLMs is the lack of factuality and unbiased responses caused by hallucinations and out-of-date training sets (GPT is currently trained on datasets from 2021). But the irony is that for research tasks, it is crucial to optimize for these exact two criteria: factuality and bias.

To tackle this challenges, we assumed the following:

  • Law of large numbers — More content will lead to less biased results. Especially if gathered properly.
  • Leveraging LLMs for the summarization of factual information can significantly improve the overall better factuality of results.

After experimenting with LLMs for quite some time, we can say that the areas where foundation models excel are in the summarization and rewriting of given content. So, in theory, if LLMs only review given content and summarize and rewrite it, potentially it would reduce hallucinations significantly.

In addition, assuming the given content is unbiased, or at least holds opinions and information from all sides of a topic, the rewritten result would also be unbiased. So how can content be unbiased? The law of large numbers. In other words, if enough sites that hold relevant information are scraped, the possibility of biased information reduces greatly. So the idea would be to scrape just enough sites together to form an objective opinion on any topic.

Great! Sounds like, for now, we have an idea for how to create both deterministic, factual, and unbiased results. But what about the speed problem?

Speeding up the research process

Another issue with AutoGPT is that it works synchronously. The main idea of it is to create a list of tasks and then execute them one by one. So if, let’s say, a research task requires visiting 20 sites, and each site takes around one minute to scrape and summarize, the overall research task would take a minimum of +20 minutes. That’s assuming it ever stops. But what if we could parallelize agent work?

By levering Python libraries such as asyncio, the agent tasks have been optimized to work in parallel, thus significantly reducing the time to research.

# Create a list to hold the coroutine agent tasks
tasks = [async_browse(url, query, self.websocket) for url in await new_search_urls]

# Gather the results as they become available
responses = await asyncio.gather(*tasks, return_exceptions=True)

In the example above, we trigger scraping for all URLs in parallel, and only once all is done, continue with the task. Based on many tests, an average research task takes around three minutes (!!). That’s 85% faster than AutoGPT.

Finalizing the research report

Finally, after aggregating as much information as possible about a given research task, the challenge is to write a comprehensive report about it.

After experimenting with several OpenAI models and even open source, I’ve concluded that the best results are currently achieved with GPT-4. The task is straightforward — provide GPT-4 as context with all the aggregated information, and ask it to write a detailed report about it given the original research task.

The prompt is as follows:

"{research_summary}" Using the above information, answer the following question or topic: "{question}" in a detailed report — The report should focus on the answer to the question, should be well structured, informative, in depth, with facts and numbers if available, a minimum of 1,200 words and with markdown syntax and apa format. Write all source urls at the end of the report in apa format. You should write your report only based on the given information and nothing else.

The results are quite impressive, with some minor hallucinations in very few samples, but it’s fair to assume that as GPT improves over time, results will only get better.

The final architecture

Now that we’ve reviewed the necessary steps of GPT Researcher, let’s break down the final architecture, as shown below:

More specifically:

  • Generate an outline of research questions that form an objective opinion on any given task.
  • For each research question, trigger a crawler agent that scrapes online resources for information relevant to the given task.
  • For each scraped resource, keep track, filter, and summarize only if it includes relevant information.
  • Finally, aggregate all summarized sources and generate a final research report.

Going forward

The future of online research automation is heading toward a major disruption. As AI continues to improve, it is only a matter of time before AI agents can perform comprehensive research tasks for any of our day-to-day needs. AI research can disrupt areas of finance, legal, academia, health, and retail, reducing our time for each research by 95% while optimizing for factual and unbiased reports within an influx and overload of ever-growing online information.

Imagine if an AI can eventually understand and analyze any form of online content — videos, images, graphs, tables, reviews, text, audio. And imagine if it could support and analyze hundreds of thousands of words of aggregated information within a single prompt. Even imagine that AI can eventually improve in reasoning and analysis, making it much more suitable for reaching new and innovative research conclusions. And that it can do all that in minutes, if not seconds.

It’s all a matter of time and what GPT Researcher is all about.

- + \ No newline at end of file diff --git a/blog/tags/langchain.html b/blog/tags/langchain.html index 0fd145957..3fec8dea9 100644 --- a/blog/tags/langchain.html +++ b/blog/tags/langchain.html @@ -7,7 +7,7 @@ One post tagged with "langchain" | GPT Researcher - + @@ -15,7 +15,7 @@

One post tagged with "langchain"

View All Tags

· 10 min read
Assaf Elovic

Header

Introducing the GPT Researcher Multi-Agent Assistant

Learn how to build an autonomous research assistant using LangGraph with a team of specialized AI agents

It has only been a year since the initial release of GPT Researcher, but methods for building, testing, and deploying AI agents have already evolved significantly. That’s just the nature and speed of the current AI progress. What started as simple zero-shot or few-shot prompting, has quickly evolved to agent function calling, RAG and now finally agentic workflows (aka “flow engineering”).

Andrew Ng has recently stated, “I think AI agent workflows will drive massive AI progress this year — perhaps even more than the next generation of foundation models. This is an important trend, and I urge everyone who works in AI to pay attention to it.”

In this article you will learn why multi-agent workflows are the current best standard and how to build the optimal autonomous research multi-agent assistant using LangGraph.

To skip this tutorial, feel free to check out the Github repo of GPT Researcher x LangGraph.

Introducing LangGraph

LangGraph is an extension of LangChain aimed at creating agent and multi-agent flows. It adds in the ability to create cyclical flows and comes with memory built in — both important attributes for creating agents.

LangGraph provides developers with a high degree of controllability and is important for creating custom agents and flows. Nearly all agents in production are customized towards the specific use case they are trying solve. LangGraph gives you the flexibility to create arbitrary customized agents, while providing an intuitive developer experience for doing so.

Enough with the smalltalk, let’s start building!

Building the Ultimate Autonomous Research Agent

By leveraging LangGraph, the research process can be significantly improved in depth and quality by leveraging multiple agents with specialized skills. Having every agent focus and specialize only a specific skill, allows for better separation of concerns, customizability, and further development at scale as the project grows.

Inspired by the recent STORM paper, this example showcases how a team of AI agents can work together to conduct research on a given topic, from planning to publication. This example will also leverage the leading autonomous research agent GPT Researcher.

The Research Agent Team

The research team consists of seven LLM agents:

  • Chief Editor — Oversees the research process and manages the team. This is the “master” agent that coordinates the other agents using LangGraph. This agent acts as the main LangGraph interface.
  • GPT Researcher — A specialized autonomous agent that conducts in depth research on a given topic.
  • Editor — Responsible for planning the research outline and structure.
  • Reviewer — Validates the correctness of the research results given a set of criteria.
  • Reviser — Revises the research results based on the feedback from the reviewer.
  • Writer — Responsible for compiling and writing the final report.
  • Publisher — Responsible for publishing the final report in various formats.

Architecture

As seen below, the automation process is based on the following stages: Planning the research, data collection and analysis, review and revision, writing the report and finally publication:

Architecture

More specifically the process is as follows:

  • Browser (gpt-researcher) — Browses the internet for initial research based on the given research task. This step is crucial for LLMs to plan the research process based on up to date and relevant information, and not rely solely on pre-trained data for a given task or topic.

  • Editor — Plans the report outline and structure based on the initial research. The Editor is also responsible for triggering the parallel research tasks based on the planned outline.

  • For each outline topic (in parallel):

    • Researcher (gpt-researcher) — Runs an in depth research on the subtopics and writes a draft. This agent leverages the GPT Researcher Python package under the hood, for optimized, in depth and factual research report.
    • Reviewer — Validates the correctness of the draft given a set of guidelines and provides feedback to the reviser (if any).
    • Reviser — Revises the draft until it is satisfactory based on the reviewer feedback.
  • Writer — Compiles and writes the final report including an introduction, conclusion and references section from the given research findings.

  • Publisher — Publishes the final report to multi formats such as PDF, Docx, Markdown, etc.

  • We will not dive into all the code since there’s a lot of it, but focus mostly on the interesting parts I’ve found valuable to share.

Define the Graph State

One of my favorite features with LangGraph is state management. States in LangGraph are facilitated through a structured approach where developers define a GraphState that encapsulates the entire state of the application. Each node in the graph can modify this state, allowing for dynamic responses based on the evolving context of the interaction.

Like in every start of a technical design, considering the data schema throughout the application is key. In this case we’ll define a ResearchState like so:

class ResearchState(TypedDict):
task: dict
initial_research: str
sections: List[str]
research_data: List[dict]
# Report layout
title: str
headers: dict
date: str
table_of_contents: str
introduction: str
conclusion: str
sources: List[str]
report: str

As seen above, the state is divided into two main areas: the research task and the report layout content. As data circulates through the graph agents, each agent will, in turn, generate new data based on the existing state and update it for subsequent processing further down the graph with other agents.

We can then initialize the graph with the following:

from langgraph.graph import StateGraph
workflow = StateGraph(ResearchState)

Initializing the graph with LangGraph As stated above, one of the great things about multi-agent development is building each agent to have specialized and scoped skills. Let’s take an example of the Researcher agent using GPT Researcher python package:

from gpt_researcher import GPTResearcher

class ResearchAgent:
def __init__(self):
pass

async def research(self, query: str):
# Initialize the researcher
researcher = GPTResearcher(parent_query=parent_query, query=query, report_type=research_report, config_path=None)
# Conduct research on the given query
await researcher.conduct_research()
# Write the report
report = await researcher.write_report()

return report

As you can see above, we’ve created an instance of the Research agent. Now let’s assume we’ve done the same for each of the team’s agent. After creating all of the agents, we’d initialize the graph with LangGraph:

def init_research_team(self):
# Initialize agents
editor_agent = EditorAgent(self.task)
research_agent = ResearchAgent()
writer_agent = WriterAgent()
publisher_agent = PublisherAgent(self.output_dir)

# Define a Langchain StateGraph with the ResearchState
workflow = StateGraph(ResearchState)

# Add nodes for each agent
workflow.add_node("browser", research_agent.run_initial_research)
workflow.add_node("planner", editor_agent.plan_research)
workflow.add_node("researcher", editor_agent.run_parallel_research)
workflow.add_node("writer", writer_agent.run)
workflow.add_node("publisher", publisher_agent.run)

workflow.add_edge('browser', 'planner')
workflow.add_edge('planner', 'researcher')
workflow.add_edge('researcher', 'writer')
workflow.add_edge('writer', 'publisher')

# set up start and end nodes
workflow.set_entry_point("browser")
workflow.add_edge('publisher', END)

return workflow

As seen above, creating the LangGraph graph is very straight forward and consists of three main functions: add_node, add_edge and set_entry_point. With these main functions you can first add the nodes to the graph, connect the edges and finally set the starting point.

Focus check: If you’ve been following the code and architecture properly, you’ll notice that the Reviewer and Reviser agents are missing in the initialization above. Let’s dive into it!

A Graph within a Graph to support stateful Parallelization

This was the most exciting part of my experience working with LangGraph! One exciting feature of this autonomous assistant is having a parallel run for each research task, that would be reviewed and revised based on a set of predefined guidelines.

Knowing how to leverage parallel work within a process is key for optimizing speed. But how would you trigger parallel agent work if all agents report to the same state? This can cause race conditions and inconsistencies in the final data report. To solve this, you can create a sub graph, that would be triggered from the main LangGraph instance. This sub graph would hold its own state for each parallel run, and that would solve the issues that were raised.

As we’ve done before, let’s define the LangGraph state and its agents. Since this sub graph basically reviews and revises a research draft, we’ll define the state with draft information:

class DraftState(TypedDict):
task: dict
topic: str
draft: dict
review: str
revision_notes: str

As seen in the DraftState, we mostly care about the topic discussed, and the reviewer and revision notes as they communicate between each other to finalize the subtopic research report. To create the circular condition we’ll take advantage of the last important piece of LangGraph which is conditional edges:

async def run_parallel_research(self, research_state: dict):
workflow = StateGraph(DraftState)

workflow.add_node("researcher", research_agent.run_depth_research)
workflow.add_node("reviewer", reviewer_agent.run)
workflow.add_node("reviser", reviser_agent.run)

# set up edges researcher->reviewer->reviser->reviewer...
workflow.set_entry_point("researcher")
workflow.add_edge('researcher', 'reviewer')
workflow.add_edge('reviser', 'reviewer')
workflow.add_conditional_edges('reviewer',
(lambda draft: "accept" if draft['review'] is None else "revise"),
{"accept": END, "revise": "reviser"})

By defining the conditional edges, the graph would direct to reviser if there exists review notes by the reviewer, or the cycle would end with the final draft. If you go back to the main graph we’ve built, you’ll see that this parallel work is under a node named “researcher” called by ChiefEditor agent.

Running the Research Assistant After finalizing the agents, states and graphs, it’s time to run our research assistant! To make it easier to customize, the assistant runs with a given task.json file:

{
"query": "Is AI in a hype cycle?",
"max_sections": 3,
"publish_formats": {
"markdown": true,
"pdf": true,
"docx": true
},
"follow_guidelines": false,
"model": "gpt-4-turbo",
"guidelines": [
"The report MUST be written in APA format",
"Each sub section MUST include supporting sources using hyperlinks. If none exist, erase the sub section or rewrite it to be a part of the previous section",
"The report MUST be written in spanish"
]
}

The task object is pretty self explanatory, however please notice that follow_guidelines if false would cause the graph to ignore the revision step and defined guidelines. Also, the max_sections field defines how many subheaders to research for. Having less will generate a shorter report.

Running the assistant will result in a final research report in formats such as Markdown, PDF and Docx.

To download and run the example check out the GPT Researcher x LangGraph open source page.

What’s Next?

Going forward, there are super exciting things to think about. Human in the loop is key for optimized AI experiences. Having a human help the assistant revise and focus on just the right research plan, topics and outline, would enhance the overall quality and experience. Also generally, aiming for relying on human intervention throughout the AI flow ensures correctness, sense of control and deterministic results. Happy to see that LangGraph already supports this out of the box as seen here.

In addition, having support for research about both web and local data would be key for many types of business and personal use cases.

Lastly, more efforts can be done to improve the quality of retrieved sources and making sure the final report is built in the optimal storyline.

A step forward in LangGraph and multi-agent collaboration in a whole would be where assistants can plan and generate graphs dynamically based on given tasks. This vision would allow assistants to choose only a subset of agents for a given task and plan their strategy based on the graph fundamentals as presented in this article and open a whole new world of possibilities. Given the pace of innovation in the AI space, it won’t be long before a new disruptive version of GPT Researcher is launched. Looking forward to what the future brings!

To keep track of this project’s ongoing progress and updates please join our Discord community. And as always, if you have any feedback or further questions, please comment below!

- + \ No newline at end of file diff --git a/blog/tags/langgraph.html b/blog/tags/langgraph.html index a39c18c82..0471cb976 100644 --- a/blog/tags/langgraph.html +++ b/blog/tags/langgraph.html @@ -7,7 +7,7 @@ One post tagged with "langgraph" | GPT Researcher - + @@ -15,7 +15,7 @@

One post tagged with "langgraph"

View All Tags

· 10 min read
Assaf Elovic

Header

Introducing the GPT Researcher Multi-Agent Assistant

Learn how to build an autonomous research assistant using LangGraph with a team of specialized AI agents

It has only been a year since the initial release of GPT Researcher, but methods for building, testing, and deploying AI agents have already evolved significantly. That’s just the nature and speed of the current AI progress. What started as simple zero-shot or few-shot prompting, has quickly evolved to agent function calling, RAG and now finally agentic workflows (aka “flow engineering”).

Andrew Ng has recently stated, “I think AI agent workflows will drive massive AI progress this year — perhaps even more than the next generation of foundation models. This is an important trend, and I urge everyone who works in AI to pay attention to it.”

In this article you will learn why multi-agent workflows are the current best standard and how to build the optimal autonomous research multi-agent assistant using LangGraph.

To skip this tutorial, feel free to check out the Github repo of GPT Researcher x LangGraph.

Introducing LangGraph

LangGraph is an extension of LangChain aimed at creating agent and multi-agent flows. It adds in the ability to create cyclical flows and comes with memory built in — both important attributes for creating agents.

LangGraph provides developers with a high degree of controllability and is important for creating custom agents and flows. Nearly all agents in production are customized towards the specific use case they are trying solve. LangGraph gives you the flexibility to create arbitrary customized agents, while providing an intuitive developer experience for doing so.

Enough with the smalltalk, let’s start building!

Building the Ultimate Autonomous Research Agent

By leveraging LangGraph, the research process can be significantly improved in depth and quality by leveraging multiple agents with specialized skills. Having every agent focus and specialize only a specific skill, allows for better separation of concerns, customizability, and further development at scale as the project grows.

Inspired by the recent STORM paper, this example showcases how a team of AI agents can work together to conduct research on a given topic, from planning to publication. This example will also leverage the leading autonomous research agent GPT Researcher.

The Research Agent Team

The research team consists of seven LLM agents:

  • Chief Editor — Oversees the research process and manages the team. This is the “master” agent that coordinates the other agents using LangGraph. This agent acts as the main LangGraph interface.
  • GPT Researcher — A specialized autonomous agent that conducts in depth research on a given topic.
  • Editor — Responsible for planning the research outline and structure.
  • Reviewer — Validates the correctness of the research results given a set of criteria.
  • Reviser — Revises the research results based on the feedback from the reviewer.
  • Writer — Responsible for compiling and writing the final report.
  • Publisher — Responsible for publishing the final report in various formats.

Architecture

As seen below, the automation process is based on the following stages: Planning the research, data collection and analysis, review and revision, writing the report and finally publication:

Architecture

More specifically the process is as follows:

  • Browser (gpt-researcher) — Browses the internet for initial research based on the given research task. This step is crucial for LLMs to plan the research process based on up to date and relevant information, and not rely solely on pre-trained data for a given task or topic.

  • Editor — Plans the report outline and structure based on the initial research. The Editor is also responsible for triggering the parallel research tasks based on the planned outline.

  • For each outline topic (in parallel):

    • Researcher (gpt-researcher) — Runs an in depth research on the subtopics and writes a draft. This agent leverages the GPT Researcher Python package under the hood, for optimized, in depth and factual research report.
    • Reviewer — Validates the correctness of the draft given a set of guidelines and provides feedback to the reviser (if any).
    • Reviser — Revises the draft until it is satisfactory based on the reviewer feedback.
  • Writer — Compiles and writes the final report including an introduction, conclusion and references section from the given research findings.

  • Publisher — Publishes the final report to multi formats such as PDF, Docx, Markdown, etc.

  • We will not dive into all the code since there’s a lot of it, but focus mostly on the interesting parts I’ve found valuable to share.

Define the Graph State

One of my favorite features with LangGraph is state management. States in LangGraph are facilitated through a structured approach where developers define a GraphState that encapsulates the entire state of the application. Each node in the graph can modify this state, allowing for dynamic responses based on the evolving context of the interaction.

Like in every start of a technical design, considering the data schema throughout the application is key. In this case we’ll define a ResearchState like so:

class ResearchState(TypedDict):
task: dict
initial_research: str
sections: List[str]
research_data: List[dict]
# Report layout
title: str
headers: dict
date: str
table_of_contents: str
introduction: str
conclusion: str
sources: List[str]
report: str

As seen above, the state is divided into two main areas: the research task and the report layout content. As data circulates through the graph agents, each agent will, in turn, generate new data based on the existing state and update it for subsequent processing further down the graph with other agents.

We can then initialize the graph with the following:

from langgraph.graph import StateGraph
workflow = StateGraph(ResearchState)

Initializing the graph with LangGraph As stated above, one of the great things about multi-agent development is building each agent to have specialized and scoped skills. Let’s take an example of the Researcher agent using GPT Researcher python package:

from gpt_researcher import GPTResearcher

class ResearchAgent:
def __init__(self):
pass

async def research(self, query: str):
# Initialize the researcher
researcher = GPTResearcher(parent_query=parent_query, query=query, report_type=research_report, config_path=None)
# Conduct research on the given query
await researcher.conduct_research()
# Write the report
report = await researcher.write_report()

return report

As you can see above, we’ve created an instance of the Research agent. Now let’s assume we’ve done the same for each of the team’s agent. After creating all of the agents, we’d initialize the graph with LangGraph:

def init_research_team(self):
# Initialize agents
editor_agent = EditorAgent(self.task)
research_agent = ResearchAgent()
writer_agent = WriterAgent()
publisher_agent = PublisherAgent(self.output_dir)

# Define a Langchain StateGraph with the ResearchState
workflow = StateGraph(ResearchState)

# Add nodes for each agent
workflow.add_node("browser", research_agent.run_initial_research)
workflow.add_node("planner", editor_agent.plan_research)
workflow.add_node("researcher", editor_agent.run_parallel_research)
workflow.add_node("writer", writer_agent.run)
workflow.add_node("publisher", publisher_agent.run)

workflow.add_edge('browser', 'planner')
workflow.add_edge('planner', 'researcher')
workflow.add_edge('researcher', 'writer')
workflow.add_edge('writer', 'publisher')

# set up start and end nodes
workflow.set_entry_point("browser")
workflow.add_edge('publisher', END)

return workflow

As seen above, creating the LangGraph graph is very straight forward and consists of three main functions: add_node, add_edge and set_entry_point. With these main functions you can first add the nodes to the graph, connect the edges and finally set the starting point.

Focus check: If you’ve been following the code and architecture properly, you’ll notice that the Reviewer and Reviser agents are missing in the initialization above. Let’s dive into it!

A Graph within a Graph to support stateful Parallelization

This was the most exciting part of my experience working with LangGraph! One exciting feature of this autonomous assistant is having a parallel run for each research task, that would be reviewed and revised based on a set of predefined guidelines.

Knowing how to leverage parallel work within a process is key for optimizing speed. But how would you trigger parallel agent work if all agents report to the same state? This can cause race conditions and inconsistencies in the final data report. To solve this, you can create a sub graph, that would be triggered from the main LangGraph instance. This sub graph would hold its own state for each parallel run, and that would solve the issues that were raised.

As we’ve done before, let’s define the LangGraph state and its agents. Since this sub graph basically reviews and revises a research draft, we’ll define the state with draft information:

class DraftState(TypedDict):
task: dict
topic: str
draft: dict
review: str
revision_notes: str

As seen in the DraftState, we mostly care about the topic discussed, and the reviewer and revision notes as they communicate between each other to finalize the subtopic research report. To create the circular condition we’ll take advantage of the last important piece of LangGraph which is conditional edges:

async def run_parallel_research(self, research_state: dict):
workflow = StateGraph(DraftState)

workflow.add_node("researcher", research_agent.run_depth_research)
workflow.add_node("reviewer", reviewer_agent.run)
workflow.add_node("reviser", reviser_agent.run)

# set up edges researcher->reviewer->reviser->reviewer...
workflow.set_entry_point("researcher")
workflow.add_edge('researcher', 'reviewer')
workflow.add_edge('reviser', 'reviewer')
workflow.add_conditional_edges('reviewer',
(lambda draft: "accept" if draft['review'] is None else "revise"),
{"accept": END, "revise": "reviser"})

By defining the conditional edges, the graph would direct to reviser if there exists review notes by the reviewer, or the cycle would end with the final draft. If you go back to the main graph we’ve built, you’ll see that this parallel work is under a node named “researcher” called by ChiefEditor agent.

Running the Research Assistant After finalizing the agents, states and graphs, it’s time to run our research assistant! To make it easier to customize, the assistant runs with a given task.json file:

{
"query": "Is AI in a hype cycle?",
"max_sections": 3,
"publish_formats": {
"markdown": true,
"pdf": true,
"docx": true
},
"follow_guidelines": false,
"model": "gpt-4-turbo",
"guidelines": [
"The report MUST be written in APA format",
"Each sub section MUST include supporting sources using hyperlinks. If none exist, erase the sub section or rewrite it to be a part of the previous section",
"The report MUST be written in spanish"
]
}

The task object is pretty self explanatory, however please notice that follow_guidelines if false would cause the graph to ignore the revision step and defined guidelines. Also, the max_sections field defines how many subheaders to research for. Having less will generate a shorter report.

Running the assistant will result in a final research report in formats such as Markdown, PDF and Docx.

To download and run the example check out the GPT Researcher x LangGraph open source page.

What’s Next?

Going forward, there are super exciting things to think about. Human in the loop is key for optimized AI experiences. Having a human help the assistant revise and focus on just the right research plan, topics and outline, would enhance the overall quality and experience. Also generally, aiming for relying on human intervention throughout the AI flow ensures correctness, sense of control and deterministic results. Happy to see that LangGraph already supports this out of the box as seen here.

In addition, having support for research about both web and local data would be key for many types of business and personal use cases.

Lastly, more efforts can be done to improve the quality of retrieved sources and making sure the final report is built in the optimal storyline.

A step forward in LangGraph and multi-agent collaboration in a whole would be where assistants can plan and generate graphs dynamically based on given tasks. This vision would allow assistants to choose only a subset of agents for a given task and plan their strategy based on the graph fundamentals as presented in this article and open a whole new world of possibilities. Given the pace of innovation in the AI space, it won’t be long before a new disruptive version of GPT Researcher is launched. Looking forward to what the future brings!

To keep track of this project’s ongoing progress and updates please join our Discord community. And as always, if you have any feedback or further questions, please comment below!

- + \ No newline at end of file diff --git a/blog/tags/multi-agents.html b/blog/tags/multi-agents.html index c9df76f35..884d1899d 100644 --- a/blog/tags/multi-agents.html +++ b/blog/tags/multi-agents.html @@ -7,7 +7,7 @@ One post tagged with "multi-agents" | GPT Researcher - + @@ -15,7 +15,7 @@

One post tagged with "multi-agents"

View All Tags

· 10 min read
Assaf Elovic

Header

Introducing the GPT Researcher Multi-Agent Assistant

Learn how to build an autonomous research assistant using LangGraph with a team of specialized AI agents


diff --git a/blog/tags/openai.html b/blog/tags/openai.html

One post tagged with "openai"


· 6 min read
Assaf Elovic

OpenAI has done it again with a groundbreaking DevDay showcasing some of the latest improvements to the OpenAI suite of tools, products and services. One major release was the new Assistants API that makes it easier for developers to build their own assistive AI apps that have goals and can call models and tools.

The new Assistants API currently supports three types of tools: Code Interpreter, Retrieval, and Function calling. Although you might expect the Retrieval tool to support online information retrieval (as search APIs or ChatGPT plugins do), for now it only supports raw data such as text or CSV files.

This blog will demonstrate how to leverage the latest Assistants API with online information using the function calling tool.

To skip the tutorial below, feel free to check out the full Github Gist here.

At a high level, a typical integration of the Assistants API has the following steps:

  • Create an Assistant in the API by defining its custom instructions and picking a model. If helpful, enable tools like Code Interpreter, Retrieval, and Function calling.
  • Create a Thread when a user starts a conversation.
  • Add Messages to the Thread as the user asks questions.
  • Run the Assistant on the Thread to trigger responses. This automatically calls the relevant tools.

As you can see below, an Assistant object includes Threads for storing and handling conversation sessions between the assistant and users, and Runs for invoking an Assistant on a Thread.

OpenAI Assistant Object

Let’s go ahead and implement these steps one by one! For the example, we will build a finance GPT that can provide insights about financial questions. We will use the OpenAI Python SDK v1.2 and Tavily Search API.

First things first, let’s define the assistant’s instructions:

assistant_prompt_instruction = """You are a finance expert. 
Your goal is to provide answers based on information from the internet.
You must use the provided Tavily search API function to find relevant online information.
You should never use your own knowledge to answer questions.
Please include relevant url sources in the end of your answers.
"""

Next, let’s finalize step 1 and create an assistant using the latest GPT-4 Turbo model (128K context), along with a function tool that calls the Tavily web search API:

# Create an assistant
assistant = client.beta.assistants.create(
    instructions=assistant_prompt_instruction,
    model="gpt-4-1106-preview",
    tools=[{
        "type": "function",
        "function": {
            "name": "tavily_search",
            "description": "Get information on recent events from the web.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "The search query to use. For example: 'Latest news on Nvidia stock performance'"},
                },
                "required": ["query"]
            }
        }
    }]
)

Steps 2 and 3 are quite straightforward: we’ll initiate a new thread and update it with a user message:

thread = client.beta.threads.create()
user_input = input("You: ")
message = client.beta.threads.messages.create(
    thread_id=thread.id,
    role="user",
    content=user_input,
)

Finally, we’ll run the assistant on the thread to trigger the function call and get the response:

run = client.beta.threads.runs.create(
    thread_id=thread.id,
    assistant_id=assistant_id,
)

So far so good! But this is where it gets a bit messy. Unlike the regular GPT APIs, the Assistants API doesn’t return a synchronous response; instead, it returns a status. This allows for asynchronous operations across assistants, but requires more overhead for fetching statuses and handling each one manually.

Status Diagram

To manage this status lifecycle, let’s build a reusable function that waits for the relevant statuses (such as ‘requires_action’):

# Function to wait for a run to complete
def wait_for_run_completion(thread_id, run_id):
    while True:
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        print(f"Current run status: {run.status}")
        if run.status in ['completed', 'failed', 'requires_action']:
            return run

This function polls once per second and sleeps for as long as the run has not been finalized, returning once the run has completed, failed, or requires an action from a function call.

We’re almost there! Lastly, let’s handle the case where the assistant wants to call the web search API:

# Function to handle tool output submission
def submit_tool_outputs(thread_id, run_id, tools_to_call):
    tool_output_array = []
    for tool in tools_to_call:
        output = None
        tool_call_id = tool.id
        function_name = tool.function.name
        function_args = tool.function.arguments

        if function_name == "tavily_search":
            output = tavily_search(query=json.loads(function_args)["query"])

        if output:
            tool_output_array.append({"tool_call_id": tool_call_id, "output": output})

    return client.beta.threads.runs.submit_tool_outputs(
        thread_id=thread_id,
        run_id=run_id,
        tool_outputs=tool_output_array
    )

As seen above, if the assistant has reasoned that a function call should be triggered, we extract the required function parameters, invoke the matching function, and submit the outputs back to the running thread. We catch this status and call our functions as seen below:

if run.status == 'requires_action':
    run = submit_tool_outputs(thread.id, run.id, run.required_action.submit_tool_outputs.tool_calls)
    run = wait_for_run_completion(thread.id, run.id)

That’s it! We now have a working OpenAI Assistant that can be used to answer financial questions using real-time online information. Below is the full runnable code:

import os
import json
import time
from openai import OpenAI
from tavily import TavilyClient

# Initialize clients with API keys
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])

assistant_prompt_instruction = """You are a finance expert.
Your goal is to provide answers based on information from the internet.
You must use the provided Tavily search API function to find relevant online information.
You should never use your own knowledge to answer questions.
Please include relevant url sources in the end of your answers.
"""

# Function to perform a Tavily search
def tavily_search(query):
    search_result = tavily_client.get_search_context(query, search_depth="advanced", max_tokens=8000)
    return search_result

# Function to wait for a run to complete
def wait_for_run_completion(thread_id, run_id):
    while True:
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        print(f"Current run status: {run.status}")
        if run.status in ['completed', 'failed', 'requires_action']:
            return run

# Function to handle tool output submission
def submit_tool_outputs(thread_id, run_id, tools_to_call):
    tool_output_array = []
    for tool in tools_to_call:
        output = None
        tool_call_id = tool.id
        function_name = tool.function.name
        function_args = tool.function.arguments

        if function_name == "tavily_search":
            output = tavily_search(query=json.loads(function_args)["query"])

        if output:
            tool_output_array.append({"tool_call_id": tool_call_id, "output": output})

    return client.beta.threads.runs.submit_tool_outputs(
        thread_id=thread_id,
        run_id=run_id,
        tool_outputs=tool_output_array
    )

# Function to print messages from a thread
def print_messages_from_thread(thread_id):
    messages = client.beta.threads.messages.list(thread_id=thread_id)
    for msg in messages:
        print(f"{msg.role}: {msg.content[0].text.value}")

# Create an assistant
assistant = client.beta.assistants.create(
    instructions=assistant_prompt_instruction,
    model="gpt-4-1106-preview",
    tools=[{
        "type": "function",
        "function": {
            "name": "tavily_search",
            "description": "Get information on recent events from the web.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "The search query to use. For example: 'Latest news on Nvidia stock performance'"},
                },
                "required": ["query"]
            }
        }
    }]
)
assistant_id = assistant.id
print(f"Assistant ID: {assistant_id}")

# Create a thread
thread = client.beta.threads.create()
print(f"Thread: {thread}")

# Ongoing conversation loop
while True:
    user_input = input("You: ")
    if user_input.lower() == 'exit':
        break

    # Create a message
    message = client.beta.threads.messages.create(
        thread_id=thread.id,
        role="user",
        content=user_input,
    )

    # Create a run
    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant_id,
    )
    print(f"Run ID: {run.id}")

    # Wait for run to complete
    run = wait_for_run_completion(thread.id, run.id)

    if run.status == 'failed':
        print(run.last_error)  # failure details are exposed via last_error
        continue
    elif run.status == 'requires_action':
        run = submit_tool_outputs(thread.id, run.id, run.required_action.submit_tool_outputs.tool_calls)
        run = wait_for_run_completion(thread.id, run.id)

    # Print messages from the thread
    print_messages_from_thread(thread.id)

The assistant can be further customized and improved using additional retrieval information, OpenAI’s Code Interpreter, and more. You can also go ahead and add more function tools to make the assistant even smarter.
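
For example, here is a sketch of how a second function tool could be registered alongside tavily_search. The get_stock_price tool is purely hypothetical and only illustrates the shape of an additional tool definition:

# Hypothetical additional tool definition (get_stock_price is illustrative only)
stock_price_tool = {
    "type": "function",
    "function": {
        "name": "get_stock_price",
        "description": "Get the latest closing price for a stock ticker.",
        "parameters": {
            "type": "object",
            "properties": {
                "ticker": {"type": "string", "description": "The stock ticker symbol, e.g. 'NVDA'"},
            },
            "required": ["ticker"]
        }
    }
}

# Pass this dict in the tools list when creating the assistant, and handle
# the new function name inside submit_tool_outputs alongside "tavily_search".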

Feel free to drop a comment below if you have any further questions!

diff --git a/blog/tags/opensource.html b/blog/tags/opensource.html

One post tagged with "opensource"


· 7 min read
Assaf Elovic

After AutoGPT was published, we immediately took it for a spin. The first use case that came to mind was autonomous online research. Forming objective conclusions through manual research can take time, sometimes weeks, to find the right resources and information. Seeing how well AutoGPT created tasks and executed them got me thinking about the great potential of using AI to conduct comprehensive research and what it meant for the future of online research.

But the problem with AutoGPT was that it usually ran into never-ending loops, required human interference for almost every step, constantly lost track of its progress, and almost never actually completed the task.

Moreover, the information and context gathered during the research task (such as the sources used) were often lost, and results were sometimes hallucinated.

My passion for leveraging AI for online research, together with the limitations I found, put me on a mission to solve them while sharing my work with the world. This is when I created GPT Researcher — an open source autonomous agent for online comprehensive research.

In this article, I will share the steps that guided me toward the proposed solution.

Moving from infinite loops to deterministic results

The first step in solving these issues was to seek a more deterministic solution that could ultimately guarantee completing any research task within a fixed time frame, without human interference.

This is when we stumbled upon the recent paper Plan and Solve. The paper aims to provide a better solution for the challenges stated above. The idea is quite simple and consists of two components: first, devising a plan to divide the entire task into smaller subtasks and then carrying out the subtasks according to the plan.

Planner-Executor Model

As it relates to research: first, create an outline of research questions related to the task, and then deterministically execute an agent for every outline item. This approach eliminates the uncertainty in task completion by breaking the agent steps into a deterministic, finite set of tasks. Once all tasks are completed, the agent concludes the research.

Following this strategy improved the reliability of completing research tasks to 100%. The challenge then became: how to improve quality and speed?

Aiming for objective and unbiased results

The biggest challenge with LLMs is the lack of factuality and unbiased responses caused by hallucinations and out-of-date training sets (GPT is currently trained on datasets from 2021). But the irony is that for research tasks, it is crucial to optimize for these exact two criteria: factuality and bias.

To tackle these challenges, we assumed the following:

  • Law of large numbers — More content will lead to less biased results. Especially if gathered properly.
  • Leveraging LLMs for the summarization of factual information can significantly improve the overall factuality of results.

After experimenting with LLMs for quite some time, we can say that foundation models excel at summarizing and rewriting given content. So, in theory, if LLMs only review given content and then summarize and rewrite it, hallucinations could potentially be reduced significantly.

In addition, assuming the given content is unbiased, or at least holds opinions and information from all sides of a topic, the rewritten result would also be unbiased. So how can content be unbiased? The law of large numbers. In other words, if enough sites that hold relevant information are scraped, the possibility of biased information reduces greatly. So the idea would be to scrape just enough sites together to form an objective opinion on any topic.

Great! It sounds like, for now, we have an idea for how to create deterministic, factual, and unbiased results. But what about the speed problem?

Speeding up the research process

Another issue with AutoGPT is that it works synchronously. The main idea of it is to create a list of tasks and then execute them one by one. So if, let’s say, a research task requires visiting 20 sites, and each site takes around one minute to scrape and summarize, the overall research task would take a minimum of 20 minutes. And that’s assuming it ever stops. But what if we could parallelize agent work?

By leveraging Python libraries such as asyncio, the agent tasks have been optimized to work in parallel, thus significantly reducing the time to research.

# Create a list to hold the coroutine agent tasks
tasks = [async_browse(url, query, self.websocket) for url in await new_search_urls]

# Gather the results as they become available
responses = await asyncio.gather(*tasks, return_exceptions=True)

In the example above, we trigger scraping for all URLs in parallel, and only once all of them are done do we continue with the task. Based on many tests, an average research task takes around three minutes (!). That’s 85% faster than AutoGPT.

Finalizing the research report

Finally, after aggregating as much information as possible about a given research task, the challenge is to write a comprehensive report about it.

After experimenting with several OpenAI models and even open source ones, I’ve concluded that the best results are currently achieved with GPT-4. The task is straightforward — provide GPT-4 with all the aggregated information as context, and ask it to write a detailed report about it given the original research task.

The prompt is as follows:

"{research_summary}" Using the above information, answer the following question or topic: "{question}" in a detailed report — The report should focus on the answer to the question, should be well structured, informative, in depth, with facts and numbers if available, a minimum of 1,200 words and with markdown syntax and apa format. Write all source urls at the end of the report in apa format. You should write your report only based on the given information and nothing else.

The results are quite impressive, with some minor hallucinations in very few samples, but it’s fair to assume that as GPT improves over time, results will only get better.

The final architecture

Now that we’ve reviewed the necessary steps of GPT Researcher, let’s break down the final architecture, as shown below:

More specifically:

  • Generate an outline of research questions that form an objective opinion on any given task.
  • For each research question, trigger a crawler agent that scrapes online resources for information relevant to the given task.
  • For each scraped resource, keep track, filter, and summarize only if it includes relevant information.
  • Finally, aggregate all summarized sources and generate a final research report.
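
Put together as a minimal sketch, the pipeline could look like the following. The helper functions (generate_outline, browse_and_summarize, write_report) are hypothetical placeholders for the LLM and crawler agents described above, not the project’s actual API:

import asyncio
from typing import List

# Hypothetical helpers; in practice these map onto LLM calls and crawler agents
async def generate_outline(task: str) -> List[str]:
    return [f"What is known about: {task}?"]  # placeholder plan step

async def browse_and_summarize(question: str) -> str:
    return f"Summary of findings for: {question}"  # placeholder crawl + summarize

async def write_report(task: str, summaries: List[str]) -> str:
    return "\n".join(summaries)  # placeholder aggregation step

async def run_research(task: str) -> str:
    questions = await generate_outline(task)      # plan the research outline
    summaries = await asyncio.gather(             # solve each item in parallel
        *(browse_and_summarize(q) for q in questions)
    )
    return await write_report(task, summaries)    # aggregate into a report

print(asyncio.run(run_research("Is AI in a hype cycle?")))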

Going forward

The future of online research automation is heading toward a major disruption. As AI continues to improve, it is only a matter of time before AI agents can perform comprehensive research tasks for any of our day-to-day needs. AI research can disrupt areas of finance, legal, academia, health, and retail, reducing the time spent on each research task by 95% while optimizing for factual and unbiased reports within an ever-growing influx and overload of online information.

Imagine if an AI can eventually understand and analyze any form of online content — videos, images, graphs, tables, reviews, text, audio. And imagine if it could support and analyze hundreds of thousands of words of aggregated information within a single prompt. Even imagine that AI can eventually improve in reasoning and analysis, making it much more suitable for reaching new and innovative research conclusions. And that it can do all that in minutes, if not seconds.

It’s all a matter of time, and that is what GPT Researcher is all about.

diff --git a/blog/tags/search-api.html b/blog/tags/search-api.html

One post tagged with "search-api"


· 6 min read
Assaf Elovic

OpenAI has done it again with a groundbreaking DevDay showcasing some of the latest improvements to the OpenAI suite of tools, products and services. One major release was the new Assistants API that makes it easier for developers to build their own assistive AI apps that have goals and can call models and tools.


diff --git a/blog/tags/tavily.html b/blog/tags/tavily.html

One post tagged with "tavily"


· 6 min read
Assaf Elovic

OpenAI has done it again with a groundbreaking DevDay showcasing some of the latest improvements to the OpenAI suite of tools, products and services. One major release was the new Assistants API that makes it easier for developers to build their own assistive AI apps that have goals and can call models and tools.


diff --git a/docs/contribute.html b/docs/contribute.html
diff --git a/docs/examples/examples.html b/docs/examples/examples.html

Examples

Run PIP Package

from gpt_researcher import GPTResearcher
import asyncio


async def main():
    """
    This is a sample script that shows how to run a research report.
    """
    # Query
    query = "What happened in the latest burning man floods?"

    # Report Type
    report_type = "research_report"

    # Initialize the researcher
    researcher = GPTResearcher(query=query, report_type=report_type, config_path=None)
    # Conduct research on the given query
    await researcher.conduct_research()
    # Write the report
    report = await researcher.write_report()

    return report


if __name__ == "__main__":
    asyncio.run(main())
diff --git a/docs/faq.html b/docs/faq.html

Frequently Asked Questions

How do I get started?

It really depends on what you're aiming for.

If you're looking to connect your AI application to the internet with Tavily's tailored API, check out the Tavily API documentation. If you're looking to build and deploy our open source autonomous research agent GPT Researcher, please see the GPT Researcher documentation. You can also check out demos and examples for inspiration here.

What is GPT Researcher?

GPT Researcher is a popular open source autonomous research agent that takes care of the tedious task of research for you, by scraping, filtering and aggregating 20+ web sources for each research task.

GPT Researcher is built with best practices for leveraging LLMs (prompt engineering, RAG, chains, embeddings, etc), and is optimized for quick and efficient research. It is also fully customizable and can be tailored to your specific needs.

To learn more about GPT Researcher, check out the documentation page.

How much does each research run cost?

A research task using GPT Researcher costs around $0.01 per run (for GPT-4 usage). We're constantly optimizing LLM calls to reduce costs and improve performance.

How do you ensure the report is factual and accurate?

We do our best to ensure that the information we provide is factual and accurate. We do this by using multiple sources and by using proprietary AI to score and rank the most relevant and accurate information, as well as to filter out irrelevant information and sources.

Lastly, by using RAG and other techniques, we ensure that the information is relevant to the context of the research task, leading to more accurate generative AI content and reduced hallucinations.

What are your plans for the future?

We're constantly working on improving our products and services. We're currently working on improving our search API together with design partners, and adding more data sources to our search engine. We're also working on improving our research agent GPT Researcher, and adding more features to it while growing our amazing open source community.

If you're interested in our roadmap or looking to collaborate, check out our roadmap page.

Feel free to contact us if you have any further questions or suggestions!

diff --git a/docs/gpt-researcher/config.html b/docs/gpt-researcher/config.html

Introduction

In addition, GPT Researcher can be tailored to various report formats (such as APA), word count, research iterations depth, etc.

GPT Researcher defaults to our recommended suite of integrations: OpenAI for LLM calls and the Tavily API for retrieving real-time online information.

OpenAI still stands as the superior LLM for this purpose. We assume it will stay this way for some time, and that prices will only continue to decrease, while performance and speed increase over time.

The default config.py file can be found in /gpt_researcher/config/. It supports various options for customizing GPT Researcher to your needs. You can also include your own external JSON file config.json by adding the path in the config_file param. Please follow the config.py file for additional future support.

Below is a list of current supported options:

  • RETRIEVER: Web search engine used for retrieving sources. Defaults to tavily. Options: duckduckgo, bing, google, serper, searx. Check here for supported retrievers
  • EMBEDDING_PROVIDER: Provider for embedding model. Defaults to openai. Options: ollama, huggingface, azure_openai, custom.
  • LLM_PROVIDER: LLM provider. Defaults to openai. Options: google, ollama, groq and much more!
  • FAST_LLM_MODEL: Model name for fast LLM operations such as summaries. Defaults to gpt-4o-mini.
  • SMART_LLM_MODEL: Model name for smart operations like generating research reports and reasoning. Defaults to gpt-4o.
  • FAST_TOKEN_LIMIT: Maximum token limit for fast LLM responses. Defaults to 2000.
  • SMART_TOKEN_LIMIT: Maximum token limit for smart LLM responses. Defaults to 4000.
  • BROWSE_CHUNK_MAX_LENGTH: Maximum length of text chunks to browse in web sources. Defaults to 8192.
  • SUMMARY_TOKEN_LIMIT: Maximum token limit for generating summaries. Defaults to 700.
  • TEMPERATURE: Sampling temperature for LLM responses, typically between 0 and 1. A higher value results in more randomness and creativity, while a lower value results in more focused and deterministic responses. Defaults to 0.55.
  • TOTAL_WORDS: Total word count limit for document generation or processing tasks. Defaults to 800.
  • REPORT_FORMAT: Preferred format for report generation. Defaults to APA. Consider formats like MLA, CMS, Harvard style, IEEE, etc.
  • MAX_ITERATIONS: Maximum number of iterations for processes like query expansion or search refinement. Defaults to 3.
  • AGENT_ROLE: Role of the agent. This might be used to customize the behavior of the agent based on its assigned roles. No default value.
  • MAX_SUBTOPICS: Maximum number of subtopics to generate or consider. Defaults to 3.
  • SCRAPER: Web scraper to use for gathering information. Defaults to bs (BeautifulSoup). You can also use newspaper.
  • DOC_PATH: Path to read and research local documents. Defaults to an empty string indicating no path specified.
  • USER_AGENT: Custom User-Agent string for web crawling and web requests.
  • MEMORY_BACKEND: Backend used for memory operations, such as local storage of temporary data. Defaults to local.

To change the default configurations, you can simply add env variables to your .env file as named above, or export them manually in your local project directory.

For example, to manually change the search engine and report format:

export RETRIEVER=bing
export REPORT_FORMAT=IEEE

Please note that you might need to export additional env vars and obtain API keys for other supported search retrievers and LLM providers. Please follow your console logs for further assistance. To learn more about additional LLM support you can check out the docs here.

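You can also include your own external JSON file config.json by adding the path in the config_file param. As a hypothetical sketch (key names here mirror the env variables listed above; the exact naming convention follows config.py):

{
  "RETRIEVER": "bing",
  "EMBEDDING_PROVIDER": "openai",
  "FAST_LLM_MODEL": "gpt-4o-mini",
  "SMART_LLM_MODEL": "gpt-4o",
  "REPORT_FORMAT": "IEEE",
  "MAX_ITERATIONS": 3
}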

diff --git a/docs/gpt-researcher/example.html b/docs/gpt-researcher/example.html

Agent Example

If you're interested in using GPT Researcher as a standalone agent, you can easily import it into any existing Python project. Below is an example of calling the agent to generate a research report:

from gpt_researcher import GPTResearcher
import asyncio

# It is best to define global constants at the top of your script
QUERY = "What happened in the latest burning man floods?"
REPORT_TYPE = "research_report"

async def fetch_report(query, report_type):
    """
    Fetch a research report based on the provided query and report type.
    """
    researcher = GPTResearcher(query=query, report_type=report_type, config_path=None)
    await researcher.conduct_research()
    report = await researcher.write_report()
    return report

async def generate_research_report():
    """
    This is a sample script that executes an async main function to run a research report.
    """
    report = await fetch_report(QUERY, REPORT_TYPE)
    print(report)

if __name__ == "__main__":
    asyncio.run(generate_research_report())

You can further enhance this example to use the returned report as context for generating valuable content such as news articles, marketing content, email templates, newsletters, and more.

You can also use GPT Researcher to gather information about code documentation, business analysis, financial information, and more, all of which can be used to complete complex tasks that require factual, high-quality, real-time information.


Frontend Application

This frontend project aims to enhance the user experience of GPT-Researcher, providing an intuitive and efficient interface for automated research. It offers two deployment options to suit different needs and environments.

Option 1: Static Frontend (FastAPI)

A lightweight solution using FastAPI to serve static files.

Prerequisites

  • Python 3.11+
  • pip

Setup and Running

  1. Install required packages:

    pip install -r requirements.txt
  2. Start the server:

    python -m uvicorn main:app
  3. Access at http://localhost:8000

Demo

Option 2: NextJS Frontend

A more robust solution with enhanced features and performance.

Prerequisites

  • Node.js (v18.17.0 recommended)
  • npm

Setup and Running

  1. Navigate to NextJS directory:

    cd nextjs
  2. Set up Node.js:

    nvm install 18.17.0
    nvm use v18.17.0
  3. Install dependencies:

    npm install --legacy-peer-deps
  4. Start development server:

    npm run dev
  5. Access at http://localhost:3000

Note: Requires the backend server running on localhost:8000, as detailed in Option 1.

Demo

Choosing an Option

  • Static Frontend: Quick setup, lightweight deployment.
  • NextJS Frontend: Feature-rich, scalable, better performance and SEO.

For production, NextJS is recommended.

Frontend Features

Our frontend enhances GPT-Researcher by providing:

  1. Intuitive Research Interface: Streamlined input for research queries.
  2. Real-time Progress Tracking: Visual feedback on ongoing research tasks.
  3. Interactive Results Display: Easy-to-navigate presentation of findings.
  4. Customizable Settings: Adjust research parameters to suit specific needs.
  5. Responsive Design: Optimal experience across various devices.

These features aim to make the research process more efficient and user-friendly, complementing GPT-Researcher's powerful agent capabilities.


Getting Started

Step 0 - Install Python 3.11 or later. See here for a step-by-step guide.

Step 1 - Download the project and navigate to its directory

$ git clone https://github.com/assafelovic/gpt-researcher.git
$ cd gpt-researcher

Step 2 - Set up API keys using one of two methods: exporting them directly or storing them in a .env file.

For Linux/Temporary Windows Setup, use the export method:

export OPENAI_API_KEY={Your OpenAI API Key here}
export TAVILY_API_KEY={Your Tavily API Key here}

For a more permanent setup, create a .env file in the current gpt-researcher directory and input the env vars (without export).
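
For example, a minimal .env file with the two required keys would look like:

OPENAI_API_KEY={Your OpenAI API Key here}
TAVILY_API_KEY={Your Tavily API Key here}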

  • For the LLM provider, we recommend OpenAI GPT, but you can use any other LLM model (including open-source models). To learn how to change the LLM model, please refer to the documentation page.
  • For the web search API, we recommend the Tavily Search API, but you can also use other search APIs of your choice by changing the search provider in config/config.py to duckduckgo, google, bing, serper, searx and more. Then add the corresponding env API key.

Quickstart

Step 1 - Install dependencies

$ pip install -r requirements.txt

Step 2 - Run the agent with FastAPI

$ uvicorn main:app --reload

Step 3 - Go to http://localhost:8000 on any browser and enjoy researching!

Using Virtual Environment or Poetry

Select either based on your familiarity with each:

Virtual Environment

Establishing the Virtual Environment with Activate/Deactivate configuration

Create a virtual environment using the venv package with an environment name of your choice, for example env. Execute the following command in the PowerShell/CMD terminal:

python -m venv env

To activate the virtual environment, use the following activation script in PowerShell/CMD terminal:

.\env\Scripts\activate

To deactivate the virtual environment, run the following deactivation script in PowerShell/CMD terminal:

deactivate

Install the dependencies for a Virtual environment

After activating the env environment, install dependencies using the requirements.txt file with the following command:

python -m pip install -r requirements.txt

Poetry

Establishing the Poetry dependencies and virtual environment with Poetry version ~1.7.1

Install the project dependencies and create a virtual environment for the project in one step. Poetry reads the project's pyproject.toml file to determine the required dependencies and their versions, ensuring a consistent and isolated development environment. Keeping project-specific dependencies separate prevents conflicts with system-wide packages and simplifies dependency management throughout the project's lifecycle.

poetry install

Activate the virtual environment associated with a Poetry project

Running this command opens a shell session within the project's isolated environment, providing a dedicated space for development and execution. It ensures that the correct versions of dependencies are used and avoids conflicts with system-wide packages, making it essential for working on the project seamlessly.

poetry shell

Run the app

Launch the FastAPI application agent on a Virtual Environment or Poetry setup by executing the following command:

python -m uvicorn main:app --reload

Visit http://localhost:8000 in any web browser and explore your research!


Try it with Docker

Step 1 - Install Docker

Follow instructions at https://docs.docker.com/engine/install/

Step 2 - Create .env file with your OpenAI Key or simply export it

$ export OPENAI_API_KEY={Your API Key here}

Step 3 - Run the application

$ docker-compose up

Step 4 - Go to http://localhost:8000 on any browser and enjoy researching!

Introduction

GPT Researcher is an autonomous agent designed for comprehensive online research on a variety of tasks.

The agent can produce detailed, factual, and unbiased research reports, with customization options for focusing on relevant resources, outlines, and lessons. Inspired by the recent Plan-and-Solve and RAG papers, GPT Researcher addresses issues of speed, determinism, and reliability, offering more stable performance and increased speed through parallelized agent work, as opposed to synchronous operations.

Why GPT Researcher?

  • Forming objective conclusions for manual research tasks can take time, sometimes weeks, to find the right resources and information.
  • Current LLMs are trained on past and outdated information, with heavy risks of hallucinations, making them almost irrelevant for research tasks.
  • Current LLMs are limited to short token outputs, which are not sufficient for long, detailed research reports (2k+ words).
  • Solutions that enable web search (such as ChatGPT + Web Plugin) consider only limited resources and content, which in some cases results in superficial conclusions or biased answers.
  • Using only a selection of resources can create bias in determining the right conclusions for research questions or tasks.

Architecture

The main idea is to run "planner" and "execution" agents, where the planner generates questions to research, and the execution agents seek the most related information based on each generated research question. Finally, the planner filters and aggregates all related information and creates a research report.

The agents leverage both gpt-4o-mini and gpt-4o (128K context) to complete a research task. We optimize for costs by using each only when necessary. The average research task takes around 3 minutes to complete and costs ~$0.10.

More specifically:

  • Create a domain-specific agent based on the research query or task.
  • Generate a set of research questions that together form an objective opinion on the given task.
  • For each research question, trigger a crawler agent that scrapes online resources for information relevant to the task.
  • For each scraped resource, summarize the relevant information and keep track of its sources.
  • Finally, filter and aggregate all summarized sources and generate a final research report.
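
To make the flow concrete, here is a minimal, illustrative sketch of the planner/executor pattern described above. Every function here is a hypothetical stand-in rather than GPT Researcher's actual internals; the point is the parallel fan-out over research questions:

import asyncio

def plan_questions(task: str) -> list[str]:
    # planner: derive research questions from the task (stubbed)
    return [f"{task} - question {i}" for i in range(1, 4)]

async def crawl_and_summarize(question: str) -> str:
    # execution agent: scrape sources for one question and summarize (stubbed)
    await asyncio.sleep(0)  # stands in for parallel web crawling
    return f"summary for: {question}"

async def research(task: str) -> str:
    questions = plan_questions(task)
    # run all execution agents concurrently instead of one after another
    summaries = await asyncio.gather(*(crawl_and_summarize(q) for q in questions))
    # planner: filter and aggregate the summaries into a final report
    return "\n".join(summaries)

print(asyncio.run(research("Is AI in a hype cycle?")))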

Demo

Tutorials

Features

  • 📝 Generate research, outlines, resources and lessons reports
  • 📜 Can generate long and detailed research reports (over 2K words)
  • 🌐 Aggregates over 20 web sources per research task to form objective and factual conclusions
  • 🖥️ Includes an easy-to-use web interface (HTML/CSS/JS)
  • 🔍 Scrapes web sources with JavaScript support
  • 📂 Keeps track and context of visited and used web sources
  • 📄 Export research reports to PDF, Word and more...

Let's get started here!


LangGraph

LangGraph is a library for building stateful, multi-actor applications with LLMs. This example uses LangGraph to automate in-depth research on any given topic.

Use case

By using LangGraph, the research process can be significantly improved in depth and quality by leveraging multiple agents with specialized skills. Inspired by the recent STORM paper, this example showcases how a team of AI agents can work together to conduct research on a given topic, from planning to publication.

An average run generates a 5-6 page research report in multiple formats such as PDF, Docx and Markdown.

Please note: This example uses the OpenAI API only for optimized performance.

The Multi-Agent Team

The research team consists of seven AI agents plus a human in the loop:

  • Human - The human in the loop who oversees the process and provides feedback to the agents.
  • Chief Editor - Oversees the research process and manages the team. This is the "master" agent that coordinates the other agents using LangGraph.
  • Researcher (gpt-researcher) - A specialized autonomous agent that conducts in-depth research on a given topic.
  • Editor - Responsible for planning the research outline and structure.
  • Reviewer - Validates the correctness of the research results given a set of criteria.
  • Revisor - Revises the research results based on the feedback from the reviewer.
  • Writer - Responsible for compiling and writing the final report.
  • Publisher - Responsible for publishing the final report in various formats.

How it works

Generally, the process is based on the following stages:

  1. Planning stage
  2. Data collection and analysis
  3. Review and revision
  4. Writing and submission
  5. Publication

Architecture


Steps

More specifically (as seen in the architecture diagram), the process is as follows, with a minimal code sketch after the list:

  • Browser (gpt-researcher) - Browses the internet for initial research based on the given research task.
  • Editor - Plans the report outline and structure based on the initial research.
  • For each outline topic (in parallel):
    • Researcher (gpt-researcher) - Runs in-depth research on the subtopics and writes a draft.
    • Reviewer - Validates the correctness of the draft given a set of criteria and provides feedback.
    • Revisor - Revises the draft until it is satisfactory based on the reviewer feedback.
  • Writer - Compiles and writes the final report including an introduction, conclusion and references section from the given research findings.
  • Publisher - Publishes the final report in multiple formats such as PDF, Docx, Markdown, etc.
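
As a rough illustration of how these steps map onto LangGraph primitives, here is a minimal sketch. The state keys and node bodies are hypothetical stand-ins for the real agents, not the actual multi_agents implementation:

from typing import TypedDict
from langgraph.graph import StateGraph, END

class ResearchState(TypedDict, total=False):
    query: str
    research_data: str
    outline: str
    report: str

def browser(state: ResearchState) -> dict:
    # initial research on the query (stubbed)
    return {"research_data": f"initial research on: {state['query']}"}

def editor(state: ResearchState) -> dict:
    # plan the report outline from the initial research (stubbed)
    return {"outline": "1. Background 2. Findings 3. Conclusion"}

def writer(state: ResearchState) -> dict:
    # compile the final report from the outline and research data (stubbed)
    return {"report": f"{state['outline']}\n\n{state['research_data']}"}

workflow = StateGraph(ResearchState)
workflow.add_node("browser", browser)
workflow.add_node("editor", editor)
workflow.add_node("writer", writer)
workflow.set_entry_point("browser")
workflow.add_edge("browser", "editor")
workflow.add_edge("editor", "writer")
workflow.add_edge("writer", END)

chain = workflow.compile()
print(chain.invoke({"query": "Is AI in a hype cycle?"})["report"])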

How to run

  1. Install required packages:
    pip install -r requirements.txt
  2. Update env variables
    export OPENAI_API_KEY={Your OpenAI API Key here}
    export TAVILY_API_KEY={Your Tavily API Key here}
  3. Run the application:
    python main.py

Usage

To change the research query and customize the report, edit the task.json file in the main directory.

The task.json file contains the following fields:

  • query - The research query or task.
  • model - The OpenAI LLM to use for the agents.
  • max_sections - The maximum number of sections in the report. Each section is a subtopic of the research query.
  • include_human_feedback - If true, the user can provide feedback to the agents. If false, the agents will work autonomously.
  • publish_formats - The formats to publish the report in. The reports will be written in the output directory.
  • source - The location from which to conduct the research. Options: web or local. For local, please add DOC_PATH env var.
  • follow_guidelines - If true, the research report will follow the guidelines below. It will take longer to complete. If false, the report will be generated faster but may not follow the guidelines.
  • guidelines - A list of guidelines that the report must follow.
  • verbose - If true, the application will print detailed logs to the console.

For example:

{
  "query": "Is AI in a hype cycle?",
  "model": "gpt-4o",
  "max_sections": 3,
  "publish_formats": {
    "markdown": true,
    "pdf": true,
    "docx": true
  },
  "include_human_feedback": false,
  "source": "web",
  "follow_guidelines": true,
  "guidelines": [
    "The report MUST fully answer the original question",
    "The report MUST be written in apa format",
    "The report MUST be written in english"
  ],
  "verbose": true
}

To Deploy

pip install langgraph-cli
langgraph up

From there, see documentation here on how to use the streaming and async endpoints, as well as the playground.

NextJS Frontend App

The React app (located in the frontend directory) is our Frontend 2.0, which we hope will showcase the robustness of the backend on the frontend as well.

It comes with loads of added features, such as:

  • a drag-n-drop user interface for uploading and deleting files to be used as local documents by GPTResearcher.
  • a GUI for setting your GPTR environment variables.
  • the ability to trigger the multi_agents flow via the Backend Module or Langgraph Cloud Host (currently in closed beta).
  • stability fixes
  • and more coming soon!

Run the NextJS React App with Docker

Step 1 - Install Docker

Step 2 - Copy the '.env.example' file, add your API keys to the copy, and save the file as '.env'

Step 3 - Within the docker-compose file, comment out services that you don't want to run with Docker.

$ docker-compose up --build

Step 4 - By default, if you haven't uncommented anything in your docker-compose file, this flow will start two processes:

  • the Python server running on localhost:8000
  • the React app running on localhost:3000

Visit localhost:3000 on any browser and enjoy researching!

Run the NextJS React App with NPM

cd frontend
nvm install 18.17.0
nvm use v18.17.0
npm install --legacy-peer-deps
npm run dev

Configure LLM

Mistral

Then update the corresponding env vars, for example:

LLM_PROVIDER=mistral
MISTRAL_API_KEY=[Your key]
FAST_LLM_MODEL=open-mistral-7b
SMART_LLM_MODEL=mistral-large-latest

Together AI

Together AI offers an API to query 50+ leading open-source models in a couple of lines of code. Then update the corresponding env vars, for example:

LLM_PROVIDER=together
TOGETHER_API_KEY=[Your key]
FAST_LLM_MODEL=meta-llama/Llama-3-8b-chat-hf
SMART_LLM_MODEL=meta-llama/Llama-3-70b-chat-hf

HuggingFace

This integration requires a bit of extra work. Follow this guide to learn more. After you've followed the tutorial above, update the env vars:

LLM_PROVIDER=huggingface
HUGGINGFACE_API_KEY=[Your key]
FAST_LLM_MODEL=HuggingFaceH4/zephyr-7b-beta
SMART_LLM_MODEL=HuggingFaceH4/zephyr-7b-beta

Google Gemini

Sign up here to obtain a Google Gemini API key and update the following env vars:

Please make sure to set the fast and smart models to valid Gemini model names.

LLM_PROVIDER=google
GEMINI_API_KEY=[Your key]
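
For example, assuming the Gemini 1.5 model family is available to your account (check Google's docs for current model names), you might add:

FAST_LLM_MODEL=gemini-1.5-flash
SMART_LLM_MODEL=gemini-1.5-pro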

PIP Package


🌟 Exciting News! Now, you can integrate gpt-researcher with your apps seamlessly!

Steps to Install GPT Researcher

Follow these easy steps to get started:

  1. Pre-requisite: Ensure Python 3.10+ is installed on your machine 💻
  2. Install gpt-researcher: Grab the official package from PyPI.

    pip install gpt-researcher

  3. Environment Variables: Create a .env file with your OpenAI API key or simply export it:

    export OPENAI_API_KEY={Your OpenAI API Key here}
    export TAVILY_API_KEY={Your Tavily API Key here}

  4. Start using GPT Researcher in your own codebase

Example Usage 📝

from gpt_researcher import GPTResearcher
import asyncio


async def get_report(query: str, report_type: str) -> str:
    researcher = GPTResearcher(query, report_type)
    research_result = await researcher.conduct_research()
    report = await researcher.write_report()
    return report

if __name__ == "__main__":
    query = "what team may win the NBA finals?"
    report_type = "research_report"

    report = asyncio.run(get_report(query, report_type))
    print(report)

Specific Examples 🌐

Example 1: Research Report 📚

query = "Latest developments in renewable energy technologies"
report_type = "research_report"

Example 2: Resource Report 📋

query = "List of top AI conferences in 2023"
report_type = "resource_report"

Example 3: Outline Report 📝

query = "Outline for an article on the impact of AI in education"
report_type = "outline_report"

Integration with Web Frameworks 🌍

FastAPI Example

from fastapi import FastAPI
from gpt_researcher import GPTResearcher
import asyncio

app = FastAPI()

@app.get("/report/{report_type}")
async def get_report(query: str, report_type: str) -> dict:
    researcher = GPTResearcher(query, report_type)
    research_result = await researcher.conduct_research()
    report = await researcher.write_report()
    return {"report": report}

# Run the server
# uvicorn main:app --reload

Flask Example

Pre-requisite: Install flask with the async extra.

pip install 'flask[async]'
from flask import Flask, request
from gpt_researcher import GPTResearcher

app = Flask(__name__)

@app.route('/report/<report_type>', methods=['GET'])
async def get_report(report_type):
    query = request.args.get('query')
    researcher = GPTResearcher(query, report_type)
    research_result = await researcher.conduct_research()
    report = await researcher.write_report()
    return report

# Run the server
# flask run

Run the server

flask run

Example Request

curl -X GET "http://localhost:5000/report/research_report?query=what team may win the nba finals?"

Note: The above code snippets are just examples. You can customize them as per your requirements.

Getters and Setters

If you're interested in getting more details about the research, you can use the following getters:

Get Research Sources

Sources are the URLs that were used to gather information for the research.

source_urls = researcher.get_source_urls()

Get Research Context

Context is all the retrieved information from the research. It includes the sources and their corresponding content.

research_context = researcher.get_research_context()

Get Research Costs

Costs are the API costs accumulated during the research process, reported as a dollar amount.

research_costs = researcher.get_costs()

Set Verbose

You can set the verbose mode to get more detailed logs.

researcher.set_verbose(True)

Add Costs

You can also add costs to the research process if you want to track the costs from external usage.

researcher.add_costs(0.22)
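
Putting it together, a minimal sketch that runs a research task and then inspects it with the getters above might look like this:

import asyncio
from gpt_researcher import GPTResearcher

async def main():
    researcher = GPTResearcher("What are the latest AI trends?", "research_report")
    await researcher.conduct_research()
    report = await researcher.write_report()
    # inspect the finished run with the getters documented above
    print(researcher.get_source_urls())
    print(researcher.get_research_context())
    print(researcher.get_costs())

asyncio.run(main())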

Retrievers

Web Search Engines

You can also use other search engines by specifying the RETRIEVER env var. Please note that each search engine has its own API key requirements and usage limits.

For example:

RETRIEVER=bing

You can also specify multiple retrievers by separating them with commas. The system will use each specified retriever in sequence. For example:

RETRIEVER=tavily, arxiv

Thanks to our community, we have integrated the following web search engines:

Custom Retrievers

You can also use any custom retriever of your choice by specifying the RETRIEVER=custom env var. Custom retrievers allow you to use any search engine that provides an API to retrieve documents, and they are widely used for enterprise research tasks.

In addition to setting the RETRIEVER env, you also need to set the following env vars:

  • RETRIEVER_ENDPOINT: The endpoint URL of the custom retriever.
  • Additional arguments required by the retriever should be prefixed with RETRIEVER_ARG_ (e.g., RETRIEVER_ARG_API_KEY).

Example

RETRIEVER=custom
RETRIEVER_ENDPOINT=https://api.myretriever.com
RETRIEVER_ARG_API_KEY=YOUR_API_KEY

Response Format

For the custom retriever to work correctly, the response from the endpoint should be in the following format:

[
  {
    "url": "http://example.com/page1",
    "raw_content": "Content of page 1"
  },
  {
    "url": "http://example.com/page2",
    "raw_content": "Content of page 2"
  }
]

The system assumes this response format and processes the list of sources accordingly.
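
As an illustration, a minimal custom retriever endpoint returning this format could be sketched with FastAPI as follows. The query parameter name ("query") and the static results are assumptions for the sketch; verify the exact request your GPT Researcher version sends (e.g., from its logs):

from fastapi import FastAPI

app = FastAPI()

@app.get("/")
async def retrieve(query: str):
    # in a real retriever, search your index with `query` instead
    return [
        {"url": "http://example.com/page1", "raw_content": "Content of page 1"},
        {"url": "http://example.com/page2", "raw_content": "Content of page 2"},
    ]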

Missing a retriever? Feel free to contribute to this project by submitting issues or pull requests on our GitHub page.


Roadmap

We're constantly working on additional features and improvements to our products and services, as well as new products and services to help you build better AI applications using GPT Researcher.

Our vision is to build the #1 autonomous research agent for AI developers and researchers, and we're excited to have you join us on this journey!

The roadmap is prioritized based on the following goals: Performance, Quality, Modularity and Conversational flexibility. The roadmap is public and can be found here.

Interested in collaborating or contributing? Check out our contributing page for more information.


Tailored Research

The GPT Researcher package allows you to tailor the research to your needs such as researching on specific sources or local documents, and even specify the agent prompt instruction upon which the research is conducted.

Research on Specific Sources 📚

You can specify the sources you want the GPT Researcher to research on by providing a list of URLs. GPT Researcher will then conduct research on the provided sources only. Simply pass the sources as the source_urls argument to the GPTResearcher class and set report_source to "static".

from gpt_researcher import GPTResearcher
import asyncio

async def get_report(query: str, report_source: str, sources: list) -> str:
    researcher = GPTResearcher(query=query, report_source=report_source, source_urls=sources)
    await researcher.conduct_research()
    return await researcher.write_report()

if __name__ == "__main__":
    query = "What are the biggest trends in AI lately?"
    report_source = "static"
    sources = [
        "https://en.wikipedia.org/wiki/Artificial_intelligence",
        "https://www.ibm.com/think/insights/artificial-intelligence-trends",
        "https://www.forbes.com/advisor/business/ai-statistics"
    ]
    report = asyncio.run(get_report(query=query, report_source=report_source, sources=sources))
    print(report)

Specify Agent Prompt 📝

You can specify the agent prompt instruction upon which the research is conducted. This allows you to guide the research in a specific direction and tailor the report layout. Simply pass the prompt as the query argument to the GPTResearcher class, along with the "custom_report" report_type.

from gpt_researcher import GPTResearcher
import asyncio

async def get_report(prompt: str, report_type: str) -> str:
    researcher = GPTResearcher(query=prompt, report_type=report_type)
    await researcher.conduct_research()
    report = await researcher.write_report()
    return report

if __name__ == "__main__":
    report_type = "custom_report"
    prompt = "Research the latest advancements in AI and provide a detailed report in APA format including sources."

    report = asyncio.run(get_report(prompt=prompt, report_type=report_type))
    print(report)

Research on Local Documents 📄

You can instruct the GPT Researcher to research on local documents by providing the path to those documents. Currently supported file formats are: PDF, plain text, CSV, Excel, Markdown, PowerPoint, and Word documents.

Step 1: Add the env variable DOC_PATH pointing to the folder where your documents are located.

For example:

export DOC_PATH="./my-docs"

Step 2: When you create an instance of the GPTResearcher class, pass the report_source argument as "local".

GPT Researcher will then conduct research on the provided documents.

from gpt_researcher import GPTResearcher
import asyncio

async def get_report(query: str, report_source: str) -> str:
    researcher = GPTResearcher(query=query, report_source=report_source)
    await researcher.conduct_research()
    report = await researcher.write_report()
    return report

if __name__ == "__main__":
    query = "What can you tell me about myself based on my documents?"
    report_source = "local"  # "local" or "web"

    report = asyncio.run(get_report(query=query, report_source=report_source))
    print(report)

Hybrid Research 🔄

You can combine the above methods to conduct hybrid research. For example, you can instruct the GPT Researcher to research on both web sources and local documents. Simply provide the sources and set the report_source argument as "hybrid" and watch the magic happen.

Please note! You should set the proper retrievers for the web sources and the doc path for local documents for this to work. To learn more about retrievers, check out the Retrievers documentation.
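
Following the same pattern as the examples above, a hybrid run might look like the sketch below (assuming DOC_PATH points at your documents and a web retriever is configured):

from gpt_researcher import GPTResearcher
import asyncio

async def get_report(query: str) -> str:
    researcher = GPTResearcher(query=query, report_source="hybrid")
    await researcher.conduct_research()
    return await researcher.write_report()

if __name__ == "__main__":
    report = asyncio.run(get_report("How do my internal docs compare to industry AI trends?"))
    print(report)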

Research on LangChain Documents 🦜️🔗

You can instruct the GPT Researcher to research on a list of LangChain Document instances.

For example:

from langchain_core.documents import Document
from typing import List, Dict
from gpt_researcher import GPTResearcher
from langchain_postgres.vectorstores import PGVector
from langchain_openai import OpenAIEmbeddings
from sqlalchemy import create_engine
import asyncio


CONNECTION_STRING = 'postgresql://someuser:somepass@localhost:5432/somedatabase'

def get_retriever(collection_name: str, search_kwargs: Dict[str, str]):
    engine = create_engine(CONNECTION_STRING)
    embeddings = OpenAIEmbeddings()

    index = PGVector.from_existing_index(
        use_jsonb=True,
        embedding=embeddings,
        collection_name=collection_name,
        connection=engine,
    )

    return index.as_retriever(search_kwargs=search_kwargs)


async def get_report(query: str, report_type: str, report_source: str, documents: List[Document]) -> str:
    researcher = GPTResearcher(query=query, report_type=report_type, report_source=report_source, documents=documents)
    await researcher.conduct_research()
    report = await researcher.write_report()
    return report

if __name__ == "__main__":
    query = "What can you tell me about blue cheese based on my documents?"
    report_type = "research_report"
    report_source = "langchain_documents"

    # using a LangChain retriever to get all the documents regarding cheese
    # https://api.python.langchain.com/en/latest/retrievers/langchain_core.retrievers.BaseRetriever.html#langchain_core.retrievers.BaseRetriever.invoke
    langchain_retriever = get_retriever("cheese_collection", {"k": 3})
    documents = langchain_retriever.invoke("All the documents about cheese")
    report = asyncio.run(get_report(query=query, report_type=report_type, report_source=report_source, documents=documents))
    print(report)

Troubleshooting

Error processing the url

We use Selenium for site scraping. Some sites fail to be scraped; in these cases, restart and try running again.

Chrome version issues

Many users have an issue with their ChromeDriver because the latest Chrome browser version doesn't have a compatible ChromeDriver yet.

To downgrade your Chrome web browser using Slimjet, follow these steps. First, visit the website and scroll down to find the list of available older Chrome versions. Choose the version you wish to install, making sure it's compatible with your operating system. Once you've selected the desired version, click the corresponding link to download the installer. Before proceeding with the installation, it's crucial to uninstall your current version of Chrome to avoid conflicts.

It's important to check that the version you downgrade to has a ChromeDriver available on the official ChromeDriver website.
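
For example, on Linux you can compare the two versions (the major versions should match):

google-chrome --version
chromedriver --version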

If none of the above works, you can try out our hosted beta.


Vector Stores

The GPT Researcher package allows you to integrate with existing LangChain vector stores that have been populated. For a complete list of supported LangChain vector stores, please refer to this link.

You can create a set of embeddings and LangChain documents and store them in any supported vector store of your choosing. GPT Researcher will work with any LangChain vector store that implements the asimilarity_search method.

Faiss

from gpt_researcher import GPTResearcher

from langchain.text_splitter import CharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document

# excerpt taken from - https://paulgraham.com/wealth.html
essay = """
May 2004

(This essay was originally published in Hackers & Painters.)

If you wanted to get rich, how would you do it? I think your best bet would be to start or join a startup.
That's been a reliable way to get rich for hundreds of years. The word "startup" dates from the 1960s,
but what happens in one is very similar to the venture-backed trading voyages of the Middle Ages.

Startups usually involve technology, so much so that the phrase "high-tech startup" is almost redundant.
A startup is a small company that takes on a hard technical problem.

Lots of people get rich knowing nothing more than that. You don't have to know physics to be a good pitcher.
But I think it could give you an edge to understand the underlying principles. Why do startups have to be small?
Will a startup inevitably stop being a startup as it grows larger?
And why do they so often work on developing new technology? Why are there so many startups selling new drugs or computer software,
and none selling corn oil or laundry detergent?


The Proposition

Economically, you can think of a startup as a way to compress your whole working life into a few years.
Instead of working at a low intensity for forty years, you work as hard as you possibly can for four.
This pays especially well in technology, where you earn a premium for working fast.

Here is a brief sketch of the economic proposition. If you're a good hacker in your mid twenties,
you can get a job paying about $80,000 per year. So on average such a hacker must be able to do at
least $80,000 worth of work per year for the company just to break even. You could probably work twice
as many hours as a corporate employee, and if you focus you can probably get three times as much done in an hour.[1]
You should get another multiple of two, at least, by eliminating the drag of the pointy-haired middle manager who
would be your boss in a big company. Then there is one more multiple: how much smarter are you than your job
description expects you to be? Suppose another multiple of three. Combine all these multipliers,
and I'm claiming you could be 36 times more productive than you're expected to be in a random corporate job.[2]
If a fairly good hacker is worth $80,000 a year at a big company, then a smart hacker working very hard without
any corporate bullshit to slow him down should be able to do work worth about $3 million a year.
...
...
...
"""

document = [Document(page_content=essay)]
text_splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=30, separator="\n")
docs = text_splitter.split_documents(documents=document)

vector_store = FAISS.from_documents(docs, OpenAIEmbeddings())

query = """
Summarize the essay into 3 or 4 succinct sections.
Make sure to include key points regarding wealth creation.

Include some recommendations for entrepreneurs in the conclusion.
"""


# Create an instance of GPTResearcher
researcher = GPTResearcher(
    query=query,
    report_type="research_report",
    report_source="langchain_vectorstore",
    vector_store=vector_store,
)

# Conduct research and write the report (run inside an async context)
await researcher.conduct_research()
report = await researcher.write_report()

PGVector

from gpt_researcher import GPTResearcher
from langchain_postgres.vectorstores import PGVector
from langchain_openai import OpenAIEmbeddings

CONNECTION_STRING = 'postgresql://someuser:somepass@localhost:5432/somedatabase'


# assuming the vector store exists and contains the relevant documents
# also assuming embeddings have been or will be generated
vector_store = PGVector.from_existing_index(
    use_jsonb=True,
    embedding=OpenAIEmbeddings(),
    collection_name='some collection name',
    connection=CONNECTION_STRING,
    async_mode=True,
)

query = """
Create a short report about apples.
Include a section about which apples are considered best
during each season.
"""

# Create an instance of GPTResearcher
researcher = GPTResearcher(
    query=query,
    report_type="research_report",
    report_source="langchain_vectorstore",
    vector_store=vector_store,
)

# Conduct research and write the report (run inside an async context)
await researcher.conduct_research()
report = await researcher.write_report()

config.config

Configuration class to store the state of bools for different scripts access.

Config Objects

class Config(metaclass=Singleton)

Configuration class to store the state of bools for different scripts access.

__init__

def __init__() -> None

Initialize the Config class

set_fast_llm_model

def set_fast_llm_model(value: str) -> None

Set the fast LLM model value.

set_smart_llm_model

def set_smart_llm_model(value: str) -> None

Set the smart LLM model value.

set_fast_token_limit

def set_fast_token_limit(value: int) -> None

Set the fast token limit value.

set_smart_token_limit

def set_smart_token_limit(value: int) -> None

Set the smart token limit value.

set_browse_chunk_max_length

def set_browse_chunk_max_length(value: int) -> None

Set the browse_website command chunk max length value.

set_openai_api_key

def set_openai_api_key(value: str) -> None

Set the OpenAI API key value.

set_debug_mode

def set_debug_mode(value: bool) -> None

Set the debug mode value.

APIKeyError Objects

class APIKeyError(Exception)

Exception raised when an API key is not set in config.py or as an environment variable.

check_openai_api_key

def check_openai_api_key(cfg) -> None

Check if the OpenAI API key is set in config.py or as an environment variable.

check_tavily_api_key

def check_tavily_api_key(cfg) -> None

Check if the Tavily Search API key is set in config.py or as an environment variable.

check_google_api_key

def check_google_api_key(cfg) -> None

Check if the Google API key is set in config.py or as an environment variable.

check_serp_api_key

def check_serp_api_key(cfg) -> None

Check if the SERP API key is set in config.py or as an environment variable.

check_searx_url

def check_searx_url(cfg) -> None

Check if the Searx URL is set in config.py or as an environment variable.


config.singleton

The singleton metaclass for ensuring only one instance of a class.

Singleton Objects

class Singleton(abc.ABCMeta, type)

Singleton metaclass for ensuring only one instance of a class.

__call__

def __call__(cls, *args, **kwargs)

Call method for the singleton metaclass.

AbstractSingleton Objects

class AbstractSingleton(abc.ABC, metaclass=Singleton)

Abstract singleton class for ensuring only one instance of a class.


processing.html

HTML processing functions

def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]

Extract hyperlinks from a BeautifulSoup object

Arguments:

  • soup BeautifulSoup - The BeautifulSoup object
  • base_url str - The base URL

Returns:

List[Tuple[str, str]]: The extracted hyperlinks

def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]

Format hyperlinks to be displayed to the user

Arguments:

  • hyperlinks List[Tuple[str, str]] - The hyperlinks to format

Returns:

  • List[str] - The formatted hyperlinks
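
For illustration, assuming this module is importable as processing.html from your checkout, the two functions can be used together like this:

from bs4 import BeautifulSoup
from processing.html import extract_hyperlinks, format_hyperlinks

html = '<a href="/docs">Docs</a> and <a href="https://example.com/about">About</a>'
soup = BeautifulSoup(html, "html.parser")
links = extract_hyperlinks(soup, base_url="https://example.com")
print(format_hyperlinks(links))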

processing.text

Text processing functions

split_text

def split_text(text: str, max_length: int = 8192) -> Generator[str, None, None]

Split text into chunks of a maximum length

Arguments:

  • text str - The text to split
  • max_length int, optional - The maximum length of each chunk. Defaults to 8192.

Yields:

  • str - The next chunk of text

Raises:

  • ValueError - If the text is longer than the maximum length
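
For example, assuming the module is importable as processing.text, chunking a long string with the documented defaults looks like this:

from processing.text import split_text

text = "lorem ipsum dolor sit amet " * 1000
for chunk in split_text(text, max_length=8192):
    print(len(chunk))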

summarize_text

def summarize_text(url: str, text: str, question: str, driver: Optional[WebDriver] = None) -> str

Summarize text using the OpenAI API

Arguments:

  • url str - The url of the text
  • text str - The text to summarize
  • question str - The question to ask the model
  • driver WebDriver - The webdriver to use to scroll the page

Returns:

  • str - The summary of the text

scroll_to_percentage

def scroll_to_percentage(driver: WebDriver, ratio: float) -> None

Scroll to a percentage of the page

Arguments:

  • driver WebDriver - The webdriver to use
  • ratio float - The percentage to scroll to

Raises:

  • ValueError - If the ratio is not between 0 and 1

create_message

def create_message(chunk: str, question: str) -> Dict[str, str]

Create a message for the chat completion

Arguments:

  • chunk str - The chunk of text to summarize
  • question str - The question to answer

Returns:

Dict[str, str]: The message to send to the chat completion

write_to_file

def write_to_file(filename: str, text: str) -> None

Write text to a file

Arguments:

  • text str - The text to write
  • filename str - The filename to write to

Welcome

Hey there! 👋

We're a team of AI researchers and developers who are passionate about building the next generation of AI assistants. Our mission is to empower individuals and organizations with accurate, unbiased, and factual information.

GPT Researcher

Quickly accessing relevant and trustworthy information is more crucial than ever. However, we've learned that none of today's search engines provide a suitable tool that delivers factual, explicit, and objective answers without the need to continuously click and explore multiple sites for a given research task.

This is why we've built the trending open source GPT Researcher. GPT Researcher is an autonomous agent that takes care of the tedious task of research for you, by scraping, filtering, and aggregating 20+ web sources per research task.

To learn more about GPT Researcher, check out the documentation page.


GPT Researcher

The leading autonomous AI research agent

GPT Researcher

GPT Researcher is an open source autonomous agent designed for comprehensive online research on a variety of tasks.

Multi-Agent Assistant

Learn how a team of AI agents can work together to conduct research on a given topic, from planning to publication.

Examples and Demos

Check out Tavily API in action across multiple frameworks and use cases

invertedIndex":[["",{"_index":78,"t":{"121":{"position":[[14,2]]},"123":{"position":[[18,2]]},"124":{"position":[[27,2]]},"126":{"position":[[27,2]]},"128":{"position":[[26,2]]},"130":{"position":[[32,2]]},"207":{"position":[[29,2]]},"209":{"position":[[21,2]]},"211":{"position":[[28,2]]},"213":{"position":[[16,2]]},"215":{"position":[[32,5]]}}}],["0",{"_index":124,"t":{"191":{"position":[[33,2]]}}}],["1",{"_index":31,"t":{"42":{"position":[[7,2]]},"124":{"position":[[8,2]]}}}],["2",{"_index":35,"t":{"44":{"position":[[7,2]]},"126":{"position":[[8,2]]}}}],["2.0",{"_index":123,"t":{"191":{"position":[[29,3]]}}}],["3",{"_index":81,"t":{"128":{"position":[[8,2]]}}}],["4",{"_index":118,"t":{"189":{"position":[[11,1]]}}}],["abstractsingleton",{"_index":132,"t":{"203":{"position":[[0,17]]}}}],["accur",{"_index":25,"t":{"28":{"position":[[44,9]]}}}],["add",{"_index":93,"t":{"145":{"position":[[0,3]]}}}],["agent",{"_index":49,"t":{"66":{"position":[[37,5]]},"67":{"position":[[98,6]]},"71":{"position":[[42,5]]},"73":{"position":[[13,5]]},"98":{"position":[[10,5]]},"209":{"position":[[8,5]]}}}],["ai",{"_index":57,"t":{"67":{"position":[[95,2]]},"171":{"position":[[9,2]]}}}],["aim",{"_index":5,"t":{"5":{"position":[[0,6]]}}}],["anthrop",{"_index":106,"t":{"167":{"position":[[0,9]]}}}],["api",{"_index":96,"t":{"153":{"position":[[14,3]]},"155":{"position":[[14,3]]}}}],["apikeyerror",{"_index":142,"t":{"229":{"position":[[0,11]]}}}],["app",{"_index":44,"t":{"60":{"position":[[8,3]]},"111":{"position":[[16,3]]},"113":{"position":[[21,3]]},"115":{"position":[[21,3]]}}}],["architectur",{"_index":14,"t":{"11":{"position":[[10,12]]},"75":{"position":[[0,12]]},"87":{"position":[[0,12]]},"102":{"position":[[0,12]]}}}],["assist",{"_index":50,"t":{"66":{"position":[[43,9]]},"67":{"position":[[42,9]]}}}],["autonom",{"_index":53,"t":{"67":{"position":[[22,10]]},"71":{"position":[[22,10]]}}}],["azur",{"_index":99,"t":{"157":{"position":[[0,5]]}}}],["build",{"_index":52,"t":{"67":{"position":[[13,5]]},"71":{"position":[[0,8]]}}}],["case",{"_index":69,"t":{"96":{"position":[[4,4]]}}}],["choos",{"_index":37,"t":{"46":{"position":[[0,8]]}}}],["chrome",{"_index":128,"t":{"197":{"position":[[0,6]]}}}],["config",{"_index":141,"t":{"227":{"position":[[0,6]]}}}],["context",{"_index":90,"t":{"139":{"position":[[13,7]]}}}],["cost",{"_index":22,"t":{"26":{"position":[[32,5]]},"141":{"position":[[13,5]]},"145":{"position":[[4,5]]}}}],["custom",{"_index":94,"t":{"151":{"position":[[0,6]]},"153":{"position":[[0,6]]},"155":{"position":[[0,6]]},"181":{"position":[[0,6]]}}}],["defin",{"_index":59,"t":{"77":{"position":[[0,6]]}}}],["demo",{"_index":67,"t":{"89":{"position":[[0,4]]}}}],["deploy",{"_index":73,"t":{"109":{"position":[[3,6]]}}}],["determinist",{"_index":3,"t":{"3":{"position":[[30,13]]}}}],["docker",{"_index":46,"t":{"62":{"position":[[12,6]]},"113":{"position":[[30,6]]}}}],["document",{"_index":136,"t":{"211":{"position":[[18,9]]},"215":{"position":[[22,9]]}}}],["each",{"_index":20,"t":{"26":{"position":[[14,4]]}}}],["embed",{"_index":98,"t":{"155":{"position":[[18,9]]}}}],["engin",{"_index":113,"t":{"179":{"position":[[11,7]]}}}],["ensur",{"_index":23,"t":{"28":{"position":[[11,6]]}}}],["env",{"_index":104,"t":{"165":{"position":[[7,3]]}}}],["environ",{"_index":42,"t":{"54":{"position":[[14,11]]},"56":{"position":[[8,11]]}}}],["error",{"_index":126,"t":{"195":{"position":[[0,5]]}}}],["exampl",{"_index":77,"t":{"121":{"position":[[0,7]]},"123":{"position":[[9,8]]},"124":{"position":[[0,7]]},"126":{"position":[[0,7]]},"1
28":{"position":[[0,7]]},"131":{"position":[[8,7]]},"133":{"position":[[6,7]]},"183":{"position":[[0,7]]}}}],["exist",{"_index":119,"t":{"189":{"position":[[22,5]]}}}],["factual",{"_index":24,"t":{"28":{"position":[[32,7]]}}}],["faiss",{"_index":139,"t":{"219":{"position":[[0,5]]}}}],["fastapi",{"_index":34,"t":{"42":{"position":[[26,9]]},"131":{"position":[[0,7]]}}}],["featur",{"_index":38,"t":{"48":{"position":[[9,8]]},"92":{"position":[[0,8]]}}}],["final",{"_index":12,"t":{"9":{"position":[[0,10]]},"11":{"position":[[4,5]]}}}],["flask",{"_index":86,"t":{"133":{"position":[[0,5]]}}}],["format",{"_index":116,"t":{"185":{"position":[[9,6]]}}}],["forward",{"_index":16,"t":{"13":{"position":[[6,7]]}}}],["framework",{"_index":85,"t":{"130":{"position":[[21,10]]}}}],["frontend",{"_index":33,"t":{"42":{"position":[[17,8]]},"44":{"position":[[17,8]]},"48":{"position":[[0,8]]},"111":{"position":[[7,8]]}}}],["futur",{"_index":27,"t":{"30":{"position":[[28,7]]}}}],["gemini",{"_index":111,"t":{"175":{"position":[[7,6]]}}}],["getter",{"_index":87,"t":{"135":{"position":[[0,7]]}}}],["go",{"_index":15,"t":{"13":{"position":[[0,5]]}}}],["gobject",{"_index":122,"t":{"191":{"position":[[20,8]]}}}],["googl",{"_index":110,"t":{"175":{"position":[[0,6]]}}}],["gpt",{"_index":18,"t":{"24":{"position":[[8,3]]},"66":{"position":[[16,3]]},"85":{"position":[[4,3]]},"119":{"position":[[17,3]]},"189":{"position":[[7,3]]},"233":{"position":[[0,3]]}}}],["graph",{"_index":60,"t":{"77":{"position":[[11,5]]},"79":{"position":[[2,5],[17,5]]}}}],["groq",{"_index":101,"t":{"161":{"position":[[0,4]]}}}],["huggingfac",{"_index":109,"t":{"173":{"position":[[0,11]]}}}],["hybrid",{"_index":137,"t":{"213":{"position":[[0,6]]}}}],["infinit",{"_index":1,"t":{"3":{"position":[[12,8]]}}}],["instal",{"_index":76,"t":{"119":{"position":[[9,7]]}}}],["integr",{"_index":83,"t":{"130":{"position":[[0,11]]}}}],["introduc",{"_index":47,"t":{"66":{"position":[[0,11]]},"69":{"position":[[0,11]]}}}],["issu",{"_index":130,"t":{"197":{"position":[[15,6]]}}}],["langchain",{"_index":138,"t":{"215":{"position":[[12,9]]}}}],["langgraph",{"_index":54,"t":{"67":{"position":[[58,9]]},"69":{"position":[[12,9]]}}}],["learn",{"_index":51,"t":{"67":{"position":[[0,5]]}}}],["librari",{"_index":121,"t":{"191":{"position":[[12,7]]},"193":{"position":[[12,7]]}}}],["llm",{"_index":97,"t":{"153":{"position":[[18,3]]}}}],["load",{"_index":120,"t":{"191":{"position":[[7,4]]},"193":{"position":[[7,4]]}}}],["local",{"_index":135,"t":{"211":{"position":[[12,5]]}}}],["loop",{"_index":2,"t":{"3":{"position":[[21,5]]}}}],["mistral",{"_index":107,"t":{"169":{"position":[[0,7]]}}}],["model",{"_index":117,"t":{"189":{"position":[[0,6]]}}}],["move",{"_index":0,"t":{"3":{"position":[[0,6]]}}}],["much",{"_index":19,"t":{"26":{"position":[[4,4]]}}}],["multi",{"_index":48,"t":{"66":{"position":[[31,5]]},"98":{"position":[[4,5]]}}}],["next",{"_index":66,"t":{"81":{"position":[[7,5]]}}}],["nextj",{"_index":36,"t":{"44":{"position":[[10,6]]},"111":{"position":[[0,6]]},"113":{"position":[[8,6]]},"115":{"position":[[8,6]]}}}],["npm",{"_index":75,"t":{"115":{"position":[[30,3]]}}}],["object",{"_index":6,"t":{"5":{"position":[[11,9]]},"201":{"position":[[10,7]]},"203":{"position":[[18,7]]},"227":{"position":[[7,7]]},"229":{"position":[[12,7]]}}}],["ollama",{"_index":100,"t":{"159":{"position":[[0,6]]}}}],["openai",{"_index":95,"t":{"151":{"position":[[7,6]]},"153":{"position":[[7,6]]},"155":{"position":[[7,6]]},"157":{"position":[[6,6]]}}}],["option",{"_index":30,"t":{"42":{"position":[
[0,6]]},"44":{"position":[[0,6]]},"46":{"position":[[12,6]]}}}],["outlin",{"_index":82,"t":{"128":{"position":[[11,7]]}}}],["packag",{"_index":29,"t":{"34":{"position":[[8,7]]}}}],["pango",{"_index":125,"t":{"193":{"position":[[20,7]]}}}],["parallel",{"_index":64,"t":{"79":{"position":[[43,15]]}}}],["pgvector",{"_index":140,"t":{"221":{"position":[[0,8]]}}}],["pip",{"_index":28,"t":{"34":{"position":[[4,3]]}}}],["plan",{"_index":26,"t":{"30":{"position":[[14,5]]}}}],["poetri",{"_index":43,"t":{"54":{"position":[[29,6]]},"58":{"position":[[0,6]]}}}],["process",{"_index":11,"t":{"7":{"position":[[25,7]]},"195":{"position":[[6,10]]}}}],["prompt",{"_index":134,"t":{"209":{"position":[[14,6]]}}}],["quickstart",{"_index":39,"t":{"52":{"position":[[0,10]]}}}],["react",{"_index":74,"t":{"113":{"position":[[15,5]]},"115":{"position":[[15,5]]}}}],["report",{"_index":13,"t":{"9":{"position":[[24,6]]},"28":{"position":[[22,6]]},"124":{"position":[[20,6]]},"126":{"position":[[20,6]]},"128":{"position":[[19,6]]}}}],["research",{"_index":10,"t":{"7":{"position":[[16,8]]},"9":{"position":[[15,8]]},"24":{"position":[[12,11]]},"26":{"position":[[19,8]]},"66":{"position":[[20,10]]},"67":{"position":[[33,8]]},"71":{"position":[[33,8]]},"73":{"position":[[4,8]]},"85":{"position":[[8,11]]},"119":{"position":[[21,10]]},"124":{"position":[[11,8]]},"137":{"position":[[4,8]]},"139":{"position":[[4,8]]},"141":{"position":[[4,8]]},"207":{"position":[[0,8]]},"211":{"position":[[0,8]]},"213":{"position":[[7,8]]},"215":{"position":[[0,8]]},"233":{"position":[[4,10]]}}}],["resourc",{"_index":80,"t":{"126":{"position":[[11,8]]}}}],["respons",{"_index":115,"t":{"185":{"position":[[0,8]]}}}],["result",{"_index":4,"t":{"3":{"position":[[44,7]]},"5":{"position":[[34,7]]}}}],["retriev",{"_index":114,"t":{"181":{"position":[[7,10]]}}}],["run",{"_index":21,"t":{"26":{"position":[[28,3]]},"34":{"position":[[0,3]]},"60":{"position":[[0,3]]},"105":{"position":[[7,3]]},"113":{"position":[[0,3]]},"115":{"position":[[0,3]]}}}],["search",{"_index":112,"t":{"179":{"position":[[4,6]]}}}],["set",{"_index":91,"t":{"143":{"position":[[0,3]]}}}],["setter",{"_index":88,"t":{"135":{"position":[[12,7]]}}}],["sign",{"_index":102,"t":{"163":{"position":[[0,4]]}}}],["singleton",{"_index":131,"t":{"201":{"position":[[0,9]]}}}],["sourc",{"_index":89,"t":{"137":{"position":[[13,7]]},"207":{"position":[[21,7]]}}}],["special",{"_index":56,"t":{"67":{"position":[[83,11]]}}}],["specif",{"_index":79,"t":{"123":{"position":[[0,8]]},"207":{"position":[[12,8]]}}}],["specifi",{"_index":133,"t":{"209":{"position":[[0,7]]}}}],["speed",{"_index":8,"t":{"7":{"position":[[0,8]]}}}],["start",{"_index":17,"t":{"22":{"position":[[13,8]]}}}],["state",{"_index":61,"t":{"77":{"position":[[17,5]]},"79":{"position":[[34,8]]}}}],["static",{"_index":32,"t":{"42":{"position":[[10,6]]}}}],["step",{"_index":71,"t":{"103":{"position":[[0,5]]},"119":{"position":[[0,5]]}}}],["support",{"_index":63,"t":{"79":{"position":[[26,7]]}}}],["team",{"_index":55,"t":{"67":{"position":[[75,4]]},"73":{"position":[[19,4]]},"98":{"position":[[16,4]]}}}],["togeth",{"_index":108,"t":{"171":{"position":[[0,8]]}}}],["tri",{"_index":45,"t":{"62":{"position":[[0,3]]}}}],["tutori",{"_index":68,"t":{"90":{"position":[[0,9]]}}}],["ultim",{"_index":58,"t":{"71":{"position":[[13,8]]}}}],["unbias",{"_index":7,"t":{"5":{"position":[[25,8]]}}}],["up",{"_index":9,"t":{"7":{"position":[[9,2]]},"163":{"position":[[5,2]]}}}],["updat",{"_index":103,"t":{"165":{"position":[[0,6]]}}}],["url",{"_index":127,"t":{"195
":{"position":[[21,3]]}}}],["us",{"_index":40,"t":{"54":{"position":[[0,5]]},"67":{"position":[[52,5]]},"96":{"position":[[0,3]]}}}],["usag",{"_index":72,"t":{"107":{"position":[[0,5]]},"121":{"position":[[8,5]]}}}],["var",{"_index":105,"t":{"165":{"position":[[11,4]]}}}],["verbos",{"_index":92,"t":{"143":{"position":[[4,7]]}}}],["version",{"_index":129,"t":{"197":{"position":[[7,7]]}}}],["virtual",{"_index":41,"t":{"54":{"position":[[6,7]]},"56":{"position":[[0,7]]}}}],["web",{"_index":84,"t":{"130":{"position":[[17,3]]},"179":{"position":[[0,3]]}}}],["what’",{"_index":65,"t":{"81":{"position":[[0,6]]}}}],["within",{"_index":62,"t":{"79":{"position":[[8,6]]}}}],["work",{"_index":70,"t":{"100":{"position":[[7,5]]}}}]],"pipeline":["stemmer"]}},{"documents":[{"i":2,"t":"After AutoGPT was published, we immediately took it for a spin. The first use case that came to mind was autonomous online research. Forming objective conclusions for manual research tasks can take time, sometimes weeks, to find the right resources and information. Seeing how well AutoGPT created tasks and executed them got me thinking about the great potential of using AI to conduct comprehensive research and what it meant for the future of online research. But the problem with AutoGPT was that it usually ran into never-ending loops, required human interference for almost every step, constantly lost track of its progress, and almost never actually completed the task. Nonetheless, the information and context gathered during the research task were lost (such as keeping track of sources), and sometimes hallucinated. The passion for leveraging AI for online research and the limitations I found put me on a mission to try and solve it while sharing my work with the world. This is when I created GPT Researcher — an open source autonomous agent for online comprehensive research. In this article, we will share the steps that guided me toward the proposed solution.","s":"How we built GPT Researcher","u":"/blog/building-gpt-researcher","h":"","p":1},{"i":4,"t":"The first step in solving these issues was to seek a more deterministic solution that could ultimately guarantee completing any research task within a fixed time frame, without human interference. This is when we stumbled upon the recent paper Plan and Solve. The paper aims to provide a better solution for the challenges stated above. The idea is quite simple and consists of two components: first, devising a plan to divide the entire task into smaller subtasks and then carrying out the subtasks according to the plan. As it relates to research, first create an outline of questions to research related to the task, and then deterministically execute an agent for every outline item. This approach eliminates the uncertainty in task completion by breaking the agent steps into a deterministic finite set of tasks. Once all tasks are completed, the agent concludes the research. Following this strategy has improved the reliability of completing research tasks to 100%. Now the challenge is, how to improve quality and speed?","s":"Moving from infinite loops to deterministic results","u":"/blog/building-gpt-researcher","h":"#moving-from-infinite-loops-to-deterministic-results","p":1},{"i":6,"t":"The biggest challenge with LLMs is the lack of factuality and unbiased responses caused by hallucinations and out-of-date training sets (GPT is currently trained on datasets from 2021). 
But the irony is that for research tasks, it is crucial to optimize for these exact two criteria: factuality and bias. To tackle these challenges, we assumed the following: Law of large numbers — More content will lead to less biased results. Especially if gathered properly. Leveraging LLMs for the summarization of factual information can significantly improve the overall factuality of results. After experimenting with LLMs for quite some time, we can say that the areas where foundation models excel are in the summarization and rewriting of given content. So, in theory, if LLMs only review given content and summarize and rewrite it, they could potentially reduce hallucinations significantly. In addition, assuming the given content is unbiased, or at least holds opinions and information from all sides of a topic, the rewritten result would also be unbiased. So how can content be unbiased? The law of large numbers. In other words, if enough sites that hold relevant information are scraped, the possibility of biased information reduces greatly. So the idea would be to scrape just enough sites together to form an objective opinion on any topic. Great! Sounds like, for now, we have an idea for how to create deterministic, factual, and unbiased results. But what about the speed problem?","s":"Aiming for objective and unbiased results","u":"/blog/building-gpt-researcher","h":"#aiming-for-objective-and-unbiased-results","p":1},{"i":8,"t":"Another issue with AutoGPT is that it works synchronously. The main idea of it is to create a list of tasks and then execute them one by one. So if, let’s say, a research task requires visiting 20 sites, and each site takes around one minute to scrape and summarize, the overall research task would take a minimum of 20 minutes. That’s assuming it ever stops. But what if we could parallelize agent work? By leveraging Python libraries such as asyncio, the agent tasks have been optimized to work in parallel, thus significantly reducing the time to research.
# Create a list to hold the coroutine agent tasks
tasks = [async_browse(url, query, self.websocket) for url in await new_search_urls]
# Gather the results as they become available
responses = await asyncio.gather(*tasks, return_exceptions=True)
In the example above, we trigger scraping for all URLs in parallel, and only once all of them are done, continue with the task. Based on many tests, an average research task takes around three minutes (!!). That’s 85% faster than AutoGPT.","s":"Speeding up the research process","u":"/blog/building-gpt-researcher","h":"#speeding-up-the-research-process","p":1},{"i":10,"t":"Finally, after aggregating as much information as possible about a given research task, the challenge is to write a comprehensive report about it. After experimenting with several OpenAI models and even open source ones, I’ve concluded that the best results are currently achieved with GPT-4. The task is straightforward — provide GPT-4 with all the aggregated information as context, and ask it to write a detailed report about it given the original research task. The prompt is as follows: \"{research_summary}\" Using the above information, answer the following question or topic: \"{question}\" in a detailed report — The report should focus on the answer to the question, should be well structured, informative, in depth, with facts and numbers if available, a minimum of 1,200 words and with markdown syntax and apa format. Write all source urls at the end of the report in apa format. 
You should write your report only based on the given information and nothing else. The results are quite impressive, with some minor hallucinations in very few samples, but it’s fair to assume that as GPT improves over time, results will only get better.","s":"Finalizing the research report","u":"/blog/building-gpt-researcher","h":"#finalizing-the-research-report","p":1},{"i":12,"t":"Now that we’ve reviewed the necessary steps of GPT Researcher, let’s break down the final architecture, as shown below: More specifically: Generate an outline of research questions that form an objective opinion on any given task. For each research question, trigger a crawler agent that scrapes online resources for information relevant to the given task. For each scraped resource, keep track, filter, and summarize only if it includes relevant information. Finally, aggregate all summarized sources and generate a final research report.","s":"The final architecture","u":"/blog/building-gpt-researcher","h":"#the-final-architecture","p":1},{"i":14,"t":"The future of online research automation is heading toward a major disruption. As AI continues to improve, it is only a matter of time before AI agents can perform comprehensive research tasks for any of our day-to-day needs. AI research can disrupt areas of finance, legal, academia, health, and retail, reducing our time spent on each research task by 95% while optimizing for factual and unbiased reports amid an influx and overload of ever-growing online information. Imagine if an AI can eventually understand and analyze any form of online content — videos, images, graphs, tables, reviews, text, audio. And imagine if it could support and analyze hundreds of thousands of words of aggregated information within a single prompt. Imagine, too, that AI can eventually improve in reasoning and analysis, making it much more suitable for reaching new and innovative research conclusions. And that it can do all that in minutes, if not seconds. It’s all a matter of time, and that is what GPT Researcher is all about.","s":"Going forward","u":"/blog/building-gpt-researcher","h":"#going-forward","p":1},{"i":17,"t":"Contribute We highly welcome contributions! Please check out contributing if you're interested. Also see our roadmap page and reach out to us via our Discord community if you're interested in joining our mission.","s":"Contribute","u":"/docs/contribute","h":"","p":16},{"i":19,"t":"OpenAI has done it again with a groundbreaking DevDay showcasing some of the latest improvements to the OpenAI suite of tools, products and services. One major release was the new Assistants API that makes it easier for developers to build their own assistive AI apps that have goals and can call models and tools. The new Assistants API currently supports three types of tools: Code Interpreter, Retrieval, and Function calling. Although you might expect the Retrieval tool to support online information retrieval (such as search APIs or ChatGPT plugins), it only supports raw data for now, such as text or CSV files. This blog will demonstrate how to leverage the latest Assistants API with online information using the function calling tool. To skip the tutorial below, feel free to check out the full Github Gist here. At a high level, a typical integration of the Assistants API has the following steps: Create an Assistant in the API by defining its custom instructions and picking a model. If helpful, enable tools like Code Interpreter, Retrieval, and Function calling. 
Create a Thread when a user starts a conversation. Add Messages to the Thread as the user asks questions. Run the Assistant on the Thread to trigger responses. This automatically calls the relevant tools. As you can see below, an Assistant object includes Threads for storing and handling conversation sessions between the assistant and users, and Run for invocation of an Assistant on a Thread. Let’s go ahead and implement these steps one by one! For the example, we will build a finance GPT that can provide insights about financial questions. We will use the OpenAI Python SDK v1.2 and the Tavily Search API. First things first, let’s define the assistant’s instructions:
assistant_prompt_instruction = \"\"\"You are a finance expert. Your goal is to provide answers based on information from the internet. You must use the provided Tavily search API function to find relevant online information. You should never use your own knowledge to answer questions. Please include relevant url sources in the end of your answers.\"\"\"
Next, let’s finalize step 1 and create an assistant using the latest GPT-4 Turbo model (128K context), and define the function tool that calls the Tavily web search API:
# Create an assistant
assistant = client.beta.assistants.create(
    instructions=assistant_prompt_instruction,
    model=\"gpt-4-1106-preview\",
    tools=[{
        \"type\": \"function\",
        \"function\": {
            \"name\": \"tavily_search\",
            \"description\": \"Get information on recent events from the web.\",
            \"parameters\": {
                \"type\": \"object\",
                \"properties\": {
                    \"query\": {\"type\": \"string\", \"description\": \"The search query to use. For example: 'Latest news on Nvidia stock performance'\"},
                },
                \"required\": [\"query\"]
            }
        }
    }]
)
Steps 2 and 3 are quite straightforward: we’ll initiate a new thread and update it with a user message:
thread = client.beta.threads.create()
user_input = input(\"You: \")
message = client.beta.threads.messages.create(
    thread_id=thread.id,
    role=\"user\",
    content=user_input,
)
Finally, we’ll run the assistant on the thread to trigger the function call and get the response:
run = client.beta.threads.runs.create(
    thread_id=thread.id,
    assistant_id=assistant_id,
)
So far so good! But this is where it gets a bit messy. Unlike with the regular GPT APIs, the Assistants API doesn’t return a synchronous response, but returns a status. This allows for asynchronous operations across assistants, but requires more overhead for fetching statuses and dealing with each manually. To manage this status lifecycle, let’s build a reusable function that handles waiting for various statuses (such as ‘requires_action’):
# Function to wait for a run to complete
def wait_for_run_completion(thread_id, run_id):
    while True:
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        print(f\"Current run status: {run.status}\")
        if run.status in ['completed', 'failed', 'requires_action']:
            return run
This function polls once per second and keeps sleeping until the run reaches a final status, such as when it has completed, failed, or requires an action from a function call. We’re almost there! 
Lastly, let’s handle the case where the assistant wants to call the web search API:
# Function to handle tool output submission
def submit_tool_outputs(thread_id, run_id, tools_to_call):
    tool_output_array = []
    for tool in tools_to_call:
        output = None
        tool_call_id = tool.id
        function_name = tool.function.name
        function_args = tool.function.arguments
        if function_name == \"tavily_search\":
            output = tavily_search(query=json.loads(function_args)[\"query\"])
        if output:
            tool_output_array.append({\"tool_call_id\": tool_call_id, \"output\": output})
    return client.beta.threads.runs.submit_tool_outputs(
        thread_id=thread_id,
        run_id=run_id,
        tool_outputs=tool_output_array
    )
As seen above, if the assistant has reasoned that a function call should trigger, we extract the required function params and pass them back to the runnable thread. We catch this status and call our functions as seen below:
if run.status == 'requires_action':
    run = submit_tool_outputs(thread.id, run.id, run.required_action.submit_tool_outputs.tool_calls)
    run = wait_for_run_completion(thread.id, run.id)
That’s it! We now have a working OpenAI Assistant that can be used to answer financial questions using real-time online information. Below is the full runnable code:
import os
import json
import time
from openai import OpenAI
from tavily import TavilyClient
# Initialize clients with API keys
client = OpenAI(api_key=os.environ[\"OPENAI_API_KEY\"])
tavily_client = TavilyClient(api_key=os.environ[\"TAVILY_API_KEY\"])
assistant_prompt_instruction = \"\"\"You are a finance expert. Your goal is to provide answers based on information from the internet. You must use the provided Tavily search API function to find relevant online information. You should never use your own knowledge to answer questions. Please include relevant url sources in the end of your answers.\"\"\"
# Function to perform a Tavily search
def tavily_search(query):
    search_result = tavily_client.get_search_context(query, search_depth=\"advanced\", max_tokens=8000)
    return search_result
# Function to wait for a run to complete
def wait_for_run_completion(thread_id, run_id):
    while True:
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        print(f\"Current run status: {run.status}\")
        if run.status in ['completed', 'failed', 'requires_action']:
            return run
# Function to handle tool output submission
def submit_tool_outputs(thread_id, run_id, tools_to_call):
    tool_output_array = []
    for tool in tools_to_call:
        output = None
        tool_call_id = tool.id
        function_name = tool.function.name
        function_args = tool.function.arguments
        if function_name == \"tavily_search\":
            output = tavily_search(query=json.loads(function_args)[\"query\"])
        if output:
            tool_output_array.append({\"tool_call_id\": tool_call_id, \"output\": output})
    return client.beta.threads.runs.submit_tool_outputs(
        thread_id=thread_id,
        run_id=run_id,
        tool_outputs=tool_output_array
    )
# Function to print messages from a thread
def print_messages_from_thread(thread_id):
    messages = client.beta.threads.messages.list(thread_id=thread_id)
    for msg in messages:
        print(f\"{msg.role}: {msg.content[0].text.value}\")
# Create an assistant
assistant = client.beta.assistants.create(
    instructions=assistant_prompt_instruction,
    model=\"gpt-4-1106-preview\",
    tools=[{
        \"type\": \"function\",
        \"function\": {
            \"name\": \"tavily_search\",
            \"description\": \"Get information on recent events from the web.\",
            \"parameters\": {
                \"type\": \"object\",
                \"properties\": {
                    \"query\": {\"type\": \"string\", \"description\": \"The search query to 
use. For example: 'Latest news on Nvidia stock performance'\"},
                },
                \"required\": [\"query\"]
            }
        }
    }]
)
assistant_id = assistant.id
print(f\"Assistant ID: {assistant_id}\")
# Create a thread
thread = client.beta.threads.create()
print(f\"Thread: {thread}\")
# Ongoing conversation loop
while True:
    user_input = input(\"You: \")
    if user_input.lower() == 'exit':
        break
    # Create a message
    message = client.beta.threads.messages.create(
        thread_id=thread.id,
        role=\"user\",
        content=user_input,
    )
    # Create a run
    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant_id,
    )
    print(f\"Run ID: {run.id}\")
    # Wait for run to complete
    run = wait_for_run_completion(thread.id, run.id)
    if run.status == 'failed':
        print(run.error)
        continue
    elif run.status == 'requires_action':
        run = submit_tool_outputs(thread.id, run.id, run.required_action.submit_tool_outputs.tool_calls)
        run = wait_for_run_completion(thread.id, run.id)
    # Print messages from the thread
    print_messages_from_thread(thread.id)
The assistant can be further customized and improved using additional retrieval information, OpenAI’s Code Interpreter and more. Also, you can go ahead and add more function tools to make the assistant even smarter. Feel free to drop a comment below if you have any further questions!","s":"How to build an OpenAI Assistant with Internet access","u":"/blog/building-openai-assistant","h":"","p":18},{"i":21,"t":"On this page","s":"Frequently Asked Questions","u":"/docs/faq","h":"","p":20},{"i":23,"t":"It really depends on what you're aiming for. If you're looking to connect your AI application to the internet with Tavily's tailored API, check out the Tavily API documentation. If you're looking to build and deploy our open source autonomous research agent GPT Researcher, please see the GPT Researcher documentation. You can also check out demos and examples for inspiration here.","s":"How do I get started?","u":"/docs/faq","h":"#how-do-i-get-started","p":20},{"i":25,"t":"GPT Researcher is a popular open source autonomous research agent that takes care of the tedious task of research for you by scraping, filtering and aggregating over 20+ web sources for a single research task. GPT Researcher is built with best practices for leveraging LLMs (prompt engineering, RAG, chains, embeddings, etc), and is optimized for quick and efficient research. It is also fully customizable and can be tailored to your specific needs. To learn more about GPT Researcher, check out the documentation page.","s":"What is GPT Researcher?","u":"/docs/faq","h":"#what-is-gpt-researcher","p":20},{"i":27,"t":"A research task using GPT Researcher costs around $0.01 per run (for GPT-4 usage). We're constantly optimizing LLM calls to reduce costs and improve performance.","s":"How much does each research run cost?","u":"/docs/faq","h":"#how-much-does-each-research-run-cost","p":20},{"i":29,"t":"We do our best to ensure that the information we provide is factual and accurate. We do this by using multiple sources, and by using proprietary AI to score and rank the most relevant and accurate information. We also use proprietary AI to filter out irrelevant information and sources. 
Lastly, by using RAG and other techniques, we ensure that the information is relevant to the context of the research task, leading to more accurate generative AI content and reduced hallucinations.","s":"How do you ensure the report is factual and accurate?","u":"/docs/faq","h":"#how-do-you-ensure-the-report-is-factual-and-accurate","p":20},{"i":31,"t":"We're constantly working on improving our products and services. We're currently working on improving our search API together with design partners, and adding more data sources to our search engine. We're also working on improving our research agent GPT Researcher, and adding more features to it while growing our amazing open source community. If you're interested in our roadmap or looking to collaborate, check out our roadmap page. Feel free to contact us if you have any further questions or suggestions!","s":"What are your plans for the future?","u":"/docs/faq","h":"#what-are-your-plans-for-the-future","p":20},{"i":33,"t":"On this page","s":"Examples","u":"/docs/examples/examples","h":"","p":32},{"i":35,"t":"from gpt_researcher import GPTResearcher
import asyncio
async def main():
    \"\"\"
    This is a sample script that shows how to run a research report.
    \"\"\"
    # Query
    query = \"What happened in the latest burning man floods?\"
    # Report Type
    report_type = \"research_report\"
    # Initialize the researcher
    researcher = GPTResearcher(query=query, report_type=report_type, config_path=None)
    # Conduct research on the given query
    await researcher.conduct_research()
    # Write the report
    report = await researcher.write_report()
    return report
if __name__ == \"__main__\":
    asyncio.run(main())","s":"Run PIP Package","u":"/docs/examples/examples","h":"#run-pip-package","p":32},{"i":37,"t":"Agent Example If you're interested in using GPT Researcher as a standalone agent, you can easily import it into any existing Python project. Below is an example of calling the agent to generate a research report:
from gpt_researcher import GPTResearcher
import asyncio
# It is best to define global constants at the top of your script
QUERY = \"What happened in the latest burning man floods?\"
REPORT_TYPE = \"research_report\"
async def fetch_report(query, report_type):
    \"\"\"
    Fetch a research report based on the provided query and report type.
    \"\"\"
    researcher = GPTResearcher(query=query, report_type=report_type, config_path=None)
    await researcher.conduct_research()
    report = await researcher.write_report()
    return report
async def generate_research_report():
    \"\"\"
    This is a sample script that executes an async main function to run a research report.
    \"\"\"
    report = await fetch_report(QUERY, REPORT_TYPE)
    print(report)
if __name__ == \"__main__\":
    asyncio.run(generate_research_report())
You can further enhance this example to use the returned report as context for generating valuable content such as news articles, marketing content, email templates, newsletters, etc. You can also use GPT Researcher to gather information about code documentation, business analysis, financial information and more. All of this can be used to complete much more complex tasks that require factual, high-quality, real-time information.","s":"Agent Example","u":"/docs/gpt-researcher/example","h":"","p":36},{"i":39,"t":"Introduction The config.py file enables you to customize GPT Researcher to your specific needs and preferences. Thanks to our amazing community and contributions, GPT Researcher supports multiple LLMs and Retrievers. 
In addition, GPT Researcher can be tailored to various report formats (such as APA), word count, research iteration depth, etc. GPT Researcher defaults to our recommended suite of integrations: OpenAI for LLM calls and Tavily API for retrieving real-time online information. As seen below, OpenAI still stands as the superior LLM. We assume it will stay this way for some time, and that prices will only continue to decrease, while performance and speed increase over time. The default config.py file can be found in /gpt_researcher/config/. It supports various options for customizing GPT Researcher to your needs. You can also include your own external JSON file config.json by adding the path in the config_file param. Please follow the config.py file for additional future support. Below is a list of currently supported options: RETRIEVER: Web search engine used for retrieving sources. Defaults to tavily. Options: duckduckgo, bing, google, serper, searx. Check here for supported retrievers EMBEDDING_PROVIDER: Provider for embedding model. Defaults to openai. Options: ollama, huggingface, azure_openai, custom. LLM_PROVIDER: LLM provider. Defaults to openai. Options: google, ollama, groq and much more! FAST_LLM_MODEL: Model name for fast LLM operations such as summaries. Defaults to gpt-4o-mini. SMART_LLM_MODEL: Model name for smart operations like generating research reports and reasoning. Defaults to gpt-4o. FAST_TOKEN_LIMIT: Maximum token limit for fast LLM responses. Defaults to 2000. SMART_TOKEN_LIMIT: Maximum token limit for smart LLM responses. Defaults to 4000. BROWSE_CHUNK_MAX_LENGTH: Maximum length of text chunks to browse in web sources. Defaults to 8192. SUMMARY_TOKEN_LIMIT: Maximum token limit for generating summaries. Defaults to 700. TEMPERATURE: Sampling temperature for LLM responses, typically between 0 and 1. A higher value results in more randomness and creativity, while a lower value results in more focused and deterministic responses. Defaults to 0.55. TOTAL_WORDS: Total word count limit for document generation or processing tasks. Defaults to 800. REPORT_FORMAT: Preferred format for report generation. Defaults to APA. Consider formats like MLA, CMS, Harvard style, IEEE, etc. MAX_ITERATIONS: Maximum number of iterations for processes like query expansion or search refinement. Defaults to 3. AGENT_ROLE: Role of the agent. This might be used to customize the behavior of the agent based on its assigned roles. No default value. MAX_SUBTOPICS: Maximum number of subtopics to generate or consider. Defaults to 3. SCRAPER: Web scraper to use for gathering information. Defaults to bs (BeautifulSoup). You can also use newspaper. DOC_PATH: Path to read and research local documents. Defaults to an empty string indicating no path specified. USER_AGENT: Custom User-Agent string for web crawling and web requests. MEMORY_BACKEND: Backend used for memory operations, such as local storage of temporary data. Defaults to local. To change the default configurations, you can simply add env variables to your .env file as named above or export them manually in your local project directory. For example, to manually change the search engine and report format:
export RETRIEVER=bing
export REPORT_FORMAT=IEEE
Please note that you might need to export additional env vars and obtain API keys for other supported search retrievers and LLM providers. Please follow your console logs for further assistance. To learn more about additional LLM support you can check out the docs here. 
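For instance, here is a rough sketch of the external config.json flow mentioned above (hedged: this section names the parameter config_file while the pip-package examples pass config_path, so verify the exact keyword against your installed version; the option values and env-style key names in the JSON are illustrative assumptions):
import json
from gpt_researcher import GPTResearcher

# Hypothetical custom config overriding a few of the options listed above
custom_config = {\"RETRIEVER\": \"bing\", \"REPORT_FORMAT\": \"IEEE\", \"MAX_ITERATIONS\": 2}
with open(\"config.json\", \"w\") as f:
    json.dump(custom_config, f)

# Pass the file path when constructing the researcher (the package examples
# show config_path=None; a real path is assumed to be handled the same way)
researcher = GPTResearcher(query=\"Is AI in a hype cycle?\", report_type=\"research_report\", config_path=\"config.json\")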
","s":"Introduction","u":"/docs/gpt-researcher/config","h":"","p":38},{"i":41,"t":"On this page","s":"Frontend Application","u":"/docs/gpt-researcher/frontend","h":"","p":40},{"i":43,"t":"A lightweight solution using FastAPI to serve static files. Prerequisites Python 3.11+ pip Setup and Running Install required packages: pip install -r requirements.txt Start the server: python -m uvicorn main:app Access at http://localhost:8000 Demo","s":"Option 1: Static Frontend (FastAPI)","u":"/docs/gpt-researcher/frontend","h":"#option-1-static-frontend-fastapi","p":40},{"i":45,"t":"A more robust solution with enhanced features and performance. Prerequisites Node.js (v18.17.0 recommended) npm Setup and Running Navigate to the NextJS directory: cd nextjs Set up Node.js: nvm install 18.17.0 nvm use v18.17.0 Install dependencies: npm install --legacy-peer-deps Start the development server: npm run dev Access at http://localhost:3000 Note: Requires the backend server on localhost:8000 as detailed in option 1. Demo","s":"Option 2: NextJS Frontend","u":"/docs/gpt-researcher/frontend","h":"#option-2-nextjs-frontend","p":40},{"i":47,"t":"Static Frontend: Quick setup, lightweight deployment. NextJS Frontend: Feature-rich, scalable, better performance and SEO. For production, NextJS is recommended.","s":"Choosing an Option","u":"/docs/gpt-researcher/frontend","h":"#choosing-an-option","p":40},{"i":49,"t":"Our frontend enhances GPT-Researcher by providing: Intuitive Research Interface: Streamlined input for research queries. Real-time Progress Tracking: Visual feedback on ongoing research tasks. Interactive Results Display: Easy-to-navigate presentation of findings. Customizable Settings: Adjust research parameters to suit specific needs. Responsive Design: Optimal experience across various devices. These features aim to make the research process more efficient and user-friendly, complementing GPT-Researcher's powerful agent capabilities.","s":"Frontend Features","u":"/docs/gpt-researcher/frontend","h":"#frontend-features","p":40},{"i":51,"t":"On this page","s":"Getting Started","u":"/docs/gpt-researcher/getting-started","h":"","p":50},{"i":53,"t":"Step 1 - Install dependencies $ pip install -r requirements.txt Step 2 - Run the agent with FastAPI $ uvicorn main:app --reload Step 3 - Go to http://localhost:8000 on any browser and enjoy researching!","s":"Quickstart","u":"/docs/gpt-researcher/getting-started","h":"#quickstart","p":50},{"i":55,"t":"Select either based on your familiarity with each:","s":"Using Virtual Environment or Poetry","u":"/docs/gpt-researcher/getting-started","h":"#using-virtual-environment-or-poetry","p":50},{"i":57,"t":"Establishing the Virtual Environment with Activate/Deactivate configuration Create a virtual environment using the venv package with the environment name of your choice, for example, env. 
Execute the following command in the PowerShell/CMD terminal: python -m venv env To activate the virtual environment, use the following activation script in the PowerShell/CMD terminal: .\env\Scripts\activate To deactivate the virtual environment, run the following deactivation script in the PowerShell/CMD terminal: deactivate Install the dependencies for a virtual environment After activating the env environment, install dependencies using the requirements.txt file with the following command: python -m pip install -r requirements.txt","s":"Virtual Environment","u":"/docs/gpt-researcher/getting-started","h":"#virtual-environment","p":50},{"i":59,"t":"Establishing the Poetry dependencies and virtual environment with Poetry version ~1.7.1 Install project dependencies and simultaneously create a virtual environment for the specified project. By executing this command, Poetry reads the project's \"pyproject.toml\" file to determine the required dependencies and their versions, ensuring a consistent and isolated development environment. The virtual environment allows for a clean separation of project-specific dependencies, preventing conflicts with system-wide packages and enabling more straightforward dependency management throughout the project's lifecycle. poetry install Activate the virtual environment associated with a Poetry project By running this command, the user enters a shell session within the isolated environment associated with the project, providing a dedicated space for development and execution. This virtual environment ensures that the project dependencies are encapsulated, avoiding conflicts with system-wide packages. Activating the Poetry shell is essential for seamlessly working on a project, as it ensures that the correct versions of dependencies are used and provides a controlled environment conducive to efficient development and testing. poetry shell","s":"Poetry","u":"/docs/gpt-researcher/getting-started","h":"#poetry","p":50},{"i":61,"t":"Launch the FastAPI application agent on a Virtual Environment or Poetry setup by executing the following command: python -m uvicorn main:app --reload Visit http://localhost:8000 in any web browser and explore your research!","s":"Run the app","u":"/docs/gpt-researcher/getting-started","h":"#run-the-app","p":50},{"i":63,"t":"Step 1 - Install Docker Follow instructions at https://docs.docker.com/engine/install/ Step 2 - Create a .env file with your OpenAI Key or simply export it $ export OPENAI_API_KEY={Your API Key here} Step 3 - Run the application $ docker-compose up Step 4 - Go to http://localhost:8000 on any browser and enjoy researching!","s":"Try it with Docker","u":"/docs/gpt-researcher/getting-started","h":"#try-it-with-docker","p":50},{"i":65,"t":"Introducing the GPT Researcher Multi-Agent Assistant","s":"How to Build the Ultimate Research Multi-Agent Assistant","u":"/blog/gptr-langgraph","h":"","p":64},{"i":68,"t":"It has only been a year since the initial release of GPT Researcher, but methods for building, testing, and deploying AI agents have already evolved significantly. That’s just the nature and speed of the current AI progress. What started as simple zero-shot or few-shot prompting has quickly evolved into agent function calling, RAG, and now, finally, agentic workflows (aka “flow engineering”). Andrew Ng has recently stated, “I think AI agent workflows will drive massive AI progress this year — perhaps even more than the next generation of foundation models. 
This is an important trend, and I urge everyone who works in AI to pay attention to it.” In this article, you will learn why multi-agent workflows are the current best standard and how to build the optimal autonomous research multi-agent assistant using LangGraph. To skip this tutorial, feel free to check out the Github repo of GPT Researcher x LangGraph.","s":"Learn how to build an autonomous research assistant using LangGraph with a team of specialized AI agents","u":"/blog/gptr-langgraph","h":"#learn-how-to-build-an-autonomous-research-assistant-using-langgraph-with-a-team-of-specialized-ai-agents","p":64},{"i":70,"t":"LangGraph is an extension of LangChain aimed at creating agent and multi-agent flows. It adds the ability to create cyclical flows and comes with memory built in — both important attributes for creating agents. LangGraph provides developers with a high degree of controllability and is important for creating custom agents and flows. Nearly all agents in production are customized toward the specific use case they are trying to solve. LangGraph gives you the flexibility to create arbitrary customized agents, while providing an intuitive developer experience for doing so. Enough with the small talk, let’s start building!","s":"Introducing LangGraph","u":"/blog/gptr-langgraph","h":"#introducing-langgraph","p":64},{"i":72,"t":"By leveraging LangGraph, the research process can be significantly improved in depth and quality by using multiple agents with specialized skills. Having every agent focus on and specialize in only a specific skill allows for better separation of concerns, customizability, and further development at scale as the project grows. Inspired by the recent STORM paper, this example showcases how a team of AI agents can work together to conduct research on a given topic, from planning to publication. This example will also leverage the leading autonomous research agent GPT Researcher.","s":"Building the Ultimate Autonomous Research Agent","u":"/blog/gptr-langgraph","h":"#building-the-ultimate-autonomous-research-agent","p":64},{"i":74,"t":"The research team consists of seven LLM agents: Chief Editor — Oversees the research process and manages the team. This is the “master” agent that coordinates the other agents using LangGraph. This agent acts as the main LangGraph interface. GPT Researcher — A specialized autonomous agent that conducts in-depth research on a given topic. Editor — Responsible for planning the research outline and structure. Reviewer — Validates the correctness of the research results given a set of criteria. Reviser — Revises the research results based on the feedback from the reviewer. Writer — Responsible for compiling and writing the final report. Publisher — Responsible for publishing the final report in various formats.","s":"The Research Agent Team","u":"/blog/gptr-langgraph","h":"#the-research-agent-team","p":64},{"i":76,"t":"As seen below, the automation process is based on the following stages: Planning the research, data collection and analysis, review and revision, writing the report and finally publication: More specifically, the process is as follows: Browser (gpt-researcher) — Browses the internet for initial research based on the given research task. This step is crucial for LLMs to plan the research process based on up-to-date and relevant information, and not rely solely on pre-trained data for a given task or topic. Editor — Plans the report outline and structure based on the initial research. 
The Editor is also responsible for triggering the parallel research tasks based on the planned outline. For each outline topic (in parallel): Researcher (gpt-researcher) — Runs in-depth research on the subtopics and writes a draft. This agent leverages the GPT Researcher Python package under the hood, for optimized, in-depth and factual research reports. Reviewer — Validates the correctness of the draft given a set of guidelines and provides feedback to the reviser (if any). Reviser — Revises the draft until it is satisfactory based on the reviewer feedback. Writer — Compiles and writes the final report including an introduction, conclusion and references section from the given research findings. Publisher — Publishes the final report in multiple formats such as PDF, Docx, Markdown, etc. We will not dive into all the code since there’s a lot of it, but focus mostly on the interesting parts I’ve found valuable to share.","s":"Architecture","u":"/blog/gptr-langgraph","h":"#architecture","p":64},{"i":78,"t":"One of my favorite features of LangGraph is state management. States in LangGraph are facilitated through a structured approach where developers define a GraphState that encapsulates the entire state of the application. Each node in the graph can modify this state, allowing for dynamic responses based on the evolving context of the interaction. As at the start of every technical design, considering the data schema throughout the application is key. In this case, we’ll define a ResearchState like so:
class ResearchState(TypedDict):
    task: dict
    initial_research: str
    sections: List[str]
    research_data: List[dict]
    # Report layout
    title: str
    headers: dict
    date: str
    table_of_contents: str
    introduction: str
    conclusion: str
    sources: List[str]
    report: str
As seen above, the state is divided into two main areas: the research task and the report layout content. As data circulates through the graph agents, each agent will, in turn, generate new data based on the existing state and update it for subsequent processing further down the graph with other agents. We can then initialize the graph with the following:
from langgraph.graph import StateGraph
workflow = StateGraph(ResearchState)
Initializing the graph with LangGraph As stated above, one of the great things about multi-agent development is building each agent to have specialized and scoped skills. Let’s take an example of the Researcher agent using the GPT Researcher Python package:
from gpt_researcher import GPTResearcher

class ResearchAgent:
    def __init__(self):
        pass

    async def research(self, query: str, parent_query: str = \"\"):
        # Initialize the researcher
        researcher = GPTResearcher(parent_query=parent_query, query=query, report_type=\"research_report\", config_path=None)
        # Conduct research on the given query
        await researcher.conduct_research()
        # Write the report
        report = await researcher.write_report()
        return report
As you can see above, we’ve created an instance of the Researcher agent. Now let’s assume we’ve done the same for each of the team’s agents, as sketched below. 
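For illustration only, a sibling team agent could follow the same scoped, single-skill pattern (a hypothetical sketch; the actual ReviewerAgent in the repo has its own prompts and LLM calls):
class ReviewerAgent:
    \"\"\"Validates a draft against the task guidelines (illustrative sketch).\"\"\"
    async def run(self, draft_state: dict):
        # A real implementation would call an LLM with the review guidelines;
        # here we only show the shape of a scoped team agent.
        guidelines = draft_state.get(\"task\", {}).get(\"guidelines\", [])
        # Return review notes when guidelines exist, otherwise accept the draft
        review = \"Please verify each section cites supporting sources.\" if guidelines else None
        return {\"review\": review}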
After creating all of the agents, we’d initialize the graph with LangGraph: def init_research_team(self): # Initialize agents editor_agent = EditorAgent(self.task) research_agent = ResearchAgent() writer_agent = WriterAgent() publisher_agent = PublisherAgent(self.output_dir) # Define a LangGraph StateGraph with the ResearchState workflow = StateGraph(ResearchState) # Add nodes for each agent workflow.add_node(\"browser\", research_agent.run_initial_research) workflow.add_node(\"planner\", editor_agent.plan_research) workflow.add_node(\"researcher\", editor_agent.run_parallel_research) workflow.add_node(\"writer\", writer_agent.run) workflow.add_node(\"publisher\", publisher_agent.run) workflow.add_edge('browser', 'planner') workflow.add_edge('planner', 'researcher') workflow.add_edge('researcher', 'writer') workflow.add_edge('writer', 'publisher') # set up start and end nodes workflow.set_entry_point(\"browser\") workflow.add_edge('publisher', END) return workflow Copy As seen above, creating the LangGraph graph is very straightforward and consists of three main functions: add_node, add_edge and set_entry_point. With these main functions you can first add the nodes to the graph, connect the edges and finally set the starting point. Focus check: If you’ve been following the code and architecture properly, you’ll notice that the Reviewer and Reviser agents are missing in the initialization above. Let’s dive into it!","s":"Define the Graph State","u":"/blog/gptr-langgraph","h":"#define-the-graph-state","p":64},{"i":80,"t":"This was the most exciting part of my experience working with LangGraph! One exciting feature of this autonomous assistant is having a parallel run for each research task, which is then reviewed and revised based on a set of predefined guidelines. Knowing how to leverage parallel work within a process is key for optimizing speed. But how would you trigger parallel agent work if all agents report to the same state? This can cause race conditions and inconsistencies in the final data report. To solve this, you can create a subgraph that is triggered from the main LangGraph instance. This subgraph holds its own state for each parallel run, which solves the issues that were raised. As we’ve done before, let’s define the LangGraph state and its agents. Since this subgraph basically reviews and revises a research draft, we’ll define the state with draft information: class DraftState(TypedDict): task: dict topic: str draft: dict review: str revision_notes: str Copy As seen in the DraftState, we mostly care about the topic discussed, and the reviewer and revision notes as they communicate with each other to finalize the subtopic research report. To create the circular condition we’ll take advantage of the last important piece of LangGraph: conditional edges: async def run_parallel_research(self, research_state: dict): workflow = StateGraph(DraftState) workflow.add_node(\"researcher\", research_agent.run_depth_research) workflow.add_node(\"reviewer\", reviewer_agent.run) workflow.add_node(\"reviser\", reviser_agent.run) # set up edges researcher->reviewer->reviser->reviewer... 
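# The edges below close the review loop: the researcher's draft goes to the
# reviewer, and a conditional edge on the reviewer either accepts the draft
# (ending this subgraph) or routes it to the reviser, whose output flows
# back to the reviewer for another pass.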
workflow.set_entry_point(\"researcher\") workflow.add_edge('researcher', 'reviewer') workflow.add_edge('reviser', 'reviewer') workflow.add_conditional_edges('reviewer', (lambda draft: \"accept\" if draft['review'] is None else \"revise\"), {\"accept\": END, \"revise\": \"reviser\"}) Copy By defining the conditional edges, the graph directs the flow to the reviser if the reviewer left review notes, or ends the cycle with the final draft. If you go back to the main graph we’ve built, you’ll see that this parallel work is under a node named “researcher” called by the ChiefEditor agent. Running the Research Assistant After finalizing the agents, states and graphs, it’s time to run our research assistant! To make it easier to customize, the assistant runs with a given task.json file: { \"query\": \"Is AI in a hype cycle?\", \"max_sections\": 3, \"publish_formats\": { \"markdown\": true, \"pdf\": true, \"docx\": true }, \"follow_guidelines\": false, \"model\": \"gpt-4-turbo\", \"guidelines\": [ \"The report MUST be written in APA format\", \"Each sub section MUST include supporting sources using hyperlinks. If none exist, erase the sub section or rewrite it to be a part of the previous section\", \"The report MUST be written in Spanish\" ]} Copy The task object is pretty self-explanatory; however, note that if follow_guidelines is false, the graph will skip the revision step and ignore the defined guidelines. Also, the max_sections field defines how many subheaders to research for. Having fewer will generate a shorter report. Running the assistant will result in a final research report in formats such as Markdown, PDF and Docx. To download and run the example, check out the GPT Researcher x LangGraph open source page.","s":"A Graph within a Graph to support stateful Parallelization","u":"/blog/gptr-langgraph","h":"#a-graph-within-a-graph-to-support-stateful-parallelization","p":64},
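Putting the pieces together, here is a minimal sketch of how the compiled main graph might be kicked off with a task.json like the one above. Treating init_research_team as a standalone function and the file handling shown here are assumptions for illustration; see the repository for the actual runner:

```python
import asyncio
import json

def run_assistant() -> dict:
    # Load the user-supplied research task.
    with open("task.json") as f:
        task = json.load(f)

    # Assumes init_research_team() builds the StateGraph shown earlier.
    workflow = init_research_team()
    chain = workflow.compile()  # turn the StateGraph into a runnable

    # Start at the "browser" entry point with the task in the initial state.
    return asyncio.run(chain.ainvoke({"task": task}))
```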
{"i":82,"t":"Going forward, there are super exciting things to think about. Human in the loop is key for optimized AI experiences. Having a human help the assistant revise and focus on just the right research plan, topics and outline would enhance the overall quality and experience. More generally, designing for human intervention throughout the AI flow ensures correctness, a sense of control, and deterministic results. I’m happy to see that LangGraph already supports this out of the box, as seen here. In addition, having support for research over both web and local data would be key for many types of business and personal use cases. Lastly, more effort can be invested in improving the quality of retrieved sources and in making sure the final report follows the optimal storyline. A step forward for LangGraph and multi-agent collaboration as a whole would be for assistants to plan and generate graphs dynamically based on given tasks. This vision would allow assistants to choose only a subset of agents for a given task and plan their strategy based on the graph fundamentals presented in this article, opening a whole new world of possibilities. Given the pace of innovation in the AI space, it won’t be long before a new disruptive version of GPT Researcher is launched. Looking forward to what the future brings! To keep track of this project’s ongoing progress and updates, please join our Discord community. And as always, if you have any feedback or further questions, please comment below!","s":"What’s Next?","u":"/blog/gptr-langgraph","h":"#whats-next","p":64},{"i":84,"t":"On this page","s":"Introduction","u":"/docs/gpt-researcher/introduction","h":"","p":83},{"i":86,"t":"Forming objective conclusions for research tasks manually can take time, sometimes weeks, to find the right resources and information. Current LLMs are trained on past and outdated information, with heavy risks of hallucinations, making them almost irrelevant for research tasks. Current LLMs are limited to short token outputs which are not sufficient for long detailed research reports (2k+ words). Solutions that enable web search (such as ChatGPT + Web Plugin) only consider limited resources and content that in some cases result in superficial conclusions or biased answers. Using only a selection of resources can create bias in determining the right conclusions for research questions or tasks.","s":"Why GPT Researcher?","u":"/docs/gpt-researcher/introduction","h":"#why-gpt-researcher","p":83},{"i":88,"t":"The main idea is to run \"planner\" and \"execution\" agents, where the planner generates questions to research, and the execution agents seek the most related information based on each generated research question. Finally, the planner filters and aggregates all related information and creates a research report. The agents leverage both gpt-4o-mini and gpt-4o (128K context) to complete a research task. We optimize for costs by using each only when necessary. The average research task takes around 3 minutes to complete, and costs ~$0.1. More specifically: Create a domain-specific agent based on the research query or task. Generate a set of research questions that together form an objective opinion on any given task. For each research question, trigger a crawler agent that scrapes online resources for information relevant to the given task. For each scraped resource, summarize it based on the relevant information and keep track of its sources. Finally, filter and aggregate all summarized sources and generate a final research report.","s":"Architecture","u":"/docs/gpt-researcher/introduction","h":"#architecture","p":83},
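The planner/execution split described above can be sketched in a few lines. This is an illustrative outline of the pattern, not the package's internal code, and the function bodies are stubbed assumptions:

```python
import asyncio

async def plan_questions(task: str) -> list[str]:
    # Planner: generate the research questions (an LLM call in practice; stubbed here).
    return [f"{task} - background", f"{task} - current state", f"{task} - outlook"]

async def execute_research(question: str) -> str:
    # Execution agent: crawl, scrape, and summarize sources (stubbed here).
    return f"Summary of findings for: {question}"

async def research(task: str) -> str:
    questions = await plan_questions(task)
    # Execution agents run concurrently, one per generated research question.
    summaries = await asyncio.gather(*(execute_research(q) for q in questions))
    # The planner then filters and aggregates the summaries into a report.
    return "\n\n".join(summaries)
```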
{"i":91,"t":"How it Works How to Install Live Demo Homepage","s":"Tutorials","u":"/docs/gpt-researcher/introduction","h":"#tutorials","p":83},{"i":93,"t":"📝 Generates research, outlines, resources and lessons reports 📜 Can generate long and detailed research reports (over 2K words) 🌐 Aggregates over 20 web sources per research to form objective and factual conclusions 🖥️ Includes an easy-to-use web interface (HTML/CSS/JS) 🔍 Scrapes web sources with JavaScript support 📂 Keeps track and context of visited and used web sources 📄 Exports research reports to PDF, Word and more... Let's get started here!","s":"Features","u":"/docs/gpt-researcher/introduction","h":"#features","p":83},{"i":95,"t":"On this page","s":"LangGraph","u":"/docs/gpt-researcher/langgraph","h":"","p":94},{"i":97,"t":"By using LangGraph, the research process can be significantly improved in depth and quality by leveraging multiple agents with specialized skills. Inspired by the recent STORM paper, this example showcases how a team of AI agents can work together to conduct research on a given topic, from planning to publication. An average run generates a 5-6 page research report in multiple formats such as PDF, Docx and Markdown. Please note: This example uses the OpenAI API only for optimized performance.","s":"Use case","u":"/docs/gpt-researcher/langgraph","h":"#use-case","p":94},{"i":99,"t":"The research team is made up of seven AI agents and a human in the loop: Human - The human in the loop that oversees the process and provides feedback to the agents. Chief Editor - Oversees the research process and manages the team. This is the \"master\" agent that coordinates the other agents using LangGraph. Researcher (gpt-researcher) - A specialized autonomous agent that conducts in-depth research on a given topic. Editor - Responsible for planning the research outline and structure. Reviewer - Validates the correctness of the research results given a set of criteria. Revisor - Revises the research results based on the feedback from the reviewer. Writer - Responsible for compiling and writing the final report. Publisher - Responsible for publishing the final report in various formats.","s":"The Multi Agent Team","u":"/docs/gpt-researcher/langgraph","h":"#the-multi-agent-team","p":94},{"i":101,"t":"Generally, the process is based on the following stages: Planning stage Data collection and analysis Review and revision Writing and submission Publication","s":"How it works","u":"/docs/gpt-researcher/langgraph","h":"#how-it-works","p":94},{"i":104,"t":"More specifically (as seen in the architecture diagram), the process is as follows: Browser (gpt-researcher) - Browses the internet for initial research based on the given research task. Editor - Plans the report outline and structure based on the initial research. For each outline topic (in parallel): Researcher (gpt-researcher) - Runs in-depth research on the subtopics and writes a draft. Reviewer - Validates the correctness of the draft given a set of criteria and provides feedback. Revisor - Revises the draft until it is satisfactory based on the reviewer feedback. Writer - Compiles and writes the final report including an introduction, conclusion and references section from the given research findings. Publisher - Publishes the final report in multiple formats such as PDF, Docx, Markdown, etc.","s":"Steps","u":"/docs/gpt-researcher/langgraph","h":"#steps","p":94},{"i":106,"t":"Install required packages: pip install -r requirements.txt Copy Update env variables: export OPENAI_API_KEY={Your OpenAI API Key here} export TAVILY_API_KEY={Your Tavily API Key here} Copy Run the application: python main.py Copy","s":"How to run","u":"/docs/gpt-researcher/langgraph","h":"#how-to-run","p":94},{"i":108,"t":"To change the research query and customize the report, edit the task.json file in the main directory. task.json contains the following fields: query - The research query or task. model - The OpenAI LLM to use for the agents. max_sections - The maximum number of sections in the report. Each section is a subtopic of the research query. include_human_feedback - If true, the user can provide feedback to the agents. If false, the agents will work autonomously. publish_formats - The formats to publish the report in. The reports will be written in the output directory. source - The location from which to conduct the research. Options: web or local. For local, please add the DOC_PATH env var. follow_guidelines - If true, the research report will follow the guidelines below. It will take longer to complete. If false, the report will be generated faster but may not follow the guidelines. guidelines - A list of guidelines that the report must follow. verbose - If true, the application will print detailed logs to the console. 
For example: { \"query\": \"Is AI in a hype cycle?\", \"model\": \"gpt-4o\", \"max_sections\": 3, \"publish_formats\": { \"markdown\": true, \"pdf\": true, \"docx\": true }, \"include_human_feedback\": false, \"source\": \"web\", \"follow_guidelines\": true, \"guidelines\": [ \"The report MUST fully answer the original question\", \"The report MUST be written in APA format\", \"The report MUST be written in English\" ], \"verbose\": true} Copy","s":"Usage","u":"/docs/gpt-researcher/langgraph","h":"#usage","p":94},{"i":110,"t":"pip install langgraph-cli langgraph up Copy From there, see the documentation here on how to use the streaming and async endpoints, as well as the playground.","s":"To Deploy","u":"/docs/gpt-researcher/langgraph","h":"#to-deploy","p":94},{"i":112,"t":"The React app (located in the frontend directory) is our Frontend 2.0, which we hope will enable us to display the robustness of the backend on the frontend as well. It comes with loads of added features, such as: a drag-n-drop user interface for uploading and deleting files to be used as local documents by GPTResearcher. a GUI for setting your GPTR environment variables. the ability to trigger the multi_agents flow via the Backend Module or LangGraph Cloud Host (currently in closed beta). stability fixes and more coming soon!","s":"NextJS Frontend App","u":"/docs/gpt-researcher/langgraph","h":"#nextjs-frontend-app","p":94},{"i":114,"t":"Step 1 - Install Docker Step 2 - Clone the '.env.example' file, add your API keys to the cloned file and save the file as '.env' Step 3 - Within the docker-compose file, comment out services that you don't want to run with Docker. $ docker-compose up --build Copy Step 4 - By default, if you haven't uncommented anything in your docker-compose file, this flow will start 2 processes: the Python server running on localhost:8000 the React app running on localhost:3000 Visit localhost:3000 in any browser and enjoy researching!","s":"Run the NextJS React App with Docker","u":"/docs/gpt-researcher/langgraph","h":"#run-the-nextjs-react-app-with-docker","p":94},{"i":116,"t":"cd frontend nvm install 18.17.0 nvm use v18.17.0 npm install --legacy-peer-deps npm run dev Copy","s":"Run the NextJS React App with NPM","u":"/docs/gpt-researcher/langgraph","h":"#run-the-nextjs-react-app-with-npm","p":94},{"i":118,"t":"On this page","s":"PIP Package","u":"/docs/gpt-researcher/pip-package","h":"","p":117},{"i":120,"t":"Follow these easy steps to get started: Prerequisite: Ensure Python 3.10+ is installed on your machine 💻 Install gpt-researcher: Grab the official package from PyPI. 
pip install gpt-researcher Copy Environment Variables: Create a .env file with your OpenAI API key, or simply export it: export OPENAI_API_KEY={Your OpenAI API Key here} Copy export TAVILY_API_KEY={Your Tavily API Key here} Copy Start using GPT Researcher in your own codebase","s":"Steps to Install GPT Researcher","u":"/docs/gpt-researcher/pip-package","h":"#steps-to-install-gpt-researcher","p":117},{"i":122,"t":"from gpt_researcher import GPTResearcher import asyncio async def get_report(query: str, report_type: str) -> str: researcher = GPTResearcher(query, report_type) research_result = await researcher.conduct_research() report = await researcher.write_report() return report if __name__ == \"__main__\": query = \"what team may win the NBA finals?\" report_type = \"research_report\" report = asyncio.run(get_report(query, report_type)) print(report) Copy","s":"Example Usage 📝","u":"/docs/gpt-researcher/pip-package","h":"#example-usage-","p":117},{"i":125,"t":"query = \"Latest developments in renewable energy technologies\" report_type = \"research_report\" Copy","s":"Example 1: Research Report 📚","u":"/docs/gpt-researcher/pip-package","h":"#example-1-research-report-","p":117},{"i":127,"t":"query = \"List of top AI conferences in 2023\" report_type = \"resource_report\" Copy","s":"Example 2: Resource Report 📋","u":"/docs/gpt-researcher/pip-package","h":"#example-2-resource-report-","p":117},{"i":129,"t":"query = \"Outline for an article on the impact of AI in education\" report_type = \"outline_report\" Copy","s":"Example 3: Outline Report 📝","u":"/docs/gpt-researcher/pip-package","h":"#example-3-outline-report-","p":117},{"i":132,"t":"from fastapi import FastAPI from gpt_researcher import GPTResearcher import asyncio app = FastAPI() @app.get(\"/report/{report_type}\") async def get_report(query: str, report_type: str) -> dict: researcher = GPTResearcher(query, report_type) research_result = await researcher.conduct_research() report = await researcher.write_report() return {\"report\": report} # Run the server # uvicorn main:app --reload Copy","s":"FastAPI Example","u":"/docs/gpt-researcher/pip-package","h":"#fastapi-example","p":117},{"i":134,"t":"Prerequisite: Install Flask with the async extra. pip install 'flask[async]' Copy from flask import Flask, request from gpt_researcher import GPTResearcher app = Flask(__name__) @app.route('/report/<report_type>', methods=['GET']) async def get_report(report_type): query = request.args.get('query') researcher = GPTResearcher(query, report_type) research_result = await researcher.conduct_research() report = await researcher.write_report() return report # Run the server # flask run Copy Run the server flask run Copy Example Request curl -X GET \"http://localhost:5000/report/research_report?query=what team may win the nba finals?\" Copy Note: The above code snippets are just examples. You can customize them as per your requirements.","s":"Flask Example","u":"/docs/gpt-researcher/pip-package","h":"#flask-example","p":117},{"i":136,"t":"If you're interested in getting more details about the research, you can use the following getters:","s":"Getters and Setters","u":"/docs/gpt-researcher/pip-package","h":"#getters-and-setters","p":117},{"i":138,"t":"Sources are the URLs that were used to gather information for the research. 
source_urls = researcher.get_source_urls() Copy","s":"Get Research Sources","u":"/docs/gpt-researcher/pip-package","h":"#get-research-sources","p":117},{"i":140,"t":"Context is all the retrieved information from the research. It includes the sources and their corresponding content. research_context = researcher.get_research_context() Copy","s":"Get Research Context","u":"/docs/gpt-researcher/pip-package","h":"#get-research-context","p":117},{"i":142,"t":"Costs are the number of tokens consumed during the research process. research_costs = researcher.get_costs() Copy","s":"Get Research Costs","u":"/docs/gpt-researcher/pip-package","h":"#get-research-costs","p":117},{"i":144,"t":"You can set the verbose mode to get more detailed logs. researcher.set_verbose(True) Copy","s":"Set Verbose","u":"/docs/gpt-researcher/pip-package","h":"#set-verbose","p":117},{"i":146,"t":"You can also add costs to the research process if you want to track the costs from external usage. researcher.add_costs(0.22) Copy","s":"Add Costs","u":"/docs/gpt-researcher/pip-package","h":"#add-costs","p":117},{"i":148,"t":"Roadmap We're constantly working on additional features and improvements to our products and services. We're also working on new products and services to help you build better AI applications using GPT Researcher. Our vision is to build the #1 autonomous research agent for AI developers and researchers, and we're excited to have you join us on this journey! The roadmap is prioritized based on the following goals: Performance, Quality, Modularity and Conversational flexibility. The roadmap is public and can be found here. Interested in collaborating or contributing? Check out our contributing page for more information.","s":"Roadmap","u":"/docs/gpt-researcher/roadmap","h":"","p":147},{"i":150,"t":"On this page","s":"Configure LLM","u":"/docs/gpt-researcher/llms","h":"","p":149},{"i":152,"t":"Create a local OpenAI API using the llama.cpp server.","s":"Custom OpenAI","u":"/docs/gpt-researcher/llms","h":"#custom-openai","p":149},{"i":154,"t":"# use a custom OpenAI API LLM provider LLM_PROVIDER=\"openai\" # set the custom OpenAI API url OPENAI_BASE_URL=\"http://localhost:1234/v1\" # set the custom OpenAI API key OPENAI_API_KEY=\"Your Key\" # specify the custom OpenAI API LLM model FAST_LLM_MODEL=\"gpt-4o-mini\" # specify the custom OpenAI API LLM model SMART_LLM_MODEL=\"gpt-4o\" Copy","s":"Custom OpenAI API LLM","u":"/docs/gpt-researcher/llms","h":"#custom-openai-api-llm","p":149},{"i":156,"t":"# use a custom OpenAI API EMBEDDING provider EMBEDDING_PROVIDER=\"custom\" # set the custom OpenAI API url OPENAI_BASE_URL=\"http://localhost:1234/v1\" # set the custom OpenAI API key OPENAI_API_KEY=\"Your Key\" # specify the custom OpenAI API embedding model OPENAI_EMBEDDING_MODEL=\"custom_model\" Copy","s":"Custom OpenAI API Embedding","u":"/docs/gpt-researcher/llms","h":"#custom-openai-api-embedding","p":149},{"i":158,"t":"See also the documentation in the LangChain Azure OpenAI page. On Azure OpenAI you will need to create deployments for each model you want to use. 
Please also specify the model names/deployment names in your .env file: EMBEDDING_PROVIDER=\"azure_openai\" AZURE_OPENAI_API_KEY=\"Your key\" AZURE_OPENAI_ENDPOINT=\"https://.openai.azure.com/\" OPENAI_API_VERSION=\"2024-05-01-preview\" FAST_LLM_MODEL=\"gpt-4o-mini\" DEFAULT_LLM_MODEL=\"gpt-4o-mini\" SMART_LLM_MODEL=\"gpt-4o\" Copy","s":"Azure OpenAI","u":"/docs/gpt-researcher/llms","h":"#azure-openai","p":149},{"i":160,"t":"GPT Researcher supports both Ollama LLMs and embeddings. You can use either or both. To use Ollama, set the following environment variables: # Use ollama for both LLM and EMBEDDING provider LLM_PROVIDER=ollama # Ollama endpoint to use OLLAMA_BASE_URL=http://localhost:11434 # Specify one of the LLM models supported by Ollama FAST_LLM_MODEL=llama3 # Specify one of the LLM models supported by Ollama SMART_LLM_MODEL=llama3 # The temperature to use, defaults to 0.55 TEMPERATURE=0.55 Copy Optional - You can also use Ollama for embeddings: EMBEDDING_PROVIDER=ollama # Specify one of the embedding models supported by Ollama OLLAMA_EMBEDDING_MODEL=nomic-embed-text Copy","s":"Ollama","u":"/docs/gpt-researcher/llms","h":"#ollama","p":149},{"i":162,"t":"GroqCloud provides advanced AI hardware and software solutions designed to deliver amazingly fast AI inference performance. To leverage Groq in GPT-Researcher, you will need a GroqCloud account and an API key. (NOTE: Groq has a very generous free tier.)","s":"Groq","u":"/docs/gpt-researcher/llms","h":"#groq","p":149},{"i":164,"t":"You can sign up here: https://console.groq.com/login Once you are logged in, you can get an API key here: https://console.groq.com/keys Once you have an API key, you will need to add it to your system's environment using the variable name: GROQ_API_KEY=\"*********************\"","s":"Sign up","u":"/docs/gpt-researcher/llms","h":"#sign-up","p":149},{"i":166,"t":"And finally, you will need to configure the GPT-Researcher provider and model variables: # To use Groq, set the LLM provider to groq LLM_PROVIDER=groq GROQ_API_KEY=[Your Key] # Set one of the LLM models supported by Groq FAST_LLM_MODEL=Mixtral-8x7b-32768 # Set one of the LLM models supported by Groq SMART_LLM_MODEL=Mixtral-8x7b-32768 # The temperature to use, defaults to 0.55 TEMPERATURE=0.55 Copy NOTE: As of the writing of this doc (May 2024), the available language models from Groq are: Llama3-70b-8192 Llama3-8b-8192 Mixtral-8x7b-32768 Gemma-7b-it","s":"Update env vars","u":"/docs/gpt-researcher/llms","h":"#update-env-vars","p":149},{"i":168,"t":"Anthropic is an AI safety and research company, and is the creator of Claude. This page covers all integrations between Anthropic models and LangChain. LLM_PROVIDER=anthropic ANTHROPIC_API_KEY=[Your key] Copy You can then define the fast and smart LLM models, for example: FAST_LLM_MODEL=claude-2.1 SMART_LLM_MODEL=claude-3-opus-20240229 Copy","s":"Anthropic","u":"/docs/gpt-researcher/llms","h":"#anthropic","p":149},{"i":170,"t":"Sign up for a Mistral API key. Then update the corresponding env vars, for example: LLM_PROVIDER=mistral MISTRAL_API_KEY=[Your key] FAST_LLM_MODEL=open-mistral-7b SMART_LLM_MODEL=mistral-large-latest Copy","s":"Mistral","u":"/docs/gpt-researcher/llms","h":"#mistral","p":149},{"i":172,"t":"Together AI offers an API to query 50+ leading open-source models in a couple lines of code. 
Then update the corresponding env vars, for example: LLM_PROVIDER=together TOGETHER_API_KEY=[Your key] FAST_LLM_MODEL=meta-llama/Llama-3-8b-chat-hf SMART_LLM_MODEL=meta-llama/Llama-3-70b-chat-hf Copy","s":"Together AI","u":"/docs/gpt-researcher/llms","h":"#together-ai","p":149},{"i":174,"t":"This integration requires a bit of extra work. Follow this guide to learn more. After you've followed the tutorial above, update the env vars: LLM_PROVIDER=huggingface HUGGINGFACE_API_KEY=[Your key] FAST_LLM_MODEL=HuggingFaceH4/zephyr-7b-beta SMART_LLM_MODEL=HuggingFaceH4/zephyr-7b-beta Copy","s":"HuggingFace","u":"/docs/gpt-researcher/llms","h":"#huggingface","p":149},{"i":176,"t":"Sign up here to obtain a Google Gemini API key, and update the following env vars. Please make sure to set the fast and smart models to valid corresponding Gemini models: LLM_PROVIDER=google GEMINI_API_KEY=[Your key] Copy","s":"Google Gemini","u":"/docs/gpt-researcher/llms","h":"#google-gemini","p":149},{"i":178,"t":"On this page","s":"Retrievers","u":"/docs/gpt-researcher/retrievers","h":"","p":177},{"i":180,"t":"GPT Researcher defaults to using the Tavily search engine for retrieving search results, but you can also use other search engines by specifying the RETRIEVER env var. Please note that each search engine has its own API key requirements and usage limits. For example: RETRIEVER=bing Copy You can also specify multiple retrievers by separating them with commas. The system will use each specified retriever in sequence. For example: RETRIEVER=tavily, arxiv Copy Thanks to our community, we have integrated the following web search engines: Tavily - Default Bing - Env: RETRIEVER=bing Google - Env: RETRIEVER=google Serp API - Env: RETRIEVER=serpapi Serper - Env: RETRIEVER=serper Searx - Env: RETRIEVER=searx Duckduckgo - Env: RETRIEVER=duckduckgo Arxiv - Env: RETRIEVER=arxiv Exa - Env: RETRIEVER=exa PubMedCentral - Env: RETRIEVER=pubmed_central","s":"Web Search Engines","u":"/docs/gpt-researcher/retrievers","h":"#web-search-engines","p":177},{"i":182,"t":"You can also use any custom retriever of your choice by specifying the RETRIEVER=custom env var. Custom retrievers allow you to use any search engine that provides an API to retrieve documents; this is widely used for enterprise research tasks. In addition to setting the RETRIEVER env var, you also need to set the following env vars: RETRIEVER_ENDPOINT: The endpoint URL of the custom retriever. Additional arguments required by the retriever should be prefixed with RETRIEVER_ARG_ (e.g., RETRIEVER_ARG_API_KEY).","s":"Custom Retrievers","u":"/docs/gpt-researcher/retrievers","h":"#custom-retrievers","p":177},{"i":184,"t":"RETRIEVER=custom RETRIEVER_ENDPOINT=https://api.myretriever.com RETRIEVER_ARG_API_KEY=YOUR_API_KEY Copy","s":"Example","u":"/docs/gpt-researcher/retrievers","h":"#example","p":177},{"i":186,"t":"For the custom retriever to work correctly, the response from the endpoint should be in the following format: [ { \"url\": \"http://example.com/page1\", \"raw_content\": \"Content of page 1\" }, { \"url\": \"http://example.com/page2\", \"raw_content\": \"Content of page 2\" }] Copy The system assumes this response format and processes the list of sources accordingly. 
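For illustration, here is a minimal sketch of a custom retriever endpoint that returns this shape. The route name and the way the query is received are assumptions for the sketch; check the custom retriever documentation for the exact request contract your endpoint must accept:

```python
from fastapi import FastAPI

app = FastAPI()

@app.get("/search")
async def search(query: str) -> list[dict]:
    # A real service would query its own search backend here. The only hard
    # requirement is returning a list of url/raw_content objects as shown above.
    return [
        {"url": "http://example.com/page1", "raw_content": "Content of page 1"},
        {"url": "http://example.com/page2", "raw_content": "Content of page 2"},
    ]
```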
Missing a retriever? Feel free to contribute to this project by submitting issues or pull requests on our GitHub page.","s":"Response Format","u":"/docs/gpt-researcher/retrievers","h":"#response-format","p":177},{"i":188,"t":"On this page","s":"Troubleshooting","u":"/docs/gpt-researcher/troubleshooting","h":"","p":187},{"i":190,"t":"This relates to not having permission to use gpt-4 yet. According to OpenAI, it will be widely available to all by the end of July.","s":"model: gpt-4 does not exist","u":"/docs/gpt-researcher/troubleshooting","h":"#model-gpt-4-does-not-exist","p":187},{"i":192,"t":"The issue relates to the library WeasyPrint (which is used to generate PDFs from the research report). Please follow this guide to resolve it: https://doc.courtbouillon.org/weasyprint/stable/first_steps.html Alternatively, you can install the package manually. On macOS, you can install this lib using brew install glib pango. If you face an issue with linking afterward, you can try running brew link glib. On Linux, you can install this lib using sudo apt install libglib2.0-dev","s":"cannot load library 'gobject-2.0-0'","u":"/docs/gpt-researcher/troubleshooting","h":"#cannot-load-library-gobject-20-0","p":187},{"i":194,"t":"On macOS, you can install this lib using brew install pango. On Linux, you can install this lib using sudo apt install libpango-1.0-0 Workaround for Mac M chip users: If the above solutions don't work, you can try the following: Install a fresh version of Python 3.11 pointed to brew: brew install python@3.11 Install the required libraries: brew install pango glib gobject-introspection Install the required GPT Researcher Python packages: pip3.11 install -r requirements.txt Run the app with Python 3.11 (using brew): python3.11 -m uvicorn main:app --reload","s":"cannot load library 'pango'","u":"/docs/gpt-researcher/troubleshooting","h":"#cannot-load-library-pango","p":187},{"i":196,"t":"We're using Selenium for site scraping. Some sites fail to be scraped. In these cases, restart and try running again.","s":"Error processing the url","u":"/docs/gpt-researcher/troubleshooting","h":"#error-processing-the-url","p":187},{"i":198,"t":"Many users have an issue with their chromedriver because the latest Chrome browser version doesn't have a compatible chromedriver yet. To downgrade your Chrome web browser using slimjet, follow these steps. First, visit the website and scroll down to find the list of available older Chrome versions. Choose the version you wish to install, making sure it's compatible with your operating system. Once you've selected the desired version, click on the corresponding link to download the installer. Before proceeding with the installation, it's crucial to uninstall your current version of Chrome to avoid conflicts. It's also important to check that the version you downgrade to has a chromedriver available on the official chromedriver website. If none of the above work, you can try out our hosted beta","s":"Chrome version issues","u":"/docs/gpt-researcher/troubleshooting","h":"#chrome-version-issues","p":187},{"i":200,"t":"On this page","s":"config.singleton","u":"/docs/reference/config/singleton","h":"","p":199},{"i":202,"t":"class Singleton(abc.ABCMeta, type) Copy Singleton metaclass for ensuring only one instance of a class. __call__ def __call__(cls, *args, **kwargs) Copy Call method for the singleton metaclass.","s":"Singleton Objects","u":"/docs/reference/config/singleton","h":"#singleton-objects","p":199},
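For context, a typical singleton metaclass __call__ caches the first instance and returns it on every later call. The following is an illustrative sketch of the pattern, not the module's exact source:

```python
import abc

class Singleton(abc.ABCMeta, type):
    """Metaclass that ensures only one instance of a class exists."""

    _instances: dict = {}

    def __call__(cls, *args, **kwargs):
        # Construct the instance on first use; reuse the cached one afterwards.
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

class Config(metaclass=Singleton):
    pass

assert Config() is Config()  # both calls yield the same instance
```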
{"i":204,"t":"class AbstractSingleton(abc.ABC, metaclass=Singleton) Copy Abstract singleton class for ensuring only one instance of a class.","s":"AbstractSingleton Objects","u":"/docs/reference/config/singleton","h":"#abstractsingleton-objects","p":199},{"i":206,"t":"On this page","s":"Tailored Research","u":"/docs/gpt-researcher/tailored-research","h":"","p":205},{"i":208,"t":"You can specify the sources you want the GPT Researcher to research on by providing a list of URLs. The GPT Researcher will then conduct research on the provided sources. from gpt_researcher import GPTResearcher import asyncio async def get_report(query: str, report_type: str, sources: list) -> str: researcher = GPTResearcher(query=query, report_type=report_type, source_urls=sources) await researcher.conduct_research() report = await researcher.write_report() return report if __name__ == \"__main__\": query = \"What are the latest advancements in AI?\" report_type = \"research_report\" sources = [\"https://en.wikipedia.org/wiki/Artificial_intelligence\", \"https://www.ibm.com/watson/ai\"] report = asyncio.run(get_report(query, report_type, sources)) print(report) Copy","s":"Research on Specific Sources 📚","u":"/docs/gpt-researcher/tailored-research","h":"#research-on-specific-sources-","p":205},{"i":210,"t":"You can specify the agent prompt instruction upon which the research is conducted. This allows you to guide the research in a specific direction and tailor the report layout. Simply pass the prompt as the query argument to the GPTResearcher class, along with the \"custom_report\" report_type. from gpt_researcher import GPTResearcher import asyncio async def get_report(prompt: str, report_type: str) -> str: researcher = GPTResearcher(query=prompt, report_type=report_type) await researcher.conduct_research() report = await researcher.write_report() return report if __name__ == \"__main__\": report_type = \"custom_report\" prompt = \"Research the latest advancements in AI and provide a detailed report in APA format including sources.\" report = asyncio.run(get_report(prompt=prompt, report_type=report_type)) print(report) Copy","s":"Specify Agent Prompt 📝","u":"/docs/gpt-researcher/tailored-research","h":"#specify-agent-prompt-","p":205},{"i":212,"t":"You can instruct the GPT Researcher to research on local documents by providing the path to those documents. Currently supported file formats are: PDF, plain text, CSV, Excel, Markdown, PowerPoint, and Word documents. Step 1: Add the env variable DOC_PATH pointing to the folder where your documents are located. For example: export DOC_PATH=\"./my-docs\" Copy Step 2: When you create an instance of the GPTResearcher class, pass the report_source argument as \"local\". GPT Researcher will then conduct research on the provided documents. 
from gpt_researcher import GPTResearcher import asyncio async def get_report(query: str, report_type: str, report_source: str) -> str: researcher = GPTResearcher(query=query, report_type=report_type, report_source=report_source) await researcher.conduct_research() report = await researcher.write_report() return report if __name__ == \"__main__\": query = \"What can you tell me about myself based on my documents?\" report_type = \"research_report\" report_source = \"local\" # \"local\" or \"web\" report = asyncio.run(get_report(query=query, report_type=report_type, report_source=report_source)) print(report) Copy","s":"Research on Local Documents 📄","u":"/docs/gpt-researcher/tailored-research","h":"#research-on-local-documents-","p":205},{"i":214,"t":"You can combine the above methods to conduct hybrid research. For example, you can instruct the GPT Researcher to research on both web sources and local documents. Simply provide the sources, set the report_source argument to \"hybrid\", and watch the magic happen (a sketch follows below). Please note! You should set the proper retrievers for the web sources and the doc path for local documents for this to work. To learn more about retrievers, check out the Retrievers documentation.","s":"Hybrid Research 🔄","u":"/docs/gpt-researcher/tailored-research","h":"#hybrid-research-","p":205},
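Since the hybrid mode has no code sample of its own above, here is a minimal sketch that follows the same GPTResearcher API as the local-documents example; the query string and doc path are assumptions:

```python
# Assumes DOC_PATH is exported, e.g.: export DOC_PATH="./my-docs"
from gpt_researcher import GPTResearcher
import asyncio

async def get_hybrid_report(query: str) -> str:
    researcher = GPTResearcher(
        query=query,
        report_type="research_report",
        report_source="hybrid",  # combine web retrieval with local documents
    )
    await researcher.conduct_research()
    return await researcher.write_report()

if __name__ == "__main__":
    query = "How do my project documents compare to current industry practices?"
    print(asyncio.run(get_hybrid_report(query)))
```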
{"i":216,"t":"You can instruct the GPT Researcher to research on a list of LangChain Document instances. For example: from langchain_core.documents import Document from typing import List, Dict from gpt_researcher import GPTResearcher from langchain_postgres.vectorstores import PGVector from langchain_openai import OpenAIEmbeddings from sqlalchemy import create_engine import asyncio CONNECTION_STRING = 'postgresql://someuser:somepass@localhost:5432/somedatabase' def get_retriever(collection_name: str, search_kwargs: Dict[str, str]): engine = create_engine(CONNECTION_STRING) embeddings = OpenAIEmbeddings() index = PGVector.from_existing_index( use_jsonb=True, embedding=embeddings, collection_name=collection_name, connection=engine, ) return index.as_retriever(search_kwargs=search_kwargs) async def get_report(query: str, report_type: str, report_source: str, documents: List[Document]) -> str: researcher = GPTResearcher(query=query, report_type=report_type, report_source=report_source, documents=documents) await researcher.conduct_research() report = await researcher.write_report() return report if __name__ == \"__main__\": query = \"What can you tell me about blue cheese based on my documents?\" report_type = \"research_report\" report_source = \"langchain_documents\" # using a LangChain retriever to get all the documents regarding cheese # https://api.python.langchain.com/en/latest/retrievers/langchain_core.retrievers.BaseRetriever.html#langchain_core.retrievers.BaseRetriever.invoke langchain_retriever = get_retriever(\"cheese_collection\", { \"k\": 3 }) documents = langchain_retriever.invoke(\"All the documents about cheese\") report = asyncio.run(get_report(query=query, report_type=report_type, report_source=report_source, documents=documents)) print(report) Copy","s":"Research on LangChain Documents 🦜️🔗","u":"/docs/gpt-researcher/tailored-research","h":"#research-on-langchain-documents-️","p":205},{"i":218,"t":"On this page","s":"Vector Stores","u":"/docs/gpt-researcher/vector-stores","h":"","p":217},{"i":220,"t":"from gpt_researcher import GPTResearcher from langchain.text_splitter import CharacterTextSplitter from langchain_openai import OpenAIEmbeddings from langchain_community.vectorstores import FAISS from langchain_core.documents import Document # excerpt taken from - https://paulgraham.com/wealth.html essay = \"\"\"May 2004 (This essay was originally published in Hackers & Painters.) If you wanted to get rich, how would you do it? I think your best bet would be to start or join a startup. That's been a reliable way to get rich for hundreds of years. The word \"startup\" dates from the 1960s, but what happens in one is very similar to the venture-backed trading voyages of the Middle Ages. Startups usually involve technology, so much so that the phrase \"high-tech startup\" is almost redundant. A startup is a small company that takes on a hard technical problem. Lots of people get rich knowing nothing more than that. You don't have to know physics to be a good pitcher. But I think it could give you an edge to understand the underlying principles. Why do startups have to be small? Will a startup inevitably stop being a startup as it grows larger? And why do they so often work on developing new technology? Why are there so many startups selling new drugs or computer software, and none selling corn oil or laundry detergent? The Proposition Economically, you can think of a startup as a way to compress your whole working life into a few years. Instead of working at a low intensity for forty years, you work as hard as you possibly can for four. This pays especially well in technology, where you earn a premium for working fast. Here is a brief sketch of the economic proposition. If you're a good hacker in your mid twenties, you can get a job paying about $80,000 per year. So on average such a hacker must be able to do at least $80,000 worth of work per year for the company just to break even. You could probably work twice as many hours as a corporate employee, and if you focus you can probably get three times as much done in an hour.[1] You should get another multiple of two, at least, by eliminating the drag of the pointy-haired middle manager who would be your boss in a big company. Then there is one more multiple: how much smarter are you than your job description expects you to be? Suppose another multiple of three. Combine all these multipliers, and I'm claiming you could be 36 times more productive than you're expected to be in a random corporate job.[2] If a fairly good hacker is worth $80,000 a year at a big company, then a smart hacker working very hard without any corporate bullshit to slow him down should be able to do work worth about $3 million a year..........\"\"\" document = [Document(page_content=essay)] text_splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=30, separator=\"\\n\") docs = text_splitter.split_documents(documents=document) vector_store = FAISS.from_documents(docs, OpenAIEmbeddings()) query = \"\"\" Summarize the essay into 3 or 4 succinct sections. Make sure to include key points regarding wealth creation. 
Include some recommendations for entrepreneurs in the conclusion.\"\"\" # Create an instance of GPTResearcher researcher = GPTResearcher( query=query, report_type=\"research_report\", report_source=\"langchain_vectorstore\", vector_store=vector_store,) # Conduct research and write the report await researcher.conduct_research() report = await researcher.write_report() Copy","s":"Faiss","u":"/docs/gpt-researcher/vector-stores","h":"#faiss","p":217},{"i":222,"t":"from gpt_researcher import GPTResearcher from langchain_postgres.vectorstores import PGVector from langchain_openai import OpenAIEmbeddings CONNECTION_STRING = 'postgresql://someuser:somepass@localhost:5432/somedatabase' # assuming the vector store exists and contains the relevant documents # also assuming embeddings have been or will be generated vector_store = PGVector.from_existing_index( use_jsonb=True, embedding=OpenAIEmbeddings(), collection_name='some collection name', connection=CONNECTION_STRING, async_mode=True,) query = \"\"\" Create a short report about apples. Include a section about which apples are considered best during each season.\"\"\" # Create an instance of GPTResearcher researcher = GPTResearcher( query=query, report_type=\"research_report\", report_source=\"langchain_vectorstore\", vector_store=vector_store, ) # Conduct research and write the report await researcher.conduct_research() report = await researcher.write_report() Copy","s":"PGVector","u":"/docs/gpt-researcher/vector-stores","h":"#pgvector","p":217},{"i":224,"t":"On this page","s":"processing.html","u":"/docs/reference/processing/html","h":"","p":223},{"i":226,"t":"On this page","s":"config.config","u":"/docs/reference/config/config","h":"","p":225},{"i":228,"t":"class Config(metaclass=Singleton) Copy Configuration class to store the state of bools for different scripts' access. __init__ def __init__() -> None Copy Initialize the Config class set_fast_llm_model def set_fast_llm_model(value: str) -> None Copy Set the fast LLM model value. set_smart_llm_model def set_smart_llm_model(value: str) -> None Copy Set the smart LLM model value. set_fast_token_limit def set_fast_token_limit(value: int) -> None Copy Set the fast token limit value. set_smart_token_limit def set_smart_token_limit(value: int) -> None Copy Set the smart token limit value. set_browse_chunk_max_length def set_browse_chunk_max_length(value: int) -> None Copy Set the browse_website command chunk max length value. set_openai_api_key def set_openai_api_key(value: str) -> None Copy Set the OpenAI API key value. set_debug_mode def set_debug_mode(value: bool) -> None Copy Set the debug mode value.","s":"Config Objects","u":"/docs/reference/config/config","h":"#config-objects","p":225},{"i":230,"t":"class APIKeyError(Exception) Copy Exception raised when an API key is not set in config.py or as an environment variable. check_openai_api_key def check_openai_api_key(cfg) -> None Copy Check if the OpenAI API key is set in config.py or as an environment variable. check_tavily_api_key def check_tavily_api_key(cfg) -> None Copy Check if the Tavily Search API key is set in config.py or as an environment variable. check_google_api_key def check_google_api_key(cfg) -> None Copy Check if the Google API key is set in config.py or as an environment variable. check_serp_api_key def check_serp_api_key(cfg) -> None Copy Check if the SERP API key is set in config.py or as an environment variable. 
check_searx_url​ def check_searx_url(cfg) -> None Copy Check if the Searx URL is set in config.py or as an environment variable.","s":"APIKeyError Objects","u":"/docs/reference/config/config","h":"#apikeyerror-objects","p":225},{"i":232,"t":"On this page","s":"Welcome","u":"/docs/welcome","h":"","p":231},{"i":234,"t":"Quickly accessing relevant and trustworthy information is more crucial than ever. However, we've learned that none of today's search engines provide a suitable tool that provides factual, explicit and objective answers without the need to continuously click and explore multiple sites for a given research task. This is why we've built the trending open source GPT Researcher. GPT Researcher is an autonomous agent that takes care of the tedious task of research for you, by scraping, filtering and aggregating over 20+ web sources per a single research task. To learn more about GPT Researcher, check out the documentation page.","s":"GPT Researcher","u":"/docs/welcome","h":"#gpt-researcher","p":231},{"i":236,"t":"On this page","s":"processing.text","u":"/docs/reference/processing/text","h":"","p":235}],"index":{"version":"2.3.9","fields":["t"],"fieldVectors":[["t/2",[0,5.129,1,1.957,2,3.256,3,3.256,4,3.256,5,2.283,6,1.046,7,1.874,8,3.256,9,3.256,10,2.582,11,4.271,12,1.124,13,2.051,14,1.874,15,1.957,16,2.283,17,2.366,18,1.731,19,1.668,20,4.287,21,2.873,22,2.051,23,2.621,24,2.283,25,2.113,26,1.957,27,2.283,28,1.992,29,1.957,30,2.433,31,2.621,32,2.873,33,1.936,34,1.557,35,3.911,36,3.256,37,2.433,38,2.873,39,2.873,40,3.256,41,4.287,42,2.158,43,2.621,44,1.611,45,2.433,46,2.873,47,2.403,48,2.433,49,4.858,50,3.06,51,2.433,52,3.256,53,2.158,54,3.256,55,1.874,56,2.158,57,2.621,58,1.668,59,2.283,60,1.784,61,2.283,62,3.256,63,1.799,64,2.283,65,2.433,66,3.256,67,2.873,68,2.158,69,2.433,70,4.287,71,1.375,72,2.873,73,0.826,74,0.734,75,1.874,76,1.025,77,2.283,78,2.433,79,2.621,80,3.256,81,2.051]],["t/4",[5,4.228,12,1.033,17,2.807,19,1.776,28,1.422,29,2.084,45,2.591,46,3.059,47,2.517,53,4.403,69,3.803,76,1.899,81,3.205,82,2.298,83,3.059,84,1.148,85,4.506,86,3.467,87,3.467,88,2.431,89,3.059,90,3.467,91,2.791,92,3.467,93,3.059,94,2.431,95,4.097,96,3.47,97,2.431,98,1.344,99,2.298,100,4.097,101,2.431,102,1.843,103,2.591,104,2.591,105,3.059,106,2.591,107,2.591,108,3.467,109,3.467,110,3.059,111,3.059,112,3.467,113,5.089,114,3.467,115,1.508,116,3.467,117,3.803,118,2.929,119,1.995,120,3.467,121,3.059,122,3.059,123,3.467,124,2.591,125,3.467,126,1.344,127,2.591,128,3.059,129,1.208,130,3.059,131,2.705,132,3.059,133,3.467,134,2.298,135,2.298,136,2.431]],["t/6",[12,0.431,13,1.862,14,1.701,17,1.085,19,1.515,25,2.669,28,1.212,31,2.38,32,2.609,38,2.609,56,1.959,61,3.167,63,1.633,73,0.75,74,0.667,84,0.979,85,2.209,99,1.959,100,3.636,103,3.375,104,2.209,107,2.209,115,1.286,126,1.146,129,1.03,131,1.571,134,1.959,136,2.073,137,2.956,138,3.035,139,2.956,140,4.163,141,5.833,142,1.701,143,2.609,144,2.209,145,3.636,146,1.633,147,2.956,148,2.956,149,2.956,150,2.209,151,1.515,152,2.956,153,2.209,154,2.609,155,2.956,156,2.845,157,4.516,158,3.986,159,3.167,160,3.973,161,2.209,162,2.609,163,3.986,164,3.262,165,2.609,166,2.609,167,3.843,168,3.167,169,2.38,170,1.959,171,2.38,172,2.609,173,1.326,174,2.609,175,3.986,176,2.458,177,2.956,178,1.633,179,3.167,180,1.959,181,3.636,182,3.636,183,2.956,184,2.495,185,2.956,186,1.777,187,3.986,188,3.375,189,1.777,190,2.715,191,2.209,192,2.956,193,1.959,194,2.956,195,1.959]],["t/8",[0,4.387,12,0.955,17,2.685,18,3.146,19,1.72,28,2.039,29,2.018,44,1.661,56,2.225,58,1.72,71,2.499,74,1.476,76,1.
864,82,2.225,102,1.785,103,2.509,127,2.509,151,1.72,156,2.115,164,1.785,167,2.355,168,2.355,169,2.704,179,2.355,181,2.704,188,3.715,190,2.988,196,2.963,197,2.963,198,2.018,199,2.988,200,3.406,201,2.225,202,2.355,203,3.715,204,1.554,205,4.002,206,4.765,207,2.963,208,4.002,209,2.963,210,4.423,211,3.358,212,1.72,213,2.704,214,2.963,215,3.358,216,3.358,217,3.358,218,3.358,219,1.338,220,3.358,221,2.988,222,2.459,223,3.358,224,3.358,225,3.358,226,3.358,227,3.358,228,0.569,229,1.377,230,2.115,231,2.225,232,2.355,233,1.377,234,2.355,235,2.704,236,2.509,237,2.509,238,3.358,239,2.963]],["t/10",[6,0.712,12,0.717,17,2.153,19,1.694,25,3.018,27,2.318,35,2.662,42,2.191,55,1.903,60,1.804,61,2.318,73,1.488,74,1.108,75,1.903,98,1.282,99,2.191,100,2.662,102,1.757,104,2.471,119,3.375,128,2.918,129,1.712,131,1.757,146,1.826,156,2.082,159,2.318,164,3.117,170,2.191,173,1.483,176,2.63,184,1.826,186,1.987,191,2.471,207,2.918,221,1.987,228,0.561,233,1.356,240,1.53,241,3.095,242,2.318,243,3.451,244,2.407,245,3.306,246,1.53,247,2.318,248,2.918,249,2.082,250,3.306,251,2.954,252,2.918,253,2.918,254,2.954,255,2.662,256,2.318,257,3.306,258,3.446,259,2.191,260,2.191,261,1.987,262,3.306,263,2.471,264,3.306,265,2.082,266,3.306,267,3.446,268,2.518,269,2.918,270,3.306,271,3.306,272,2.471,273,2.662,274,2.471,275,2.471,276,3.306,277,2.318]],["t/12",[11,2.971,12,1.088,13,2.823,14,2.58,17,2.242,24,4.281,25,2.655,47,2.217,50,2.823,59,3.143,60,1.646,73,1.137,76,1.412,84,1.484,118,2.58,119,3.513,124,3.35,134,2.971,167,4.281,176,2.738,178,2.477,182,3.609,189,3.669,190,3.669,201,2.971,204,2.826,230,2.823,240,3.213,241,2.823,244,1.484,278,3.609,279,3.956,280,3.35,281,3.609,282,4.483,283,2.823,284,2.383,285,2.826,286,3.956,287,3.143,288,2.383]],["t/14",[11,4.009,12,1.082,13,2.195,15,2.095,17,1.28,19,3.099,25,2.222,33,2.827,35,2.806,37,2.605,73,0.884,74,0.786,76,1.098,79,2.806,84,1.154,88,3.583,131,2.716,140,2.195,141,3.076,151,1.786,160,2.095,171,2.806,178,1.926,179,2.444,186,2.095,204,1.613,206,2.806,232,2.444,241,2.195,242,2.444,244,1.154,247,2.444,256,2.444,275,2.605,289,3.076,290,3.485,291,3.076,292,4.509,293,5.109,294,2.605,295,2.006,296,5.109,297,1.926,298,3.076,299,3.485,300,3.485,301,3.485,302,3.485,303,3.485,304,3.485,305,3.485,306,2.605,307,6.049,308,5.109,309,3.076,310,5.109,311,3.485,312,3.485,313,2.605,314,3.485,315,2.444,316,3.485,317,2.006,318,3.076,319,3.485,320,2.605,321,2.806,322,2.605,323,2.006,324,3.076,325,3.076,326,2.195,327,3.076,328,3.485]],["t/17",[67,5.094,115,3.413,325,5.094,329,5.864,330,5.772,331,5.772,332,3.689,333,3.443,334,4.772,335,4.772,336,4.647,337,1.911,338,5.094,339,5.094,340,3.825,341,4.313]],["t/19",[5,0.953,6,1.067,7,0.421,11,1.856,14,1.095,16,0.513,18,0.389,19,0.375,22,0.856,25,1.885,26,0.44,28,1.563,33,0.292,41,1.199,42,0.901,44,1.385,47,1.177,53,1.577,55,0.421,58,1.219,60,0.499,63,0.404,71,0.309,73,0.483,74,1.836,84,0.63,94,0.953,98,1.086,102,0.389,104,0.547,115,0.318,119,1.369,124,0.547,129,0.255,131,0.722,134,0.901,142,1.095,146,0.404,173,0.854,176,0.328,180,0.485,189,1.683,197,0.645,200,1.095,201,1.856,204,0.339,208,0.589,212,0.375,219,1.265,221,0.817,228,0.646,229,0.78,230,1.199,231,0.485,232,0.513,233,0.557,237,0.547,240,0.881,246,1.296,247,0.513,251,1.144,253,0.645,258,2.461,275,0.547,283,1.764,288,1.012,291,0.645,295,1.095,298,1.679,315,0.513,317,1.095,321,0.589,323,0.782,326,1.764,333,0.35,342,0.645,343,0.731,344,0.731,345,0.589,346,1.683,347,0.589,348,4.532,349,0.485,350,0.547,351,0.645,352,3.876,353,1.991,354,0.645,355,0.421,356,1.199,357,0.547,358,1.679,359,2.872,360,2.326,361,1.577,
362,1.903,363,1.612,364,4.755,365,0.731,366,0.645,367,2.291,368,0.645,369,0.645,370,0.731,371,0.44,372,0.645,373,0.362,374,0.731,375,0.731,376,0.645,377,0.589,378,1.015,379,0.953,380,1.359,381,0.589,382,0.731,383,0.375,384,0.547,385,0.731,386,0.645,387,0.513,388,0.953,389,0.751,390,0.901,391,0.731,392,0.589,393,0.513,394,4.559,395,1.43,396,0.404,397,1.679,398,0.782,399,4.084,400,2.23,401,0.731,402,0.589,403,2.38,404,0.645,405,0.547,406,0.731,407,0.953,408,1.359,409,0.731,410,0.731,411,1.199,412,0.731,413,0.731,414,2,415,0.589,416,0.731,417,0.731,418,1.359,419,1.015,420,1.359,421,1.359,422,0.645,423,0.421,424,0.645,425,0.645,426,1.177,427,1.359,428,1.359,429,1.359,430,1.359,431,1.359,432,1.359,433,0.856,434,2.38,435,2.38,436,1.359,437,1.199,438,1.359,439,1.199,440,1.359,441,1.359,442,0.731,443,0.645,444,0.589,445,1.094,446,0.856,447,0.44,448,0.731,449,1.359,450,1.359,451,2.38,452,1.359,453,1.359,454,1.359,455,1.359,456,0.731,457,0.645,458,0.645,459,0.645,460,0.731,461,0.731,462,0.731,463,0.731,464,1.866,465,2.8,466,0.461,467,0.731,468,0.589,469,0.731,470,0.645,471,1.359,472,0.731,473,0.485,474,0.645,475,0.731,476,2.38,477,0.513,478,2.8,479,1.359,480,1.359,481,2.38,482,1.532,483,1.359,484,1.359,485,2.38,486,1.359,487,3.51,488,1.679,489,0.731,490,0.547,491,0.731,492,0.731,493,0.589,494,0.547,495,3.838,496,1.359,497,1.359,498,2.38,499,1.359,500,0.856,501,2.38,502,1.359,503,2.38,504,1.359,505,1.359,506,1.359,507,1.359,508,1.359,509,1.359,510,1.359,511,1.359,512,0.856,513,0.731,514,0.645,515,0.547,516,0.589,517,1.359,518,0.731,519,1.359,520,3.175,521,1.359,522,1.903,523,0.645,524,0.828,525,0.731,526,0.731,527,0.731,528,0.731,529,0.731,530,0.731,531,0.731,532,0.731,533,0.731,534,0.731,535,0.731,536,1.359,537,0.731,538,0.731,539,0.731,540,1.199,541,0.731,542,0.731,543,0.731,544,0.731,545,0.731,546,0.731,547,1.359,548,0.731,549,1.359,550,0.731,551,0.731,552,0.589,553,0.731,554,0.731,555,0.731,556,0.731,557,0.731,558,0.731,559,0.731,560,0.731,561,0.856,562,0.731,563,0.645,564,0.645,565,0.589]],["t/21",[337,2.408]],["t/23",[10,2.682,12,1.075,26,3.033,33,2.011,60,1.853,73,1.676,75,2.904,76,1.589,97,3.538,115,2.874,229,2.069,332,2.586,333,3.16,334,4.882,353,2.561,356,3.178,383,2.586,414,4.161,419,3.771,566,5.046,567,3.538,568,5.32,569,4.453,570,3.178,571,3.771,572,3.16,573,3.771,574,3.771,575,4.063]],["t/25",[10,2.417,12,1.206,17,2.263,18,2.417,60,2.263,63,2.512,73,1.774,75,2.616,76,1.432,84,1.505,115,1.978,138,2.249,151,2.329,190,2.733,203,3.398,241,2.863,249,2.863,256,3.188,277,3.188,284,2.417,287,3.188,297,2.512,320,3.398,333,2.174,337,1.505,426,2.249,494,3.398,571,3.398,572,2.174,576,4.546,577,4.012,578,3.013,579,3.188,580,4.546,581,2.733,582,3.66,583,4.546,584,3.013,585,3.188,586,4.012,587,3.66,588,4.012,589,3.66,590,3.188]],["t/27",[6,1.254,12,1.057,17,2.138,48,4.352,73,1.836,131,3.096,138,2.881,151,2.984,179,4.084,205,4.689,251,3.501,295,3.352,320,4.352,359,3.668,400,2.138,578,3.86,591,5.411,592,5.824,593,4.689,594,4.352]],["t/29",[6,1.667,12,0.719,17,1.81,25,3.369,33,2.903,55,2.837,60,2.389,61,3.457,84,1.632,98,1.911,115,2.144,140,3.105,160,2.963,161,3.684,179,3.457,189,3.911,249,3.105,285,2.282,287,3.457,493,3.969,582,3.969,595,4.312,596,7.283,597,3.105,598,6.507,599,4.93,600,4.93,601,4.351,602,4.93]],["t/31",[12,0.913,48,3.471,60,2.296,71,2.985,73,1.178,75,2.673,76,1.463,84,2.07,115,2.02,119,2.673,131,3.758,146,2.566,193,3.078,306,3.471,333,2.221,334,3.078,335,3.078,336,5.035,337,1.538,340,3.078,349,3.078,350,3.471,353,1.801,367,3.759,371,2.792,378,3.471,379,3.257,561,2.926,568,3.74,581,2.792,
594,5.283,603,3.471,604,4.645,605,5.035,606,2.792,607,4.099,608,3.74,609,4.645,610,4.645]],["t/33",[337,2.408]],["t/35",[12,1.081,34,2.114,74,1.967,176,1.983,198,2.657,219,2.747,222,2.991,228,0.75,243,2.35,244,2.453,274,3.304,346,2.657,360,2.93,400,1.623,446,2.785,464,2.35,524,1.923,611,2.265,612,2.785,613,3.1,614,2.114,615,3.304,616,4.421,617,3.304,618,3.902,619,3.902,620,4.421,621,2.544,622,2.93,623,3.1,624,2.93,625,3.559,626,2.442,627,2.265,628,3.304,629,2.785,630,2.785,631,4.421]],["t/37",[6,1.314,12,1.013,17,1.076,25,2.371,29,1.761,44,1.449,53,1.941,55,1.686,56,1.941,58,1.501,73,1.138,74,1.759,76,1.716,77,2.054,84,1.485,98,1.135,135,1.941,140,1.845,160,2.696,198,1.761,212,1.501,214,2.585,219,1.168,222,2.696,228,0.497,229,2.235,233,1.201,242,2.054,244,2.392,249,1.845,274,2.189,283,1.845,285,2.076,322,2.189,326,1.845,334,1.941,335,1.941,346,1.761,359,1.845,360,1.941,361,1.941,364,2.189,384,2.189,388,2.054,400,1.076,411,2.585,464,2.384,470,2.585,524,1.951,561,1.845,572,1.401,585,2.054,611,1.501,612,1.845,614,2.145,615,2.189,617,2.189,618,2.585,619,2.585,621,2.581,623,2.054,624,1.941,625,2.358,626,1.618,627,1.501,629,1.845,630,1.845,632,2.929,633,2.929,634,2.189,635,2.054,636,2.929,637,2.929,638,2.585,639,2.929,640,2.929,641,2.929,642,4.485,643,2.929,644,2.929,645,2.054,646,2.929,647,2.929,648,2.189,649,2.585,650,2.929,651,2.929,652,2.929,653,2.929,654,2.585,655,2.929,656,2.585]],["t/39",[6,0.923,11,0.886,12,0.789,16,1.645,17,0.491,19,1.202,25,1.021,37,0.999,56,0.886,58,1.607,60,0.862,64,2.643,65,0.999,73,1.291,76,0.987,84,1.248,85,0.999,98,1.215,102,0.711,115,0.582,129,0.818,136,0.938,138,2.93,142,2.169,146,0.739,156,0.842,159,1.645,164,1.247,173,1.406,180,2.498,186,1.411,199,0.804,219,0.533,228,0.227,229,0.548,232,0.938,233,0.548,242,0.938,244,1.248,246,1.745,261,0.804,267,1.645,268,1.931,274,0.999,277,0.938,283,1.478,284,0.711,285,1.985,288,1.247,295,0.77,297,1.732,315,0.938,317,2.93,321,1.077,329,0.999,332,1.607,333,1.122,340,0.886,347,1.077,352,0.886,353,0.91,359,0.842,363,2.718,367,2.265,371,0.804,373,2.121,383,1.202,386,1.18,387,0.938,389,2.369,393,0.938,395,0.804,398,0.77,405,0.999,414,1.478,423,0.77,426,2.121,433,1.975,439,2.071,468,2.524,477,1.645,512,0.842,514,2.071,561,0.842,571,0.999,572,1.122,581,1.411,584,0.886,585,1.645,590,0.938,597,0.842,605,1.889,607,1.18,635,0.938,656,1.18,657,0.999,658,2.767,659,2.347,660,1.18,661,2.347,662,2.347,663,5.188,664,0.999,665,1.337,666,1.337,667,1.337,668,1.337,669,1.18,670,1.337,671,1.337,672,1.337,673,1.337,674,3.205,675,2.071,676,2.347,677,2.347,678,3.034,679,2.347,680,1.18,681,1.18,682,1.754,683,1.18,684,1.077,685,1.337,686,2.071,687,1.337,688,1.337,689,1.337,690,1.077,691,1.337,692,1.645,693,2.347,694,1.645,695,1.077,696,1.337,697,1.645,698,1.337,699,4.168,700,2.343,701,1.337,702,1.337,703,1.337,704,1.337,705,1.18,706,1.18,707,1.077,708,1.18,709,1.337,710,1.337,711,1.889,712,1.18,713,1.337,714,2.767,715,1.18,716,1.337,717,1.337,718,1.337,719,1.337,720,1.337,721,1.337,722,1.086,723,1.337,724,1.337,725,1.754,726,1.337,727,1.337,728,1.337,729,1.337,730,1.337,731,1.337,732,1.337,733,1.337,734,1.247,735,1.337,736,2.347,737,1.337,738,1.337,739,1.337,740,0.938,741,2.347,742,1.337,743,1.337,744,1.337,745,1.077,746,1.18,747,2.374,748,1.337,749,1.337,750,0.739,751,1.337,752,1.337,753,1.077,754,1.337,755,1.077,756,1.18,757,1.337,758,1.337,759,2.071,760,0.999,761,0.938,762,1.451,763,0.77,764,2.078,765,0.999,766,1.337,767,1.337,768,0.77,769,0.804,770,1.18,771,0.533,772,1.18,773,0.999,774,0.999]],["t/41",[337,2.408]],["t/43",[6,1.169,44,2.686,81,3.42
,212,3.55,228,1.175,373,2.686,396,3,400,1.994,574,4.058,775,4.792,776,4.058,777,5.43,778,4.792,779,4.792,780,4.792,781,4.363,782,4.058,783,3.107,784,3.125,785,3.807,786,3.807,787,3.599,788,4.058,789,3.807,790,3.807,791,4.058,792,4.058]],["t/45",[6,0.978,44,2.249,81,2.863,84,1.505,126,1.762,228,1.271,254,2.733,295,2.616,355,2.616,396,2.512,400,2.263,423,2.616,567,3.188,574,3.398,606,2.733,648,3.398,664,3.398,674,3.398,755,3.66,765,3.398,768,2.616,779,4.012,782,3.398,783,3.136,787,4.085,791,3.398,793,4.012,794,6.163,795,6.163,796,6.992,797,4.012,798,5.439,799,4.012,800,2.512,801,4.546,802,4.012,803,4.012,804,4.012,805,4.546,806,3.66,807,4.546,808,4.012]],["t/47",[99,4.082,295,3.544,349,4.082,573,4.603,586,5.435,606,3.702,664,4.603,775,5.435,778,5.435,782,4.603,798,6.613,809,6.033,810,5.435,811,6.159,812,6.159]],["t/49",[12,1.172,17,1.635,19,2.281,22,2.804,50,2.804,51,3.327,73,1.541,76,1.402,84,1.474,97,3.121,98,1.726,126,1.726,142,2.562,151,2.281,164,2.366,170,2.95,219,1.774,284,2.366,297,2.459,323,2.562,347,3.584,395,2.676,437,3.929,477,3.121,523,3.929,552,3.584,587,3.584,589,3.584,603,3.327,606,2.676,648,3.327,722,2.061,797,3.929,809,3.584,813,3.929,814,3.327,815,4.452,816,4.452,817,4.452,818,2.804,819,3.929,820,3.929,821,3.584,822,3.929,823,4.452,824,4.452,825,4.452,826,4.452,827,4.452,828,4.452,829,4.452]],["t/51",[337,2.408]],["t/53",[12,0.82,47,3.835,74,1.597,76,1.77,228,1.2,400,2.064,407,3.941,423,3.234,567,3.941,734,2.987,776,4.2,781,3.54,783,3.176,785,3.941,786,3.941,789,3.941,790,3.941,792,4.2,830,3.941,831,4.2,832,3.54,833,4.525]],["t/55",[204,3.257,233,2.885,834,5.665,835,7.036]],["t/57",[6,1.427,28,1.685,29,2.47,129,2.502,212,2.946,228,1.218,229,1.685,373,2.033,400,1.509,433,2.589,567,4.031,615,4.297,760,3.071,762,3.07,781,2.589,783,2.974,784,2.365,785,2.882,786,4.031,788,4.297,836,3.627,837,6.086,838,4.833,839,4.11,840,5.749,841,4.11,842,4.297,843,6.631,844,6.631,845,5.852,846,4.11,847,6.631]],["t/59",[6,0.694,28,1.322,29,2.898,44,1.594,71,1.361,84,1.067,88,2.26,98,1.869,106,2.409,228,0.817,235,2.595,252,2.844,284,1.713,355,3.324,373,1.594,393,2.26,395,1.937,400,1.183,404,2.844,466,2.03,473,2.136,474,2.844,567,5.236,587,2.595,595,3.829,635,5.236,746,2.844,750,1.781,783,2.163,784,2.775,836,2.844,837,5.527,838,4.615,842,3.603,845,4.255,848,6.59,849,4.317,850,3.223,851,3.223,852,4.822,853,3.223,854,2.844,855,4.822,856,3.223,857,2.595,858,3.223,859,4.255,860,3.381,861,3.882,862,2.595,863,4.822,864,3.223,865,5.777,866,3.223,867,2.844,868,2.844,869,2.844,870,3.223,871,3.223,872,2.136,873,2.595,874,3.223]],["t/61",[12,0.85,29,3.501,76,1.834,129,2.029,202,4.084,212,2.984,228,0.987,426,2.881,570,3.668,776,4.352,782,4.352,788,4.352,789,4.084,790,4.084,792,4.352,831,4.352,832,3.668,837,4.689,838,3.501,842,4.352,848,5.14,875,5.14,876,5.14]],["t/63",[12,0.742,28,2.086,47,3.878,74,1.498,129,1.772,228,1.126,246,2.354,251,3.057,353,1.972,373,2.516,383,2.606,390,3.371,400,1.868,407,3.566,423,2.927,570,3.203,734,2.704,761,3.566,762,2.354,764,4.402,771,2.647,783,2.281,792,3.801,800,2.81,830,3.566,832,3.203,833,4.095,877,5.861,878,5.086,879,4.095,880,4.489]],["t/65",[12,1.005,73,1.746,76,2.168,352,4.563,881,6.885,882,4.337]],["t/68",[6,0.746,10,1.843,12,0.88,30,2.591,33,2.82,51,3.803,71,1.464,73,1.291,74,0.782,76,2.329,77,2.431,84,1.148,94,2.431,101,2.431,105,3.059,115,1.508,134,2.298,136,2.431,146,2.812,151,1.776,168,2.431,172,3.059,173,1.555,208,2.791,235,2.791,240,1.605,247,2.431,249,2.183,256,2.431,273,2.791,285,1.605,333,1.658,351,3.059,352,2.298,356,3.205,359,2.183,364,2.591,376,3.059,377,2.7
91,378,2.591,379,2.431,381,2.791,396,1.915,422,3.059,446,2.183,524,1.508,573,2.591,581,2.084,582,2.791,590,2.431,882,3.205,883,4.491,884,2.791,885,3.059,886,4.491,887,3.467,888,3.467,889,5.089,890,3.059,891,4.855,892,3.467,893,2.431,894,3.467,895,3.467,896,3.467,897,3.467,898,3.467,899,3.059,900,3.467,901,3.467,902,3.059,903,3.467,904,3.467,905,2.705,906,3.467,907,2.791]],["t/70",[6,0.932,7,2.493,28,3.159,68,2.871,69,3.237,74,0.977,76,2.507,79,3.487,97,3.037,98,2.312,170,2.871,187,3.823,195,2.871,201,2.871,284,2.302,349,2.871,355,3.432,356,2.728,384,3.237,389,3.767,396,2.393,398,2.493,524,2.594,579,3.037,756,3.823,813,3.823,873,3.487,882,2.728,893,4.781,905,3.625,908,4.332,909,3.037,910,3.823,911,4.332,912,3.823,913,4.332,914,4.332,915,4.332,916,3.823,917,3.823,918,4.332,919,4.332,920,4.332]],["t/72",[10,2.417,12,1.094,33,1.812,34,2.174,63,3.863,71,1.919,73,1.153,76,2.361,94,3.188,95,3.66,96,2.616,99,3.013,131,2.417,135,3.013,161,3.398,168,3.188,176,2.039,184,2.512,193,3.013,229,2.528,259,3.013,261,2.733,284,2.417,306,3.398,345,3.66,355,2.616,466,2.863,561,2.863,575,3.66,589,3.66,597,2.863,635,3.188,722,2.104,857,3.66,905,2.417,921,4.322,922,4.962,923,3.398,924,4.546,925,4.546,926,4.012,927,3.013,928,3.188]],["t/74",[1,3.365,6,0.852,10,2.104,12,1.161,34,1.893,73,1.004,74,1.795,76,2.347,96,2.278,106,2.958,118,2.278,126,1.534,138,1.958,142,3.739,153,2.958,164,2.976,176,2.511,178,3.093,184,2.187,198,2.379,233,1.623,240,2.592,243,2.104,244,1.854,260,2.623,261,2.379,268,2.028,473,2.623,477,2.775,722,1.832,814,2.958,818,2.493,872,2.623,905,2.976,921,2.775,927,3.711,929,3.958,930,3.493,931,4.184,932,3.493,933,3.493,934,3.493,935,3.958,936,2.775,937,3.365,938,2.775,939,2.958]],["t/76",[1,2.487,12,1.162,15,1.587,17,1.874,22,1.663,25,1.148,47,1.306,58,1.353,63,1.459,65,1.973,70,2.33,73,1.295,74,1.57,76,0.831,84,0.874,96,3.325,98,1.023,118,2.937,126,1.023,129,1.442,138,1.306,140,1.663,142,1.519,144,1.973,145,2.126,150,1.973,151,1.353,176,2.591,178,2.82,184,2.286,189,1.587,204,1.222,210,3.093,212,1.353,230,1.663,233,2.73,240,2.362,243,2.713,244,2.077,248,2.33,259,1.75,260,1.75,261,2.487,265,1.663,268,1.353,283,1.663,284,1.403,288,1.403,289,2.33,322,1.973,335,1.75,361,1.75,371,2.487,400,0.969,419,1.973,446,2.606,512,1.663,585,1.851,649,2.33,657,1.973,707,2.126,722,2.362,740,1.851,784,1.519,800,1.459,818,2.606,832,1.663,872,1.75,882,1.663,928,1.851,931,3.093,936,1.851,937,3.473,938,1.851,939,1.973,940,2.33,941,2.126,942,2.33,943,2.64,944,2.126,945,4.109,946,2.33,947,2.64,948,2.126,949,2.33,950,2.33,951,2.33,952,1.663,953,1.587,954,1.851,955,2.33,956,2.64,957,2.64,958,2.33,959,2.33]],["t/78",[1,0.979,5,1.142,6,0.351,7,0.938,12,0.868,15,0.979,17,1.022,18,0.866,26,0.979,28,1.495,31,1.312,34,0.779,42,1.845,55,0.938,60,0.598,73,0.413,74,1.654,76,2.142,101,3.955,102,2.575,106,1.217,107,1.217,110,1.438,111,1.438,121,1.438,126,1.079,129,0.97,134,1.08,142,0.938,144,1.217,156,1.026,160,0.979,166,1.438,171,1.312,176,0.731,178,0.9,198,2.191,200,1.602,201,2.416,204,2.243,212,0.835,219,1.11,222,1.377,228,0.731,229,0.668,231,1.08,233,1.142,237,1.217,240,0.754,243,0.866,244,1.748,259,1.08,260,1.08,272,1.217,278,2.242,280,1.217,281,1.312,285,0.754,313,4.443,326,1.026,333,0.779,355,1.602,356,1.026,361,1.08,364,2.081,371,2.191,388,2.556,396,2.014,398,1.602,415,1.312,443,1.438,444,1.312,445,1.312,446,3.325,447,0.979,464,1.48,466,1.026,473,1.08,512,1.754,515,1.217,524,1.211,561,1.026,569,1.438,570,1.754,603,1.217,606,0.979,611,0.835,614,1.743,625,1.312,626,0.9,627,0.835,634,1.217,645,1.142,657,1.217,722,0.754,725,1.217,771,0
.649,784,0.938,800,0.9,819,1.438,862,1.312,868,1.438,882,1.026,886,1.438,891,2.242,905,2.575,909,1.142,921,1.142,922,1.312,937,0.979,938,1.142,952,1.026,955,1.438,960,1.629,961,1.629,962,2.784,963,1.629,964,3.806,965,1.629,966,1.438,967,1.438,968,1.629,969,2.784,970,0.979,971,1.629,972,2.242,973,1.629,974,3.421,975,2.784,976,1.629,977,1.629,978,2.457,979,1.629,980,1.629,981,1.629,982,1.629,983,1.629,984,1.629,985,1.629,986,1.629,987,2.784,988,1.629,989,1.629,990,2.784,991,1.629,992,1.629,993,1.629,994,1.312,995,1.629,996,0.979,997,1.438,998,1.629,999,1.629,1000,1.629,1001,1.629,1002,1.629,1003,1.629,1004,1.629,1005,1.629,1006,1.629,1007,1.629,1008,1.629,1009,1.629,1010,1.629,1011,1.629,1012,1.629,1013,1.438,1014,1.629,1015,1.629,1016,1.629,1017,1.629,1018,1.629,1019,1.629,1020,1.438,1021,1.629,1022,1.438,1023,1.629,1024,1.629,1025,1.629,1026,1.629,1027,1.629,1028,1.629,1029,1.312,1030,1.217,1031,1.629,1032,1.438,1033,1.438,1034,1.438]],["t/80",[6,0.344,10,0.849,12,0.931,14,0.919,17,1.319,18,0.849,19,0.818,25,0.695,26,0.96,28,1.122,33,0.636,42,1.814,47,0.79,58,0.818,60,1.005,63,0.882,69,2.045,71,1.797,73,0.694,74,1.26,75,0.919,76,1.507,82,1.058,88,1.12,101,3.357,115,0.695,126,1.061,136,1.12,143,2.415,151,0.818,162,1.409,164,0.849,170,1.058,173,0.716,175,1.409,176,0.716,178,3.525,181,1.286,184,1.512,198,1.645,200,0.919,201,1.058,204,1.97,210,3.578,219,0.636,228,0.609,229,0.655,230,1.724,231,1.058,233,0.655,234,1.12,240,2.216,244,1.849,251,0.96,265,1.724,267,1.12,268,1.402,275,1.193,278,2.203,285,0.739,288,0.849,294,1.193,313,4.173,317,0.919,323,0.919,332,0.818,333,0.764,337,0.529,340,1.058,352,3.173,354,1.409,359,1.006,371,0.96,373,0.79,388,3.357,389,0.882,400,2.05,405,1.193,407,1.12,424,1.409,433,1.006,445,2.203,482,2.891,494,1.193,500,1.724,512,1.006,516,1.286,524,0.695,579,1.12,606,0.96,614,0.764,634,2.045,645,1.12,722,0.739,734,0.849,740,1.12,768,1.575,771,0.636,800,0.882,891,1.286,905,2.545,907,1.286,923,2.045,937,3.699,945,3.854,946,1.409,948,2.891,952,2.262,953,1.645,954,1.919,958,1.409,959,2.415,964,1.409,970,0.96,972,2.891,974,2.067,996,0.96,997,1.409,1013,1.409,1022,1.409,1029,2.891,1032,1.409,1033,1.409,1035,2.203,1036,1.597,1037,1.409,1038,1.597,1039,4.256,1040,1.597,1041,4.787,1042,1.409,1043,1.597,1044,1.597,1045,1.597,1046,1.597,1047,1.597,1048,1.597,1049,1.597,1050,1.597,1051,1.597,1052,1.597,1053,1.597,1054,1.597,1055,1.597,1056,1.597,1057,1.597,1058,1.597,1059,1.597,1060,1.597,1061,1.597,1062,1.597,1063,1.597,1064,2.737,1065,1.597,1066,1.409,1067,2.415,1068,1.597,1069,1.409,1070,1.409,1071,2.415,1072,1.409,1073,2.415,1074,2.415,1075,2.415,1076,1.597,1077,1.597,1078,1.597,1079,1.597,1080,1.597,1081,1.597,1082,1.597,1083,1.597,1084,1.409,1085,1.597,1086,1.597,1087,1.409]],["t/82",[6,0.583,7,1.558,12,0.757,17,1.549,23,2.179,26,1.627,30,2.023,33,2.067,37,2.023,43,2.179,45,3.876,47,1.339,50,1.705,51,2.023,59,1.898,60,0.994,72,2.389,73,0.687,75,1.558,76,1.329,77,1.898,84,0.896,85,2.023,96,2.984,97,1.898,115,1.177,118,1.558,119,1.558,130,2.389,131,1.439,135,2.797,151,2.162,164,1.439,169,2.179,170,2.797,176,2.326,180,1.794,184,1.496,191,2.023,195,1.794,231,1.794,233,1.731,234,1.898,240,1.253,244,0.896,259,1.794,283,1.705,285,1.953,292,2.389,294,2.023,313,3.154,317,2.428,323,1.558,326,2.658,327,2.389,332,2.162,339,2.389,340,1.794,341,2.023,352,3.437,360,1.794,363,1.558,371,1.627,383,1.387,392,2.179,407,1.898,415,2.179,426,1.339,444,4.175,447,1.627,466,1.705,490,2.023,493,2.179,512,1.705,552,2.179,561,1.705,565,2.179,568,2.179,579,1.898,595,1.794,608,2.179,648,2.023,654,2.389,747,1.70
5,771,1.682,818,1.705,822,2.389,849,2.023,862,2.179,867,2.389,872,1.794,873,2.179,875,2.389,882,1.705,885,2.389,893,1.898,905,2.243,923,3.154,937,1.627,942,2.389,966,2.389,1035,2.179,1088,2.707,1089,2.707,1090,2.707,1091,2.707,1092,2.707,1093,2.707,1094,2.707,1095,2.023,1096,2.707,1097,3.724,1098,2.389,1099,2.179,1100,2.707,1101,2.707,1102,2.707,1103,2.707,1104,2.707,1105,2.707,1106,2.707]],["t/84",[337,2.408]],["t/86",[6,0.879,7,2.35,12,1.045,13,2.572,14,2.35,15,3.972,16,2.863,17,2.427,18,2.171,19,2.092,20,3.604,21,3.604,22,2.572,23,4.608,24,4.634,25,2.49,28,1.675,58,2.092,61,2.863,64,4.013,74,0.921,81,2.572,119,2.35,138,2.831,145,3.288,146,3.162,154,3.604,160,2.455,163,3.604,164,2.171,186,2.455,244,1.352,254,2.455,258,2.863,323,2.35,367,2.455,368,3.604,369,3.604,393,2.863,426,2.831,490,3.052,495,3.288,601,3.604,700,3.052,725,3.052,834,3.288,854,3.604,1107,4.084,1108,4.084,1109,4.084,1110,4.084,1111,3.604,1112,4.084,1113,3.604,1114,4.084]],["t/88",[6,0.701,11,2.158,12,1.149,13,2.051,14,1.874,17,2.531,18,1.731,24,3.406,25,2.803,28,1.992,29,2.92,50,2.051,53,3.22,55,1.874,59,2.283,60,1.784,63,1.799,73,1.232,76,2.171,83,2.873,84,1.078,103,2.433,117,3.631,119,3.708,126,1.262,151,1.668,167,3.406,176,2.179,182,2.621,189,2.92,190,2.92,193,2.158,195,2.158,198,1.957,204,2.983,205,2.621,206,2.621,219,1.298,230,2.051,233,2.383,236,2.433,240,2.69,241,3.06,244,1.608,279,2.873,284,2.582,285,2.983,286,2.873,287,3.406,400,1.195,425,2.873,591,3.631,694,3.406,695,2.621,734,1.731,1020,5.129,1115,3.256,1116,3.256,1117,3.256]],["t/91",[71,2.938,574,5.201,783,3.122,1118,6.96,1119,6.96]],["t/93",[6,1.295,12,1.078,13,2.766,14,2.527,15,2.639,24,3.079,50,2.766,55,2.527,59,3.079,60,2.522,74,1.847,84,1.454,118,2.527,140,2.766,186,3.618,190,2.639,202,3.079,203,3.281,241,2.766,244,2.273,254,2.639,277,4.22,285,2.786,288,2.334,317,2.527,383,2.25,396,2.426,426,3.655,490,3.281,578,2.91,764,2.91,814,3.281,821,3.535,953,2.639,1113,3.875,1120,4.391,1121,4.391,1122,4.391,1123,4.391]],["t/95",[337,2.408]],["t/97",[6,1.339,12,1.028,33,1.838,34,2.205,58,2.363,63,2.548,71,1.947,76,1.96,94,3.233,95,3.713,96,2.654,131,2.451,135,3.056,151,2.363,168,3.233,176,2.068,184,2.548,193,3.056,229,2.552,236,3.446,244,1.527,246,2.135,261,2.772,265,2.905,268,2.363,285,2.135,295,2.654,332,2.363,337,1.527,345,3.713,353,1.788,400,1.693,575,3.713,597,3.92,722,2.135,768,2.654,905,2.451,921,3.233,922,3.713,926,4.07,927,3.056,928,3.233,953,2.772,954,3.233,1124,4.611,1125,4.611]],["t/99",[1,3.395,6,0.863,10,2.13,12,1.19,33,1.597,34,1.916,43,3.226,45,4.221,73,1.016,76,2.358,96,2.306,98,1.553,118,2.306,126,1.553,142,3.764,153,2.995,164,3.002,176,2.533,178,3.121,184,2.214,233,1.643,240,2.615,243,2.13,244,1.87,260,2.656,261,2.409,268,2.053,473,2.656,477,2.81,722,2.615,800,2.214,818,3.558,872,2.656,905,2.13,921,2.81,927,3.744,930,3.537,931,4.221,932,4.985,933,3.537,934,3.537,936,2.81,937,2.409,938,2.81,939,2.995,1126,4.007,1127,4.007,1128,3.537]],["t/101",[96,3.614,129,2.188,178,3.469,233,2.575,243,3.338,285,2.907,322,4.693,371,3.774,722,2.907,928,4.403,937,3.774,940,6.691,941,5.056,1129,6.279]],["t/104",[1,3.294,12,1.177,15,2.308,17,1.41,22,2.418,58,1.967,73,1.39,84,1.271,96,2.21,98,1.488,118,3.154,126,1.488,129,1.338,153,2.869,176,2.866,178,3.027,184,2.121,204,1.777,210,2.869,233,2.62,240,2.537,243,2.913,244,2.115,260,2.545,261,2.308,265,2.418,268,1.967,281,3.091,284,2.041,288,2.041,400,1.41,419,2.869,446,3.451,512,2.418,585,2.692,657,2.869,707,3.091,722,1.777,740,2.692,818,3.451,832,2.418,872,2.545,882,2.418,931,2.869,936,2.692,937,2.308,938,2.692,939,2.86
9,945,5.144,949,3.389,950,3.389,951,3.389,952,2.418,953,2.308,954,2.692,1128,3.389,1130,3.84]],["t/106",[44,2.756,212,2.855,228,1.309,246,2.579,353,2.73,383,2.855,400,2.046,414,3.509,447,3.349,570,3.509,762,2.579,763,3.206,764,3.692,771,2.807,781,3.509,783,3.158,784,3.206,785,3.907,786,3.907,879,4.486,1131,5.571,1132,4.917,1133,5.571]],["t/108",[1,1.729,6,0.619,10,1.529,12,0.953,17,1.056,18,1.529,33,1.147,34,1.376,53,1.907,60,1.624,71,1.215,73,0.73,74,1.473,76,1.697,98,1.115,119,1.655,129,2.108,138,1.423,159,2.017,173,1.984,198,1.729,199,1.729,204,1.332,219,2.603,228,0.488,229,1.18,239,2.539,244,2.569,246,1.332,254,1.729,255,2.316,258,2.017,265,1.812,267,2.017,268,2.266,283,1.812,285,1.332,332,1.474,373,1.423,389,1.589,395,1.729,398,1.655,426,2.188,482,5.967,495,2.316,540,2.539,570,1.812,588,2.539,674,2.15,694,2.017,699,2.539,734,1.529,740,2.017,745,2.316,747,2.786,759,2.539,762,1.332,765,3.306,769,1.729,772,2.539,773,2.15,818,1.812,948,5.257,952,2.786,953,1.729,954,2.017,1067,2.539,1069,3.904,1070,2.539,1071,3.904,1072,3.904,1073,3.904,1074,4.756,1075,4.756,1084,2.539,1134,2.877,1135,2.539,1136,4.423,1137,2.316,1138,2.877,1139,3.904,1140,2.877]],["t/110",[6,1.352,26,3.774,27,4.403,228,1.065,383,3.217,572,3.003,645,4.403,781,3.955,783,2.816,800,3.469,905,3.338,1141,6.279,1142,6.279,1143,4.693,1144,6.279]],["t/112",[6,0.978,27,3.188,58,2.329,84,1.505,89,4.012,126,1.762,146,2.512,230,2.863,338,4.012,357,3.398,373,2.249,393,3.188,395,2.733,564,4.012,572,2.174,605,3.66,606,2.733,747,2.863,755,4.962,763,2.616,765,3.398,793,4.012,809,5.63,814,3.398,820,4.012,838,2.733,893,3.188,905,2.417,910,4.012,912,5.439,1137,3.66,1145,4.012,1146,4.546,1147,4.546,1148,4.546,1149,4.012,1150,4.546,1151,4.546,1152,4.546,1153,3.188,1154,4.546,1155,4.546,1156,4.546,1157,4.546,1158,4.546,1159,4.012,1160,4.546,1161,3.66,1162,4.546,1163,4.546]],["t/114",[12,0.628,47,3.624,74,0.971,88,3.017,115,1.872,202,3.017,212,2.205,228,0.729,251,2.586,350,3.215,353,1.668,356,2.71,357,3.215,373,3.802,396,2.377,398,2.476,400,2.495,423,2.476,565,3.464,663,3.017,722,1.992,734,2.287,762,1.992,771,1.715,783,1.93,787,2.852,800,2.377,808,3.797,830,4.162,832,2.71,833,3.464,877,6.783,880,5.997,893,3.017,1145,3.797,1164,5.935,1165,4.303,1166,4.303,1167,3.464,1168,3.017,1169,4.303,1170,4.303,1171,4.303,1172,5.935]],["t/116",[6,1.379,228,1.086,400,2.352,783,3.441,799,5.652,802,5.652,803,5.652,804,5.652,806,5.157,1173,6.405,1174,6.405,1175,6.405]],["t/118",[337,2.408]],["t/120",[6,0.985,12,1.024,28,1.878,47,2.265,73,1.78,74,1.033,129,1.595,212,2.346,228,1.19,246,2.867,353,2.72,373,2.265,383,3.173,396,3.421,414,2.884,595,3.035,761,3.21,762,2.119,763,2.635,764,4.651,771,2.797,781,2.884,783,3.148,784,2.635,821,3.686,838,2.752,879,3.686,944,3.686,1132,4.041,1176,4.041,1177,4.579,1178,4.579,1179,4.579,1180,4.041,1181,4.579,1182,4.579]],["t/122",[12,0.698,74,1.918,219,1.907,222,3.155,228,0.811,240,2.214,244,2.111,464,2.542,524,2.774,611,3.268,612,4.017,613,3.354,614,2.287,621,4.404,622,3.17,626,2.643,627,2.451,628,3.575,629,3.013,630,3.013,927,3.17,974,4.129,1183,4.783,1184,3.354,1185,3.851,1186,3.851,1187,4.221,1188,4.221,1189,4.221,1190,3.354]],["t/125",[74,1.762,219,2.632,228,1.119,346,3.968,355,3.799,622,4.376,1191,6.602,1192,6.602,1193,6.602]],["t/127",[33,2.632,74,1.762,199,3.968,219,2.632,228,1.119,638,5.826,1194,6.602,1195,6.602,1196,6.602]],["t/129",[33,2.632,74,1.762,77,4.629,118,3.799,219,2.632,228,1.119,1197,6.602,1198,6.602,1199,6.602]],["t/132",[12,0.754,74,1.844,222,3.32,228,0.876,244,2.467,400,1.898,464,2.747,524,2.919,611,2.648,612,3
.255,614,2.472,621,3.862,626,2.855,627,2.648,776,3.862,787,3.425,789,3.624,790,3.624,831,3.862,972,4.161,974,3.862,1184,3.624,1185,4.161,1186,4.161,1200,5.168,1201,5.168,1202,5.168]],["t/134",[12,0.608,44,2.059,74,1.713,102,2.213,219,1.659,222,2.87,228,1.225,229,2.379,240,1.927,244,1.921,361,2.759,389,2.3,400,2.652,464,2.213,524,2.524,578,2.759,611,2.133,614,1.991,621,2.396,626,2.3,627,2.133,645,2.919,753,3.352,768,2.396,781,2.622,783,2.602,787,3.845,907,3.352,927,2.759,944,3.352,1176,3.674,1185,3.352,1186,3.352,1187,3.674,1188,3.674,1203,7.595,1204,3.674,1205,4.163,1206,4.163,1207,4.163,1208,4.163,1209,4.163,1210,4.163,1211,4.163,1212,4.163,1213,4.163,1214,4.163]],["t/136",[6,1.436,12,0.973,84,2.208,129,2.324,254,4.01,334,4.421,335,4.421,458,5.887,1215,6.671]],["t/138",[6,1.421,12,0.963,25,2.872,56,4.376,60,2.424,74,1.489,221,3.968,228,1.119,1216,6.602,1217,6.602]],["t/140",[12,0.944,25,2.814,55,3.723,60,2.375,74,1.459,160,3.888,228,1.097,288,3.439,363,3.723,1218,4.536,1219,6.469,1220,6.469]],["t/142",[12,0.954,57,5.261,74,1.474,159,4.582,228,1.108,591,4.884,700,4.884,722,3.025,1221,6.535,1222,6.535,1223,6.535]],["t/144",[84,2.231,126,2.613,228,1.143,254,4.052,773,5.038,1139,5.949,1224,5.949,1225,6.741]],["t/146",[12,0.954,50,4.116,228,1.108,398,3.761,591,5.803,593,5.261,675,5.767,722,3.025,1168,4.582,1226,6.535]],["t/148",[6,0.952,10,2.35,12,1.006,25,1.923,33,2.41,48,3.304,65,3.304,71,2.553,73,1.121,76,1.392,84,1.464,99,2.93,115,1.923,129,1.54,131,2.35,135,2.93,180,2.93,233,1.813,295,2.544,326,2.785,329,4.519,333,2.114,335,2.93,336,5.549,337,1.464,341,3.304,349,4.008,350,4.519,355,2.544,356,3.809,358,3.902,383,2.265,392,3.559,397,3.902,423,2.544,570,2.785,594,5.151,606,2.657,608,3.559,917,3.902,928,3.1,1035,3.559,1098,3.902,1227,4.421,1228,4.421,1229,4.421]],["t/150",[337,2.408]],["t/152",[6,1.466,28,2.794,246,3.153,353,2.641,747,4.291,787,4.515,1230,6.812]],["t/154",[6,1.121,74,1.175,126,2.615,138,3.701,173,3.026,228,0.883,246,3.794,353,3.177,389,4.528,694,4.73,695,4.195,750,3.727,771,2.077,1231,5.21,1232,4.598,1233,4.598,1234,5.21,1235,5.21]],["t/156",[6,1.22,74,1.279,126,2.76,173,2.543,228,0.961,246,3.779,353,3.165,389,4.51,584,4.719,750,3.132,771,2.26,1232,5.004,1233,5.004,1236,5.67,1237,5.67]],["t/158",[6,1.14,26,3.183,28,2.172,173,3.058,204,2.451,228,0.898,246,3.156,297,2.926,332,2.713,337,1.753,373,2.62,433,3.336,572,2.533,573,3.958,694,5.287,750,2.926,762,2.451,909,3.713,1168,3.713,1238,6.817,1239,5.296,1240,5.296,1241,5.296,1242,5.296,1243,5.296,1244,5.296,1245,5.296,1246,5.296,1247,5.296]],["t/160",[6,1.586,12,0.636,73,1.106,74,1.351,126,1.691,129,1.519,138,3.645,173,3.069,195,4.536,200,3.938,204,2.019,228,1.016,315,3.058,317,4.24,584,4.883,663,3.058,674,3.259,686,7.212,711,3.511,750,3.781,763,2.51,838,2.621,1099,3.511,1143,3.259,1248,4.361,1249,4.361,1250,4.361,1251,4.361,1252,3.849,1253,4.361,1254,4.361,1255,4.361]],["t/162",[12,0.806,33,2.791,63,3.052,73,1.401,81,3.479,98,2.141,272,4.128,285,2.557,295,3.179,297,3.052,353,2.141,379,3.873,603,4.128,690,5.638,692,3.873,768,3.179,771,2.202,1256,7.003,1257,4.447,1258,5.523,1259,5.523,1260,5.523,1261,5.523,1262,5.523,1263,5.523,1264,5.523]],["t/164",[6,1.289,127,5.504,297,3.308,353,2.855,383,3.773,398,3.445,433,3.771,763,3.445,771,2.935,773,4.474,838,3.598,860,4.198,1265,5.987,1266,5.987,1267,5.987,1268,5.987]],["t/166",[6,1.308,12,0.65,73,1.129,74,1.371,98,2.355,126,2.681,138,3.422,173,3.333,200,3.497,228,0.755,240,2.061,243,2.366,263,3.327,297,2.459,317,3.497,663,3.121,690,4.892,708,5.362,711,3.584,760,3.327,763,2.562,768,2.562,7
71,1.774,774,3.327,1252,3.929,1269,4.452,1270,4.452,1271,6.917,1272,6.917,1273,4.452,1274,4.452,1275,4.452,1276,6.076,1277,3.929,1278,3.929,1279,4.452,1280,4.452,1281,3.929]],["t/168",[12,0.731,33,1.996,138,3.252,173,3.291,228,1.244,229,2.696,337,1.657,387,3.511,388,4.609,405,3.742,692,4.609,697,4.609,734,3.494,771,1.996,909,3.511,1282,6.573,1283,5.007,1284,4.419,1285,5.007,1286,5.007,1287,5.007,1288,5.007,1289,6.573,1290,6.573,1291,6.573,1292,6.573]],["t/170",[158,5.435,228,1.044,229,2.526,346,3.702,353,2.387,447,3.702,762,2.851,769,3.702,771,2.455,800,3.403,1218,4.319,1293,5.435,1294,7.493,1295,6.159,1296,6.159,1297,6.159]],["t/172",[33,2.164,60,1.994,75,3.125,161,4.058,173,2.435,193,3.599,219,2.164,228,0.921,229,2.227,353,2.105,361,3.599,447,3.264,734,3.682,762,2.514,769,3.264,1218,3.807,1277,4.792,1278,4.792,1298,5.43,1299,5.43,1300,5.43,1301,5.43,1302,5.43,1303,5.43,1304,6.928,1305,6.928,1306,5.43,1307,5.43]],["t/174",[44,2.881,71,2.459,78,4.352,84,1.928,102,3.096,129,2.522,228,0.987,377,4.689,387,4.084,447,3.501,459,5.14,590,4.084,762,2.696,769,3.501,1161,4.689,1204,5.14,1281,6.389,1308,5.14,1309,5.824,1310,5.824,1311,5.824]],["t/176",[129,1.975,173,3.193,228,0.961,323,3.263,332,2.905,353,2.198,383,2.905,447,4.279,682,4.237,692,3.976,697,3.976,762,2.625,769,3.408,770,5.004,771,2.838,800,3.132,936,3.976,1095,4.237,1218,3.976,1293,5.004,1312,7.12,1313,5.67]],["t/178",[337,2.408]],["t/180",[6,1.366,12,0.554,44,1.877,64,2.66,73,0.962,129,1.322,164,2.017,204,2.515,228,0.921,229,2.228,332,1.944,340,2.515,353,2.106,363,3.988,367,4.409,387,2.66,414,3.422,426,1.877,581,4.166,593,3.055,597,2.39,660,3.348,663,3.81,680,3.348,681,3.348,682,2.836,683,3.348,684,3.055,750,3.507,762,3.843,768,2.184,769,2.281,771,1.512,857,3.055,860,2.66,1314,5.433,1315,3.794,1316,3.794,1317,3.794,1318,5.433,1319,3.794,1320,3.348,1321,3.794,1322,3.794,1323,3.794,1324,3.794,1325,3.794,1326,3.794,1327,3.794,1328,3.794,1329,3.794]],["t/182",[6,1.556,12,0.709,17,1.783,44,2.402,98,1.882,126,2.497,129,1.692,180,4.269,221,2.918,297,2.682,353,1.882,363,4.739,367,2.918,389,3.993,466,3.058,572,2.322,581,2.918,750,2.682,762,3.346,769,3.872,861,3.909,1143,3.629,1330,4.855,1331,4.855,1332,4.855,1333,4.855,1334,3.629,1335,4.855,1336,4.855,1337,4.855,1338,4.855]],["t/184",[228,1.22,1339,7.194]],["t/186",[60,1.769,71,2.035,74,1.803,82,3.194,129,1.679,142,3.688,156,3.035,160,3.852,199,2.897,221,3.852,228,0.817,268,3.284,329,3.601,337,2.384,363,3.688,378,3.601,379,3.379,381,3.88,389,2.662,423,2.773,635,3.379,722,2.231,753,3.88,830,3.379,860,3.379,1034,4.253,1143,3.601,1340,4.819,1341,4.819,1342,6.409,1343,4.819,1344,4.819,1345,4.819,1346,4.819]],["t/188",[337,2.408]],["t/190",[6,1.392,42,4.287,73,1.641,117,4.835,233,2.653,246,2.995,251,3.888,263,4.835,861,5.208,923,4.835,1347,6.469,1348,6.469]],["t/192",[6,1.556,7,3.707,12,0.709,16,3.405,68,3.218,78,3.629,82,4.269,117,3.629,129,1.692,213,3.909,244,1.607,285,2.248,332,2.488,400,1.783,783,3.593,784,2.794,806,3.909,953,2.918,1349,4.855,1350,4.855,1351,4.855,1352,4.285,1353,5.685,1354,5.685,1355,5.685,1356,4.285,1357,4.855,1358,5.685,1359,4.855,1360,4.285,1361,4.285,1362,4.285,1363,4.855]],["t/194",[6,1.422,7,3.294,12,0.596,44,2.831,68,2.707,71,1.724,73,1.036,81,2.572,102,2.171,129,1.423,212,3.386,213,3.288,357,3.052,395,2.455,400,1.499,712,3.604,780,5.051,783,3.783,784,2.35,785,2.863,786,2.863,788,4.278,789,2.863,790,2.863,831,3.052,849,3.052,1030,3.052,1167,3.288,1352,3.604,1353,5.051,1354,6.655,1355,3.604,1356,5.051,1360,3.604,1361,3.604,1362,3.604,1364,4.084,1365,4.084,1366,4.084,1367,4
.084,1368,4.084,1369,4.084,1370,4.084,1371,4.084,1372,4.084,1373,4.084,1374,4.084]],["t/196",[6,1.379,7,3.686,68,4.245,188,5.734,190,4.612,342,5.652,400,2.352,488,5.652,594,4.786,1375,6.405,1376,6.405]],["t/198",[5,2.692,6,0.826,22,2.418,47,1.899,68,2.545,71,1.621,82,2.545,102,2.041,115,1.67,127,2.869,129,1.338,146,2.121,150,2.869,199,2.308,202,2.692,234,2.692,263,4.095,280,2.869,294,2.869,323,2.21,333,1.836,346,2.308,395,2.308,426,1.899,468,3.091,500,2.418,524,1.67,783,2.866,832,3.451,834,3.091,849,5.726,859,3.389,860,2.692,869,3.389,1087,3.389,1095,2.869,1099,3.091,1159,3.389,1161,3.091,1180,3.389,1218,2.692,1308,3.389,1358,3.389,1377,5.48,1378,7.662,1379,3.84,1380,5.48,1381,5.48,1382,5.48,1383,3.84,1384,5.48,1385,3.84,1386,3.84,1387,3.84,1388,6.39,1389,3.84,1390,3.389,1391,3.84,1392,3.84]],["t/200",[337,2.408]],["t/202",[200,3.445,228,1.249,359,3.771,360,3.968,595,3.968,614,2.863,884,4.82,970,4.427,996,3.598,1393,5.987,1394,6.499,1395,7.364,1396,5.987,1397,5.987,1398,5.987,1399,5.987]],["t/204",[200,3.761,228,1.108,595,4.331,970,4.98,996,3.928,1394,5.767,1400,6.535,1401,6.535,1402,6.535]],["t/206",[337,2.408]],["t/208",[12,1.119,33,1.704,34,2.044,60,2.815,73,1.499,74,1.869,98,2.29,199,3.551,219,1.704,221,2.569,222,2.923,228,0.725,244,1.956,346,2.569,464,2.272,524,1.859,611,2.19,612,2.692,613,2.997,614,2.044,621,3.897,622,2.833,623,2.997,624,2.833,626,2.361,627,2.19,628,3.194,629,2.692,630,2.692,750,2.361,974,3.897,1168,2.997,1184,2.997,1189,3.772,1190,2.997,1257,3.441,1403,4.274,1404,4.274,1405,4.274]],["t/210",[12,1.039,33,1.607,34,1.928,60,1.481,74,1.804,76,1.27,78,3.014,93,3.559,98,1.563,219,1.607,222,2.806,228,0.684,244,2.485,254,2.424,256,4.602,267,2.827,268,2.066,284,2.143,288,2.143,346,2.424,390,2.673,464,2.143,466,2.54,515,3.014,524,1.754,571,3.014,611,2.066,612,2.54,613,2.827,614,1.928,621,3.777,624,3.76,626,2.228,627,2.066,629,2.54,630,2.54,750,2.228,761,2.827,970,2.424,974,3.777,978,3.559,1066,3.559,1153,2.827,1190,2.827,1257,3.247,1334,3.014,1406,5.673,1407,4.032,1408,4.032,1409,4.032]],["t/212",[12,1.017,28,1.37,34,1.598,47,2.449,73,1.256,74,1.788,98,1.92,146,1.845,174,2.948,186,2.008,219,1.331,222,2.449,228,0.84,229,1.37,233,1.37,244,1.953,265,2.104,268,1.712,315,2.342,317,1.922,372,2.948,373,1.652,390,2.214,398,1.922,423,1.922,426,1.652,464,1.776,515,2.496,524,1.453,572,3.491,611,1.712,612,2.104,613,2.342,614,1.598,621,2.85,622,2.214,623,2.342,624,3.282,626,1.845,627,1.712,629,2.104,630,2.104,678,2.689,745,2.689,747,4.11,762,1.546,763,1.922,764,2.214,774,2.496,830,2.342,953,2.008,970,2.008,974,3.755,996,2.008,1030,2.496,1137,2.689,1153,2.342,1184,2.342,1190,2.342,1334,2.496,1410,3.34,1411,3.34,1412,3.34,1413,3.34,1414,3.34,1415,4.751,1416,4.37,1417,2.948,1418,3.34,1419,2.948]],["t/214",[12,1.055,34,2.322,60,2.654,71,2.05,73,1.232,84,1.607,98,1.882,102,2.581,115,2.112,126,2.497,195,3.218,229,1.991,332,2.488,333,2.322,363,4.16,390,3.218,426,3.186,572,3.457,617,3.629,678,3.909,747,4.057,761,3.405,768,2.794,774,3.629,884,3.909,1334,3.629,1415,3.909,1420,4.285,1421,6.441,1422,4.855,1423,4.855,1424,4.855,1425,4.855]],["t/216",[6,0.661,12,0.818,73,0.778,74,1.942,199,2.793,219,1.223,222,2.299,228,0.52,229,1.259,233,1.259,244,1.538,360,2.034,363,1.766,390,2.034,464,2.47,524,3.076,572,3.382,581,1.845,584,2.034,611,1.573,614,1.468,621,2.674,622,2.034,623,2.152,624,3.08,626,1.696,627,1.573,628,2.294,629,1.933,630,1.933,734,1.631,909,3.259,974,4.069,996,1.845,1184,2.152,1190,2.152,1415,3.742,1416,4.101,1417,2.709,1419,2.709,1426,2.709,1427,3.069,1428,3.069,1429,2.471,1430,2.709,1431,2.
709,1432,2.471,1433,2.709,1434,3.069,1435,3.069,1436,3.069,1437,3.069,1438,3.069,1439,3.069,1440,3.069,1441,3.069,1442,3.069,1443,3.069,1444,2.709,1445,2.709,1446,3.069,1447,3.069,1448,3.069,1449,3.069,1450,3.069,1451,4.647,1452,3.069,1453,5.608,1454,3.069,1455,2.709,1456,3.069,1457,3.069,1458,3.069,1459,3.069,1460,3.069]],["t/218",[337,2.408]],["t/220",[1,1.032,12,0.25,15,1.032,18,0.912,19,1.491,27,1.203,28,0.704,30,2.832,34,0.821,39,1.515,58,0.879,71,2.678,74,1.481,84,1.254,91,1.382,107,1.283,122,1.515,124,1.283,132,1.515,144,1.283,165,1.515,167,1.203,186,1.032,191,1.283,196,2.568,200,1.675,209,1.515,222,0.849,228,0.291,231,1.137,234,2.041,236,1.283,237,2.175,242,2.657,243,0.912,247,1.203,249,1.081,251,1.032,255,1.382,259,1.137,269,1.515,272,2.175,273,1.382,280,1.283,288,1.547,306,1.283,309,1.515,318,1.515,323,0.988,326,1.833,334,1.929,341,1.283,349,1.137,355,0.988,366,2.568,384,1.283,396,0.948,457,3.344,473,1.137,500,1.081,516,1.382,524,2.173,563,1.515,572,0.821,578,1.929,597,2.386,611,0.879,617,1.283,627,0.879,664,1.283,669,2.568,697,1.203,715,1.515,734,1.547,771,0.684,810,3.344,883,4.409,902,2.568,916,1.515,952,1.081,967,1.515,994,1.382,996,1.032,1029,1.382,1030,1.283,1037,2.568,1095,1.283,1097,1.515,1149,1.515,1153,1.203,1167,1.382,1168,1.203,1284,3.938,1420,1.515,1426,1.515,1429,1.382,1432,1.382,1433,1.515,1455,1.515,1461,1.716,1462,1.716,1463,1.716,1464,1.716,1465,1.716,1466,1.716,1467,1.716,1468,1.716,1469,2.91,1470,4.996,1471,1.716,1472,1.716,1473,1.716,1474,6.086,1475,1.716,1476,1.716,1477,1.716,1478,1.716,1479,1.716,1480,2.91,1481,1.716,1482,1.716,1483,3.789,1484,1.716,1485,1.716,1486,1.716,1487,1.716,1488,3.789,1489,1.716,1490,1.716,1491,1.716,1492,1.716,1493,1.716,1494,1.716,1495,1.716,1496,1.716,1497,1.716,1498,1.716,1499,2.91,1500,1.716,1501,1.716,1502,1.716,1503,1.716,1504,1.716,1505,1.716,1506,1.716,1507,1.716,1508,1.716,1509,1.716,1510,1.716,1511,1.716,1512,1.716,1513,1.716,1514,1.716,1515,1.716,1516,1.716,1517,1.716,1518,1.716,1519,1.716,1520,1.716,1521,1.716,1522,1.716,1523,1.716,1524,1.716,1525,3.789,1526,1.716,1527,3.789,1528,2.91,1529,1.716,1530,1.716,1531,3.789,1532,1.716,1533,1.716,1534,1.716,1535,1.716,1536,1.716,1537,1.716,1538,2.91,1539,1.716,1540,1.716,1541,1.716,1542,1.716,1543,1.716,1544,1.716,1545,1.716,1546,1.716,1547,1.716,1548,1.716,1549,1.716,1550,1.716,1551,1.716,1552,1.716,1553,1.716,1554,1.716,1555,1.716,1556,1.716,1557,1.716,1558,1.716,1559,1.716,1560,1.716,1561,1.716,1562,1.515,1563,1.515,1564,1.515,1565,1.515,1566,1.515,1567,1.515]],["t/222",[12,0.624,28,2.423,34,2.044,57,3.441,74,1.834,156,3.721,189,2.569,204,1.978,222,2.114,228,0.725,243,2.272,244,1.415,249,2.692,288,2.272,402,3.441,433,2.692,524,2.945,572,2.044,584,2.833,611,2.19,627,2.19,634,3.194,725,3.194,941,3.441,952,2.692,994,3.441,996,2.569,1111,3.772,1135,3.772,1153,2.997,1429,3.441,1430,3.772,1431,3.772,1432,3.441,1444,3.772,1445,3.772,1562,3.772,1563,3.772,1564,3.772,1565,3.772,1566,3.772,1567,3.772,1568,4.274,1569,4.274,1570,4.274,1571,4.274,1572,4.274,1573,4.274,1574,4.274,1575,4.274,1576,5.908,1577,4.274]],["t/224",[337,2.408]],["t/226",[337,2.408]],["t/228",[64,3.472,74,1.75,101,2.342,126,2.929,138,2.449,173,2.221,228,1.344,246,1.546,353,1.295,402,2.689,446,2.104,500,4.887,614,3.71,615,2.496,692,3.472,697,3.472,700,3.701,705,2.948,706,2.948,714,6.667,760,2.496,771,1.331,791,2.496,842,2.496,970,3.547,974,3.396,1224,2.948,1578,3.34,1579,4.952,1580,3.34,1581,4.952,1582,3.34,1583,3.34,1584,3.34,1585,3.34,1586,3.34,1587,3.34,1588,3.34,1589,5.9,1590,3.34,1591,3.34,1592,3.34,1593,3.34,159
4,3.34,1595,3.34,1596,3.34,1597,3.34,1598,3.34,1599,3.34,1600,3.34]],["t/230",[74,1.658,126,2.964,221,2.294,228,1.296,246,1.767,333,3.516,353,2.85,367,2.294,414,2.404,500,4.63,614,3.516,658,6.748,682,2.852,684,3.073,763,4.4,771,2.93,838,4.596,970,2.294,1042,3.368,1320,3.368,1601,3.817,1602,3.817,1603,3.817,1604,3.817,1605,3.817,1606,3.817,1607,3.817,1608,3.817,1609,3.817,1610,3.817,1611,3.817,1612,3.817]],["t/232",[337,2.408]],["t/234",[10,2.227,12,1.15,14,2.411,17,2.46,18,2.227,25,1.823,60,2.14,73,1.7,75,2.411,76,1.319,84,1.929,91,3.374,98,2.259,115,1.823,140,2.639,150,3.132,176,1.879,188,3.132,189,2.519,190,2.519,203,3.132,232,2.938,241,2.639,258,2.938,277,2.938,287,2.938,297,2.315,320,3.132,324,3.698,333,2.004,337,1.387,348,3.698,367,2.519,426,2.073,494,3.132,500,2.639,572,2.004,577,3.698,578,2.777,579,2.938,581,2.519,590,4.086,597,2.639,791,3.132,876,3.698,890,3.698,899,3.698,1390,3.698,1613,4.19,1614,5.828,1615,4.19,1616,4.19]],["t/236",[337,2.408]]],"invertedIndex":[["",{"_index":74,"t":{"2":{"position":[[1020,1]]},"6":{"position":[[379,1]]},"8":{"position":[[559,1],[614,1],[746,1],[998,5]]},"10":{"position":[[316,1],[611,1]]},"14":{"position":[[546,1]]},"19":{"position":[[1780,1],[2263,1],[2294,1],[2439,1],[2545,1],[2579,1],[2708,2],[2733,1],[2735,1],[2737,3],[2853,1],[2894,1],[2918,1],[3119,1],[3662,1],[3780,1],[4221,1],[4341,1],[4343,2],[4380,1],[4400,1],[4424,1],[4459,1],[4502,2],[4529,1],[4793,1],[5039,2],[5065,1],[5162,1],[5506,1],[5566,1],[5647,1],[6043,1],[6265,1],[6571,1],[6573,2],[6610,1],[6630,1],[6654,1],[6689,1],[6732,2],[6759,1],[7023,2],[7118,1],[7276,1],[7421,1],[7527,1],[7561,1],[7690,2],[7715,1],[7717,1],[7735,1],[7811,1],[7917,1],[7931,2],[7956,2],[7973,1],[8000,1],[8093,1],[8095,1],[8114,1],[8197,1],[8226,1],[8257,1],[8316,2],[8371,2],[8397,1],[8494,1],[8539,1]]},"35":{"position":[[72,3],[141,3],[145,1],[159,1],[211,1],[237,1],[257,1],[296,1],[368,1],[442,1],[468,1],[527,2]]},"37":{"position":[[339,1],[402,1],[465,3],[538,3],[553,1],[668,1],[753,3],[844,3],[855,1],[921,2]]},"53":{"position":[[30,1],[105,1]]},"63":{"position":[[154,1],[232,1]]},"68":{"position":[[492,1]]},"70":{"position":[[165,1]]},"74":{"position":[[61,1],[257,1],[347,1],[419,1],[504,1],[583,1],[651,1]]},"76":{"position":[[260,1],[517,1],[759,1],[957,1],[1079,1],[1163,1],[1307,1]]},"78":{"position":[[617,1],[1166,1],[1577,1],[1616,1],[1719,1],[1793,1],[1819,1],[2116,1],[2149,1],[2189,1],[2220,1],[2252,1],[2286,1],[2350,1],[2378,1],[2860,1]]},"80":{"position":[[1375,1],[1565,1],[2405,1],[2480,1],[2526,2],[2595,1],[2839,2]]},"86":{"position":[[448,1]]},"93":{"position":[[0,2],[62,2],[129,2],[218,3],[274,2],[321,2],[380,2]]},"108":{"position":[[1041,1],[1135,1],[1181,2],[1275,1],[1415,2]]},"114":{"position":[[230,1]]},"120":{"position":[[104,2]]},"122":{"position":[[160,1],[178,1],[230,1],[275,1],[334,2],[355,1],[405,1],[432,1]]},"125":{"position":[[6,1],[74,1]]},"127":{"position":[[6,1],[56,1]]},"129":{"position":[[6,1],[77,1]]},"132":{"position":[[85,1],[181,1],[200,1],[252,1],[297,1]]},"134":{"position":[[159,1],[269,1],[308,1],[360,1],[405,1]]},"138":{"position":[[88,1]]},"140":{"position":[[134,1]]},"142":{"position":[[84,1]]},"154":{"position":[[0,1]]},"156":{"position":[[0,1]]},"160":{"position":[[148,1],[425,1]]},"166":{"position":[[89,1],[329,1]]},"186":{"position":[[110,1],[112,1],[184,2],[187,1],[259,2]]},"208":{"position":[[292,1],[310,1],[428,1],[487,2],[508,1],[564,1],[592,1],[692,1]]},"210":{"position":[[391,1],[409,1],[507,1],[567,2],[594,1],[619,1],[732,1]]},"212":{"position":[[662,1
],[680,1],[806,1],[866,2],[887,1],[960,1],[994,1],[1004,1],[1030,1]]},"216":{"position":[[383,1],[524,1],[570,1],[597,1],[719,1],[874,1],[892,1],[1039,1],[1098,2],[1119,1],[1197,1],[1231,1],[1255,1],[1327,1],[1495,1],[1532,1],[1541,2],[1554,1],[1624,1]]},"220":{"position":[[299,1],[360,1],[2684,1],[2730,1],[2808,1],[2872,1],[2931,1],[2933,3],[3162,1],[3370,1]]},"222":{"position":[[155,1],[357,1],[528,1],[530,3],[697,1],[824,2],[906,1]]},"228":{"position":[[143,1],[239,1],[339,1],[442,1],[548,1],[667,1],[790,1],[881,1]]},"230":{"position":[[175,1],[319,1],[470,1],[610,1],[742,1]]}}}],["0",{"_index":712,"t":{"39":{"position":[[2048,1]]},"194":{"position":[[145,1]]}}}],["0.01",{"_index":592,"t":{"27":{"position":[[50,5]]}}}],["0.1",{"_index":1116,"t":{"88":{"position":[[530,6]]}}}],["0.55",{"_index":719,"t":{"39":{"position":[[2200,5]]}}}],["0.55temperature=0.55",{"_index":1252,"t":{"160":{"position":[[463,20]]},"166":{"position":[[366,20]]}}}],["01",{"_index":1244,"t":{"158":{"position":[[376,2]]}}}],["05",{"_index":1243,"t":{"158":{"position":[[373,2]]}}}],["1",{"_index":423,"t":{"19":{"position":[[2131,1]]},"39":{"position":[[2054,2]]},"45":{"position":[[437,2]]},"53":{"position":[[5,1]]},"63":{"position":[[5,1]]},"114":{"position":[[5,1]]},"148":{"position":[[241,2]]},"186":{"position":[[181,2]]},"212":{"position":[[223,2]]}}}],["1,200",{"_index":264,"t":{"10":{"position":[[768,5]]}}}],["1.0",{"_index":1365,"t":{"194":{"position":[[141,3]]}}}],["1.7.1",{"_index":850,"t":{"59":{"position":[[81,7]]}}}],["100",{"_index":133,"t":{"4":{"position":[[967,5]]}}}],["1106",{"_index":431,"t":{"19":{"position":[[2383,4],[7365,4]]}}}],["128k",{"_index":425,"t":{"19":{"position":[[2192,5]]},"88":{"position":[[360,5]]}}}],["18.17.0nvm",{"_index":802,"t":{"45":{"position":[[205,10]]},"116":{"position":[[23,10]]}}}],["1960s,but",{"_index":1475,"t":{"220":{"position":[[574,9]]}}}],["2",{"_index":830,"t":{"53":{"position":[[74,1]]},"63":{"position":[[92,1]]},"114":{"position":[[29,1],[370,1]]},"186":{"position":[[256,2]]},"212":{"position":[[364,2]]}}}],["2+3",{"_index":442,"t":{"19":{"position":[[2751,3]]}}}],["2.0",{"_index":1146,"t":{"112":{"position":[[62,3]]}}}],["2.1smart_llm_model=claud",{"_index":1290,"t":{"168":{"position":[[293,25],[425,25]]}}}],["20",{"_index":203,"t":{"8":{"position":[[194,2],[317,3]]},"25":{"position":[[167,3]]},"93":{"position":[[148,2]]},"234":{"position":[[516,3]]}}}],["2000",{"_index":701,"t":{"39":{"position":[[1706,5]]}}}],["2004(thi",{"_index":1468,"t":{"220":{"position":[[308,9]]}}}],["2021",{"_index":148,"t":{"6":{"position":[[179,6]]}}}],["2023\"report_typ",{"_index":1195,"t":{"127":{"position":[[39,16]]}}}],["2024",{"_index":1274,"t":{"166":{"position":[[433,6]]}}}],["20240229",{"_index":1292,"t":{"168":{"position":[[326,8],[458,8]]}}}],["2k",{"_index":1113,"t":{"86":{"position":[[385,4]]},"93":{"position":[[119,2]]}}}],["3",{"_index":734,"t":{"39":{"position":[[2548,2],[2767,2]]},"53":{"position":[[143,1]]},"63":{"position":[[208,1]]},"80":{"position":[[2458,2]]},"88":{"position":[[497,1]]},"108":{"position":[[1113,2]]},"114":{"position":[[134,1]]},"168":{"position":[[319,1],[451,1]]},"172":{"position":[[222,1],[267,1]]},"216":{"position":[[1539,1]]},"220":{"position":[[2645,2],[2962,1]]}}}],["3.10",{"_index":1177,"t":{"120":{"position":[[69,5]]}}}],["3.11",{"_index":780,"t":{"43":{"position":[[82,5]]},"194":{"position":[[275,4],[513,4]]}}}],["32768",{"_index":1272,"t":{"166":{"position":[[244,6],[323,5],[529,5]]}}}],["36",{"_index":1544,"t":{"220":{"position":[[2374
,2]]}}}],["4",{"_index":251,"t":{"10":{"position":[[285,2],[330,1]]},"19":{"position":[[2178,1],[2381,1],[7363,1]]},"27":{"position":[[82,1]]},"63":{"position":[[262,1]]},"80":{"position":[[2571,1]]},"114":{"position":[[268,1]]},"190":{"position":[[49,1]]},"220":{"position":[[2967,1]]}}}],["4000",{"_index":703,"t":{"39":{"position":[[1788,5]]}}}],["4o",{"_index":694,"t":{"39":{"position":[[1506,2],[1628,3]]},"88":{"position":[[341,2],[357,2]]},"108":{"position":[[1092,4]]},"154":{"position":[[250,2],[321,3]]},"158":{"position":[[407,2],[438,2],[467,3]]}}}],["5",{"_index":1124,"t":{"97":{"position":[[343,1]]}}}],["50",{"_index":1299,"t":{"172":{"position":[[35,3]]}}}],["6",{"_index":1125,"t":{"97":{"position":[[345,1]]}}}],["7",{"_index":1127,"t":{"99":{"position":[[32,1]]}}}],["700",{"_index":710,"t":{"39":{"position":[[1972,4]]}}}],["70b",{"_index":1277,"t":{"166":{"position":[[492,3]]},"172":{"position":[[269,3]]}}}],["7b",{"_index":1281,"t":{"166":{"position":[[541,2]]},"174":{"position":[[233,2],[277,2]]}}}],["7bsmart_llm_model=mistr",{"_index":1297,"t":{"170":{"position":[[160,25]]}}}],["80,000",{"_index":1525,"t":{"220":{"position":[[1742,7],[1815,7],[2488,7]]}}}],["800",{"_index":723,"t":{"39":{"position":[[2299,4]]}}}],["8192",{"_index":708,"t":{"39":{"position":[[1887,5]]},"166":{"position":[[496,4],[511,4]]}}}],["85",{"_index":238,"t":{"8":{"position":[[1011,3]]}}}],["8b",{"_index":1278,"t":{"166":{"position":[[508,2]]},"172":{"position":[[224,2]]}}}],["8x7b",{"_index":1271,"t":{"166":{"position":[[239,4],[318,4],[524,4]]}}}],["95",{"_index":303,"t":{"14":{"position":[[344,3]]}}}],["__call__",{"_index":1396,"t":{"202":{"position":[[103,9]]}}}],["__call__(cl",{"_index":1397,"t":{"202":{"position":[[117,13]]}}}],["__init__",{"_index":1581,"t":{"228":{"position":[[117,9],[131,10]]}}}],["__init__(self",{"_index":991,"t":{"78":{"position":[[1518,15]]}}}],["__main__",{"_index":630,"t":{"35":{"position":[[530,11]]},"37":{"position":[[924,11]]},"122":{"position":[[337,11]]},"208":{"position":[[490,11]]},"210":{"position":[[570,11]]},"212":{"position":[[869,11]]},"216":{"position":[[1101,11]]}}}],["__name__",{"_index":629,"t":{"35":{"position":[[518,8]]},"37":{"position":[[912,8]]},"122":{"position":[[325,8]]},"208":{"position":[[478,8]]},"210":{"position":[[558,8]]},"212":{"position":[[857,8]]},"216":{"position":[[1089,8]]}}}],["abil",{"_index":910,"t":{"70":{"position":[[101,7]]},"112":{"position":[[375,7]]}}}],["abov",{"_index":102,"t":{"4":{"position":[[330,6]]},"8":{"position":[[821,6]]},"10":{"position":[[518,5]]},"19":{"position":[[4808,6]]},"39":{"position":[[3278,5]]},"78":{"position":[[769,6],[1247,6],[1887,6],[2990,6],[3410,6]]},"134":{"position":[[645,5]]},"174":{"position":[[115,6]]},"194":{"position":[[186,5]]},"198":{"position":[[755,5]]},"214":{"position":[[20,5]]}}}],["abstract",{"_index":1402,"t":{"204":{"position":[[59,8]]}}}],["abstractsingleton(abc.abc",{"_index":1400,"t":{"204":{"position":[[6,26]]}}}],["academia",{"_index":300,"t":{"14":{"position":[[275,9]]}}}],["accept",{"_index":1064,"t":{"80":{"position":[[1805,8],[1857,10]]}}}],["access",{"_index":791,"t":{"43":{"position":[[225,6]]},"45":{"position":[[335,6]]},"228":{"position":[[109,7]]},"234":{"position":[[8,9]]}}}],["accord",{"_index":116,"t":{"4":{"position":[[500,9]]}}}],["accordingli",{"_index":1344,"t":{"186":{"position":[[341,12]]}}}],["account",{"_index":1263,"t":{"162":{"position":[[186,7]]}}}],["accur",{"_index":596,"t":{"29":{"position":[[72,9],[188,8],[426,8]]}}}],["achiev",{"_index":250,"t":{"10":
{"position":[[267,8]]}}}],["act",{"_index":935,"t":{"74":{"position":[[204,4]]}}}],["action",{"_index":491,"t":{"19":{"position":[[4092,6]]}}}],["activ",{"_index":845,"t":{"57":{"position":[[275,8],[327,10],[580,10]]},"59":{"position":[[635,8],[1006,10]]}}}],["activate/deactiv",{"_index":839,"t":{"57":{"position":[[42,19]]}}}],["actual",{"_index":52,"t":{"2":{"position":[[648,8]]}}}],["ad",{"_index":605,"t":{"31":{"position":[[152,6],[270,6]]},"39":{"position":[[892,6],[3792,6]]},"112":{"position":[[185,5]]}}}],["add",{"_index":398,"t":{"19":{"position":[[1131,3],[8773,3]]},"39":{"position":[[3233,3]]},"70":{"position":[[89,4]]},"78":{"position":[[2380,3],[3169,3]]},"108":{"position":[[669,3]]},"114":{"position":[[64,3]]},"146":{"position":[[13,3]]},"164":{"position":[[178,3]]},"212":{"position":[[226,3]]}}}],["add_edg",{"_index":1027,"t":{"78":{"position":[[3099,8]]}}}],["add_nod",{"_index":1026,"t":{"78":{"position":[[3089,9]]}}}],["addit",{"_index":180,"t":{"6":{"position":[[894,9]]},"19":{"position":[[8674,10]]},"39":{"position":[[215,9],[971,10],[3499,10],[3672,10]]},"82":{"position":[[499,9]]},"148":{"position":[[36,10]]},"182":{"position":[[246,8],[392,10]]}}}],["adjust",{"_index":823,"t":{"49":{"position":[[288,6]]}}}],["advanc",{"_index":1257,"t":{"162":{"position":[[19,8]]},"208":{"position":[[531,12]]},"210":{"position":[[642,12]]}}}],["advantag",{"_index":1049,"t":{"80":{"position":[[1226,9]]}}}],["afterward",{"_index":1359,"t":{"192":{"position":[[355,10]]}}}],["again",{"_index":342,"t":{"19":{"position":[[19,5]]},"196":{"position":[[111,6]]}}}],["agent",{"_index":76,"t":{"2":{"position":[[1048,5]]},"4":{"position":[[658,5],[764,5],[852,5]]},"8":{"position":[[394,5],[456,5],[597,5]]},"12":{"position":[[277,5]]},"14":{"position":[[145,6]]},"23":{"position":[[250,5]]},"25":{"position":[[60,5]]},"31":{"position":[[244,5]]},"37":{"position":[[0,5],[75,6],[177,5]]},"39":{"position":[[2575,6],[2634,5],[3020,5]]},"49":{"position":[[523,5]]},"53":{"position":[[86,5]]},"61":{"position":[[31,5]]},"65":{"position":[[37,5]]},"68":{"position":[[121,6],[304,5],[348,7],[435,5],[689,5],[790,5]]},"70":{"position":[[57,5],[73,5],[206,7],[319,6],[348,6],[504,7]]},"72":{"position":[[120,6],[165,5],[405,6],[562,5]]},"74":{"position":[[40,7],[136,5],[169,6],[198,5],[284,5]]},"76":{"position":[[829,5]]},"78":{"position":[[904,7],[917,5],[1058,7],[1290,5],[1325,5],[1410,5],[1936,6],[2003,6],[2036,7],[2129,6],[2399,5],[3369,6]]},"80":{"position":[[367,5],[385,6],[772,7],[2198,6],[2257,7]]},"82":{"position":[[814,5],[996,6]]},"88":{"position":[[50,7],[129,6],[316,6],[581,5],[762,5]]},"97":{"position":[[115,6],[223,6]]},"99":{"position":[[37,7],[130,7],[226,5],[259,6],[338,5]]},"108":{"position":[[218,7],[408,7],[430,6]]},"148":{"position":[[264,5]]},"210":{"position":[[20,5]]},"234":{"position":[[409,5]]}}}],["agent_rol",{"_index":735,"t":{"39":{"position":[[2551,11]]}}}],["ages.startup",{"_index":1481,"t":{"220":{"position":[[672,13]]}}}],["aggreg",{"_index":241,"t":{"10":{"position":[[15,11],[356,10]]},"12":{"position":[[469,9]]},"14":{"position":[[680,10]]},"25":{"position":[[150,11]]},"88":{"position":[[246,10],[961,9]]},"93":{"position":[[132,10]]},"234":{"position":[[499,11]]}}}],["ahead",{"_index":408,"t":{"19":{"position":[[1484,5],[8763,5]]}}}],["ai",{"_index":33,"t":{"2":{"position":[[373,2],[853,2]]},"14":{"position":[[82,2],[142,2],[226,2],[478,2],[745,2]]},"19":{"position":[[260,2]]},"23":{"position":[[79,2]]},"29":{"position":[[145,2],[234,2],[446,2]]},"68":{"position":[[118,2],[212,2],[43
2,2],[470,2],[620,2]]},"72":{"position":[[402,2]]},"80":{"position":[[2420,2]]},"82":{"position":[[102,2],[344,2],[1186,2]]},"97":{"position":[[220,2]]},"99":{"position":[[34,2]]},"108":{"position":[[1056,2]]},"127":{"position":[[21,2]]},"129":{"position":[[49,2]]},"148":{"position":[[176,2],[274,2]]},"162":{"position":[[28,2],[98,2]]},"168":{"position":[[16,2]]},"172":{"position":[[9,2]]},"208":{"position":[[547,4]]},"210":{"position":[[658,2]]}}}],["aim",{"_index":97,"t":{"4":{"position":[[270,4]]},"23":{"position":[[33,6]]},"49":{"position":[[416,3]]},"70":{"position":[[39,5]]},"82":{"position":[[288,6]]}}}],["aka",{"_index":892,"t":{"68":{"position":[[366,4]]}}}],["allow",{"_index":466,"t":{"19":{"position":[[3382,6]]},"59":{"position":[[412,6]]},"72":{"position":[[215,6]]},"78":{"position":[[268,8]]},"82":{"position":[[952,5]]},"182":{"position":[[115,5]]},"210":{"position":[[88,6]]}}}],["alreadi",{"_index":885,"t":{"68":{"position":[[133,7]]},"82":{"position":[[445,7]]}}}],["although",{"_index":365,"t":{"19":{"position":[[430,8]]}}}],["alway",{"_index":1106,"t":{"82":{"position":[[1422,7]]}}}],["amaz",{"_index":607,"t":{"31":{"position":[[315,7]]},"39":{"position":[[121,7]]}}}],["amazingli",{"_index":1261,"t":{"162":{"position":[[83,9]]}}}],["analysi",{"_index":322,"t":{"14":{"position":[[788,9]]},"37":{"position":[[1253,9]]},"76":{"position":[[115,9]]},"101":{"position":[[92,8]]}}}],["analyz",{"_index":310,"t":{"14":{"position":[[511,7],[638,7]]}}}],["andrew",{"_index":894,"t":{"68":{"position":[[392,6]]}}}],["anoth",{"_index":196,"t":{"8":{"position":[[0,7]]},"220":{"position":[[2042,7],[2287,7]]}}}],["answer",{"_index":258,"t":{"10":{"position":[[537,6],[644,6]]},"19":{"position":[[1835,7],[2016,6],[2088,11],[5282,6],[5702,7],[5883,6],[5955,12]]},"86":{"position":[[570,8]]},"108":{"position":[[1300,6]]},"234":{"position":[[211,7]]}}}],["anthrop",{"_index":1282,"t":{"168":{"position":[[0,9],[120,9]]}}}],["anyth",{"_index":1171,"t":{"114":{"position":[[311,8]]}}}],["apa",{"_index":267,"t":{"10":{"position":[[809,3],[871,3]]},"39":{"position":[[291,5],[2371,4]]},"80":{"position":[[2628,3]]},"108":{"position":[[1362,3]]},"210":{"position":[[694,3]]}}}],["api",{"_index":353,"t":{"19":{"position":[[191,3],[334,3],[531,4],[686,3],[882,3],[938,3],[1683,4],[1923,3],[2258,4],[3291,5],[3312,3],[4216,4],[5491,3],[5790,3]]},"23":{"position":[[131,4],[157,3]]},"31":{"position":[[113,3]]},"39":{"position":[[439,3],[3530,3]]},"63":{"position":[[184,3]]},"97":{"position":[[462,3]]},"106":{"position":[[120,3],[168,3]]},"114":{"position":[[73,3]]},"120":{"position":[[259,3],[322,3],[376,3]]},"152":{"position":[[22,3]]},"154":{"position":[[22,3],[83,3],[156,3],[216,3],[286,3]]},"156":{"position":[[22,3],[95,3],[168,3],[228,3]]},"162":{"position":[[201,3]]},"164":{"position":[[91,3],[152,3]]},"170":{"position":[[22,3]]},"172":{"position":[[22,3]]},"176":{"position":[[43,3]]},"180":{"position":[[216,3],[619,3]]},"182":{"position":[[167,3]]},"228":{"position":[[817,3]]},"230":{"position":[[59,3],[207,3],[358,3],[502,3],[640,3]]}}}],["apikeyerror(except",{"_index":1601,"t":{"230":{"position":[[6,22]]}}}],["app",{"_index":357,"t":{"19":{"position":[[263,4]]},"112":{"position":[[10,3]]},"114":{"position":[[437,3]]},"194":{"position":[[497,3]]}}}],["appl",{"_index":1576,"t":{"222":{"position":[[562,7],[600,6]]}}}],["applic",{"_index":570,"t":{"23":{"position":[[82,11]]},"61":{"position":[[19,11]]},"63":{"position":[[220,11]]},"78":{"position":[[209,12],[435,11]]},"106":{"position":[[195,12]]},"108":{"positio
n":[[974,11]]},"148":{"position":[[179,12]]}}}],["approach",{"_index":121,"t":{"4":{"position":[[693,8]]},"78":{"position":[[121,8]]}}}],["apt",{"_index":1362,"t":{"192":{"position":[[454,3]]},"194":{"position":[[120,3]]}}}],["arbitrari",{"_index":918,"t":{"70":{"position":[[483,9]]}}}],["architectur",{"_index":281,"t":{"12":{"position":[[90,13]]},"78":{"position":[[3302,12]]},"104":{"position":[[34,12]]}}}],["area",{"_index":171,"t":{"6":{"position":[[661,5]]},"14":{"position":[[250,5]]},"78":{"position":[[811,6]]}}}],["arg",{"_index":1398,"t":{"202":{"position":[[131,6]]}}}],["argument",{"_index":1334,"t":{"182":{"position":[[403,9]]},"210":{"position":[[211,8]]},"212":{"position":[[446,8]]},"214":{"position":[[217,8]]}}}],["around",{"_index":205,"t":{"8":{"position":[[224,6],[977,6]]},"27":{"position":[[43,6]]},"88":{"position":[[490,6]]}}}],["articl",{"_index":77,"t":{"2":{"position":[[1097,8]]},"37":{"position":[[1101,8]]},"68":{"position":[[656,7]]},"82":{"position":[[1097,7]]},"129":{"position":[[24,7]]}}}],["arxiv",{"_index":1318,"t":{"180":{"position":[[450,5],[747,5]]}}}],["ask",{"_index":253,"t":{"10":{"position":[[384,3]]},"19":{"position":[[1170,3]]}}}],["assign",{"_index":738,"t":{"39":{"position":[[2653,8]]}}}],["assist",{"_index":352,"t":{"19":{"position":[[180,10],[250,9],[323,10],[675,10],[871,10],[921,9],[1193,9],[1309,9],[1402,9],[1452,9],[2147,9],[3040,9],[3301,10],[3424,11],[4177,9],[4822,9],[5252,9],[8619,9],[8809,9]]},"39":{"position":[[3640,11]]},"65":{"position":[[43,9]]},"68":{"position":[[796,9]]},"80":{"position":[[113,9],[2226,9],[2314,10],[2361,9],[3149,9]]},"82":{"position":[[142,9],[860,10],[958,10]]}}}],["assistant.idprint(f\"assist",{"_index":548,"t":{"19":{"position":[[7737,29]]}}}],["assistant_id",{"_index":547,"t":{"19":{"position":[[7719,15],[7771,17]]}}}],["assistant_id=assistant_id",{"_index":455,"t":{"19":{"position":[[3175,27],[8170,26]]}}}],["assistant_prompt_instruct",{"_index":417,"t":{"19":{"position":[[1751,28]]}}}],["assistantassist",{"_index":427,"t":{"19":{"position":[[2275,18],[7257,18]]}}}],["assistant’",{"_index":416,"t":{"19":{"position":[[1725,11]]}}}],["associ",{"_index":863,"t":{"59":{"position":[[668,10],[791,10]]}}}],["assum",{"_index":156,"t":{"6":{"position":[[335,7],[904,8]]},"8":{"position":[[337,8]]},"10":{"position":[[1074,6]]},"39":{"position":[[546,6]]},"78":{"position":[[1953,6]]},"186":{"position":[[278,7]]},"222":{"position":[[219,8],[294,8]]}}}],["async",{"_index":645,"t":{"37":{"position":[[798,5]]},"78":{"position":[[1539,5]]},"80":{"position":[[1305,5]]},"110":{"position":[[110,5]]},"134":{"position":[[38,5]]}}}],["async_browse(url",{"_index":218,"t":{"8":{"position":[[616,18]]}}}],["async_mode=true,)queri",{"_index":1575,"t":{"222":{"position":[[505,22]]}}}],["asynchron",{"_index":467,"t":{"19":{"position":[[3393,12]]}}}],["asyncio",{"_index":214,"t":{"8":{"position":[[443,8]]},"37":{"position":[[261,8]]}}}],["asyncio.gather(*task",{"_index":226,"t":{"8":{"position":[[754,22]]}}}],["asyncio.run(generate_research_report",{"_index":647,"t":{"37":{"position":[[936,39]]}}}],["asyncio.run(get_report(prompt=prompt",{"_index":1409,"t":{"210":{"position":[[734,37]]}}}],["asyncio.run(get_report(queri",{"_index":1189,"t":{"122":{"position":[[434,29]]},"208":{"position":[[694,29]]}}}],["asyncio.run(get_report(query=queri",{"_index":1419,"t":{"212":{"position":[[1032,35]]},"216":{"position":[[1626,35]]}}}],["asyncio.run(main",{"_index":631,"t":{"35":{"position":[[542,19]]}}}],["asyncioapp",{"_index":1201,"t":{"132":{"position":
[[74,10]]}}}],["asyncioasync",{"_index":613,"t":{"35":{"position":[[47,12]]},"122":{"position":[[101,12]]},"208":{"position":[[218,12]]},"210":{"position":[[331,12]]},"212":{"position":[[583,12]]}}}],["asyncioconnection_str",{"_index":1436,"t":{"216":{"position":[[358,24]]}}}],["asynciofrom",{"_index":1183,"t":{"122":{"position":[[47,11]]}}}],["atleast",{"_index":1526,"t":{"220":{"position":[[1807,7]]}}}],["attent",{"_index":903,"t":{"68":{"position":[[630,9]]}}}],["attribut",{"_index":913,"t":{"70":{"position":[[182,10]]}}}],["audio",{"_index":316,"t":{"14":{"position":[[595,6]]}}}],["autogpt",{"_index":0,"t":{"2":{"position":[[6,7],[282,7],[484,7]]},"8":{"position":[[19,7],[1027,8]]}}}],["autom",{"_index":289,"t":{"14":{"position":[[30,10]]},"76":{"position":[[19,10]]}}}],["automat",{"_index":401,"t":{"19":{"position":[[1244,13]]}}}],["autonom",{"_index":10,"t":{"2":{"position":[[105,10],[1037,10]]},"23":{"position":[[230,10]]},"25":{"position":[[40,10]]},"68":{"position":[[764,10]]},"72":{"position":[[542,10]]},"74":{"position":[[273,10]]},"80":{"position":[[102,10]]},"99":{"position":[[327,10]]},"108":{"position":[[447,13]]},"148":{"position":[[244,10]]},"234":{"position":[[398,10]]}}}],["avail",{"_index":263,"t":{"10":{"position":[[744,10]]},"166":{"position":[[444,9]]},"190":{"position":[[91,9]]},"198":{"position":[[269,9],[692,9]]}}}],["availablerespons",{"_index":225,"t":{"8":{"position":[[727,18]]}}}],["averag",{"_index":236,"t":{"8":{"position":[[949,7]]},"88":{"position":[[462,7]]},"97":{"position":[[319,7]]},"220":{"position":[[1766,7]]}}}],["avoid",{"_index":869,"t":{"59":{"position":[[960,8]]},"198":{"position":[[599,5]]}}}],["await",{"_index":222,"t":{"8":{"position":[[669,5],[748,5]]},"35":{"position":[[406,5],[470,5]]},"37":{"position":[[625,5],[670,5],[857,5]]},"78":{"position":[[1757,5],[1821,5]]},"122":{"position":[[232,5],[277,5]]},"132":{"position":[[254,5],[299,5]]},"134":{"position":[[362,5],[407,5]]},"208":{"position":[[385,5],[430,5]]},"210":{"position":[[464,5],[509,5]]},"212":{"position":[[763,5],[808,5]]},"216":{"position":[[996,5],[1041,5]]},"220":{"position":[[3372,5]]},"222":{"position":[[908,5]]}}}],["azur",{"_index":1238,"t":{"158":{"position":[[44,5],[66,5]]}}}],["azure_openai",{"_index":688,"t":{"39":{"position":[[1308,13]]}}}],["back",{"_index":516,"t":{"19":{"position":[[4937,4]]},"80":{"position":[[2070,4]]},"220":{"position":[[635,6]]}}}],["backend",{"_index":755,"t":{"39":{"position":[[3084,7]]},"45":{"position":[[382,7]]},"112":{"position":[[128,7],[424,7]]}}}],["base",{"_index":233,"t":{"8":{"position":[[925,5]]},"10":{"position":[[917,5]]},"19":{"position":[[1843,5],[5710,5]]},"37":{"position":[[493,5]]},"39":{"position":[[2640,5]]},"55":{"position":[[14,5]]},"74":{"position":[[535,5]]},"76":{"position":[[41,5],[304,5],[397,5],[558,5],[663,5],[1124,5]]},"78":{"position":[[299,5],[956,5]]},"80":{"position":[[207,5]]},"82":{"position":[[912,5],[1044,5]]},"88":{"position":[[170,5],[587,5],[880,5]]},"99":{"position":[[589,5]]},"101":{"position":[[26,5]]},"104":{"position":[[152,5],[234,5],[546,5]]},"148":{"position":[[387,5]]},"190":{"position":[[56,5]]},"212":{"position":[[924,5]]},"216":{"position":[[1161,5]]}}}],["basic",{"_index":1043,"t":{"80":{"position":[[801,9]]}}}],["be",{"_index":1497,"t":{"220":{"position":[[1100,5]]}}}],["beautifulsoup",{"_index":743,"t":{"39":{"position":[[2840,16]]}}}],["becom",{"_index":224,"t":{"8":{"position":[[720,6]]}}}],["befor",{"_index":294,"t":{"14":{"position":[[135,6]]},"80":{"position":[[723,7]]},"82":{"posit
ion":[[1213,6]]},"198":{"position":[[498,6]]}}}],["behavior",{"_index":737,"t":{"39":{"position":[[2618,8]]}}}],["below",{"_index":283,"t":{"12":{"position":[[113,6]]},"19":{"position":[[768,6],[1299,6],[5018,6],[5345,5],[8861,5]]},"37":{"position":[[141,6]]},"39":{"position":[[495,6],[998,5]]},"76":{"position":[[8,6]]},"82":{"position":[[1492,6]]},"108":{"position":[[767,6]]}}}],["best",{"_index":249,"t":{"10":{"position":[[240,4]]},"25":{"position":[[240,4]]},"29":{"position":[[10,4]]},"37":{"position":[[276,4]]},"68":{"position":[[721,4]]},"220":{"position":[[433,4]]},"222":{"position":[[622,4]]}}}],["bet",{"_index":1472,"t":{"220":{"position":[[438,3]]}}}],["beta",{"_index":1161,"t":{"112":{"position":[[484,6]]},"174":{"position":[[280,4]]},"198":{"position":[[794,4]]}}}],["betasmart_llm_model=huggingfaceh4/zephyr",{"_index":1311,"t":{"174":{"position":[[236,40]]}}}],["better",{"_index":99,"t":{"4":{"position":[[288,6]]},"6":{"position":[[560,6]]},"10":{"position":[[1135,7]]},"47":{"position":[[95,6]]},"72":{"position":[[226,6]]},"148":{"position":[[169,6]]}}}],["between",{"_index":405,"t":{"19":{"position":[[1390,7]]},"39":{"position":[[2040,7]]},"80":{"position":[[1121,7]]},"168":{"position":[[112,7]]}}}],["bia",{"_index":154,"t":{"6":{"position":[[299,5]]},"86":{"position":[[626,4]]}}}],["bias",{"_index":163,"t":{"6":{"position":[[412,6],[1212,6]]},"86":{"position":[[563,6]]}}}],["big",{"_index":1538,"t":{"220":{"position":[[2164,3],[2508,3]]}}}],["biggest",{"_index":137,"t":{"6":{"position":[[4,7]]}}}],["bing",{"_index":681,"t":{"39":{"position":[[1143,5]]},"180":{"position":[[556,4]]}}}],["bit",{"_index":459,"t":{"19":{"position":[[3252,3]]},"174":{"position":[[28,3]]}}}],["blog",{"_index":374,"t":{"19":{"position":[[626,4]]}}}],["blue",{"_index":1452,"t":{"216":{"position":[[1149,4]]}}}],["bool",{"_index":1579,"t":{"228":{"position":[[81,5],[874,5]]}}}],["boss",{"_index":1537,"t":{"220":{"position":[[2154,4]]}}}],["both",{"_index":195,"t":{"6":{"position":[[1412,4]]},"70":{"position":[[167,4]]},"82":{"position":[[543,4]]},"88":{"position":[[332,4]]},"160":{"position":[[24,4],[80,5],[165,5]]},"214":{"position":[[126,4]]}}}],["box",{"_index":1092,"t":{"82":{"position":[[478,3]]}}}],["break",{"_index":124,"t":{"4":{"position":[[751,8]]},"12":{"position":[[69,5]]},"19":{"position":[[7967,5]]},"220":{"position":[[1870,5]]}}}],["brew",{"_index":1354,"t":{"192":{"position":[[297,4],[386,4]]},"194":{"position":[[48,4],[291,5],[297,4],[354,4],[525,6]]}}}],["brief",{"_index":1518,"t":{"220":{"position":[[1624,5]]}}}],["bring",{"_index":1104,"t":{"82":{"position":[[1311,7]]}}}],["brows",{"_index":707,"t":{"39":{"position":[[1852,6]]},"76":{"position":[[262,7]]},"104":{"position":[[110,7]]}}}],["browse_chunk_max_length",{"_index":704,"t":{"39":{"position":[[1794,24]]}}}],["browse_websit",{"_index":1594,"t":{"228":{"position":[[687,14]]}}}],["browser",{"_index":832,"t":{"53":{"position":[[182,7]]},"61":{"position":[[194,7]]},"63":{"position":[[301,7]]},"76":{"position":[[235,7]]},"104":{"position":[[83,7]]},"114":{"position":[[495,7]]},"198":{"position":[[75,7],[165,7]]}}}],["bs",{"_index":742,"t":{"39":{"position":[[2837,2]]}}}],["build",{"_index":356,"t":{"19":{"position":[[234,5],[1553,5],[3556,5]]},"23":{"position":[[197,5]]},"68":{"position":[[85,9],[746,5]]},"70":{"position":[[615,9]]},"78":{"position":[[1311,8]]},"114":{"position":[[252,5]]},"148":{"position":[[163,5],[231,5]]}}}],["built",{"_index":579,"t":{"25":{"position":[[229,5]]},"70":{"position":[[156,5]]},"80":{"position":[[2099
,6]]},"82":{"position":[[744,5]]},"234":{"position":[[330,5]]}}}],["bullshit",{"_index":1547,"t":{"220":{"position":[[2581,8]]}}}],["burn",{"_index":618,"t":{"35":{"position":[[190,7]]},"37":{"position":[[370,7]]}}}],["busi",{"_index":654,"t":{"37":{"position":[[1244,8]]},"82":{"position":[[598,8]]}}}],["call",{"_index":359,"t":{"19":{"position":[[292,4],[421,8],[733,7],[1071,8],[1258,5],[2216,4],[3088,4],[4115,5],[4196,4],[4861,4],[4991,4]]},"27":{"position":[[124,5]]},"37":{"position":[[165,7]]},"39":{"position":[[422,5]]},"68":{"position":[[319,8]]},"80":{"position":[[2176,6]]},"202":{"position":[[153,4]]}}}],["came",{"_index":8,"t":{"2":{"position":[[88,4]]}}}],["capabl",{"_index":829,"t":{"49":{"position":[[529,13]]}}}],["care",{"_index":494,"t":{"19":{"position":[[4160,4]]},"25":{"position":[[77,4]]},"80":{"position":[[1033,4]]},"234":{"position":[[426,4]]}}}],["carri",{"_index":114,"t":{"4":{"position":[[474,8]]}}}],["case",{"_index":7,"t":{"2":{"position":[[78,4]]},"19":{"position":[[4050,5]]},"70":{"position":[[409,4]]},"78":{"position":[[463,4]]},"82":{"position":[[624,6]]},"86":{"position":[[520,5]]},"192":{"position":[[252,4],[404,4]]},"194":{"position":[[3,4],[70,4]]},"196":{"position":[[80,6]]}}}],["catch",{"_index":518,"t":{"19":{"position":[[4969,5]]}}}],["caus",{"_index":143,"t":{"6":{"position":[[81,6]]},"80":{"position":[[427,5],[2951,5]]}}}],["cd",{"_index":799,"t":{"45":{"position":[[162,2]]},"116":{"position":[[0,2]]}}}],["chain",{"_index":583,"t":{"25":{"position":[[301,7]]}}}],["challeng",{"_index":100,"t":{"4":{"position":[[312,10],[981,9]]},"6":{"position":[[12,9],[320,11]]},"10":{"position":[[92,9]]}}}],["chang",{"_index":759,"t":{"39":{"position":[[3183,6],[3361,6]]},"108":{"position":[[3,6]]}}}],["charactertextsplitter(chunk_size=200",{"_index":1552,"t":{"220":{"position":[[2732,37]]}}}],["charactertextsplitterfrom",{"_index":1462,"t":{"220":{"position":[[76,25]]}}}],["chat",{"_index":1305,"t":{"172":{"position":[[227,4],[273,4]]}}}],["chatgpt",{"_index":368,"t":{"19":{"position":[[542,7]]},"86":{"position":[[440,7]]}}}],["check",{"_index":333,"t":{"17":{"position":[[51,5],[103,5]]},"19":{"position":[[788,5]]},"23":{"position":[[136,5],[326,5]]},"25":{"position":[[488,5]]},"31":{"position":[[409,5]]},"39":{"position":[[1172,5],[3703,5]]},"68":{"position":[[859,5]]},"78":{"position":[[3257,6]]},"80":{"position":[[3273,5]]},"148":{"position":[[572,5]]},"198":{"position":[[634,5]]},"214":{"position":[[417,5]]},"230":{"position":[[187,5],[331,5],[482,5],[622,5],[754,5]]},"234":{"position":[[596,5]]}}}],["check_google_api_key",{"_index":1607,"t":{"230":{"position":[[417,21]]}}}],["check_google_api_key(cfg",{"_index":1608,"t":{"230":{"position":[[443,25]]}}}],["check_openai_api_key",{"_index":1603,"t":{"230":{"position":[[122,21]]}}}],["check_openai_api_key(cfg",{"_index":1604,"t":{"230":{"position":[[148,25]]}}}],["check_searx_url",{"_index":1611,"t":{"230":{"position":[[699,16]]}}}],["check_searx_url(cfg",{"_index":1612,"t":{"230":{"position":[[720,20]]}}}],["check_serp_api_key",{"_index":1609,"t":{"230":{"position":[[561,19]]}}}],["check_serp_api_key(cfg",{"_index":1610,"t":{"230":{"position":[[585,23]]}}}],["check_tavily_api_key",{"_index":1605,"t":{"230":{"position":[[266,21]]}}}],["check_tavily_api_key(cfg",{"_index":1606,"t":{"230":{"position":[[292,25]]}}}],["chees",{"_index":1453,"t":{"216":{"position":[[1154,6],[1320,6],[1608,8]]}}}],["chief",{"_index":930,"t":{"74":{"position":[[48,5]]},"99":{"position":[[138,5]]}}}],["chiefeditor",{"_index":1068,"t":{"80":
{"position":[[2186,11]]}}}],["chip",{"_index":1368,"t":{"194":{"position":[[168,4]]}}}],["choic",{"_index":1330,"t":{"182":{"position":[[46,6]]}}}],["choos",{"_index":1099,"t":{"82":{"position":[[972,6]]},"160":{"position":[[65,6]]},"198":{"position":[[302,6]]}}}],["chrome",{"_index":1378,"t":{"198":{"position":[[68,6],[117,6],[154,6],[285,6],[589,6],[718,6]]}}}],["chromedriv",{"_index":1377,"t":{"198":{"position":[[36,12],[679,12]]}}}],["chunk",{"_index":706,"t":{"39":{"position":[[1842,6]]},"228":{"position":[[710,5]]}}}],["chunk_overlap=30",{"_index":1553,"t":{"220":{"position":[[2770,17]]}}}],["circul",{"_index":982,"t":{"78":{"position":[[875,10]]}}}],["circular",{"_index":1048,"t":{"80":{"position":[[1196,8]]}}}],["claim",{"_index":1543,"t":{"220":{"position":[[2352,8]]}}}],["class",{"_index":970,"t":{"78":{"position":[[506,5]]},"80":{"position":[[896,5]]},"202":{"position":[[0,5],[96,6]]},"204":{"position":[[0,5],[78,5],[120,6]]},"210":{"position":[[241,5]]},"212":{"position":[[416,6]]},"228":{"position":[[0,5],[53,5],[177,5]]},"230":{"position":[[0,5]]}}}],["claud",{"_index":1286,"t":{"168":{"position":[[70,7]]}}}],["clean",{"_index":856,"t":{"59":{"position":[[425,5]]}}}],["click",{"_index":1390,"t":{"198":{"position":[[439,5]]},"234":{"position":[[252,5]]}}}],["client",{"_index":530,"t":{"19":{"position":[[5478,7]]}}}],["client.beta.assistants.cr",{"_index":428,"t":{"19":{"position":[[2296,30],[7278,30]]}}}],["client.beta.threads.create()print(f\"thread",{"_index":551,"t":{"19":{"position":[[7813,43]]}}}],["client.beta.threads.create()user_input",{"_index":448,"t":{"19":{"position":[[2855,38]]}}}],["client.beta.threads.messages.cr",{"_index":450,"t":{"19":{"position":[[2920,36],[8002,36]]}}}],["client.beta.threads.messages.list(thread_id=thread_id",{"_index":543,"t":{"19":{"position":[[7120,54]]}}}],["client.beta.threads.runs.cr",{"_index":454,"t":{"19":{"position":[[3121,32],[8116,32]]}}}],["client.beta.threads.runs.retrieve(thread_id=thread_id",{"_index":484,"t":{"19":{"position":[[3782,54],[6267,54]]}}}],["client.beta.threads.runs.submit_tool_output",{"_index":509,"t":{"19":{"position":[[4680,45],[6910,45]]}}}],["clilanggraph",{"_index":1141,"t":{"110":{"position":[[22,12]]}}}],["clone",{"_index":1164,"t":{"114":{"position":[[33,5],[89,6]]}}}],["close",{"_index":1160,"t":{"112":{"position":[[477,6]]}}}],["cloud",{"_index":1158,"t":{"112":{"position":[[452,5]]}}}],["cm",{"_index":727,"t":{"39":{"position":[[2403,4]]}}}],["code",{"_index":361,"t":{"19":{"position":[[379,4],[1029,4],[5372,5],[8717,6]]},"37":{"position":[[1224,4]]},"76":{"position":[[1417,4]]},"78":{"position":[[3293,4]]},"134":{"position":[[651,4]]},"172":{"position":[[87,5]]}}}],["codebas",{"_index":1182,"t":{"120":{"position":[[434,8]]}}}],["collabor",{"_index":608,"t":{"31":{"position":[[396,12]]},"82":{"position":[[820,13]]},"148":{"position":[[541,13]]}}}],["collect",{"_index":941,"t":{"76":{"position":[[100,10]]},"101":{"position":[[77,10]]},"222":{"position":[[457,10]]}}}],["collection_name='som",{"_index":1573,"t":{"222":{"position":[[435,21]]}}}],["collection_name=collection_nam",{"_index":1447,"t":{"216":{"position":[[667,32]]}}}],["combin",{"_index":1420,"t":{"214":{"position":[[8,7]]},"220":{"position":[[2314,7]]}}}],["come",{"_index":912,"t":{"70":{"position":[[138,5]]},"112":{"position":[[165,5],[516,6]]}}}],["comma",{"_index":1315,"t":{"180":{"position":[[353,7]]}}}],["command",{"_index":842,"t":{"57":{"position":[[208,7],[684,8]]},"59":{"position":[[211,8],[718,8]]},"61":{"position":[[105,8]]},"228
":{"position":[[702,7]]}}}],["comment",{"_index":565,"t":{"19":{"position":[[8853,7]]},"82":{"position":[[1484,7]]},"114":{"position":[[169,7]]}}}],["commun",{"_index":340,"t":{"17":{"position":[[166,9]]},"31":{"position":[[335,10]]},"39":{"position":[[129,9]]},"80":{"position":[[1109,11]]},"82":{"position":[[1404,10]]},"180":{"position":[[475,10]]}}}],["compani",{"_index":1284,"t":{"168":{"position":[[39,8]]},"220":{"position":[[802,7],[1854,7],[2168,8],[2512,8]]}}}],["compat",{"_index":1380,"t":{"198":{"position":[[106,10],[358,10]]}}}],["compil",{"_index":939,"t":{"74":{"position":[[601,9]]},"76":{"position":[[1165,8]]},"99":{"position":[[655,9]]},"104":{"position":[[587,8]]}}}],["complement",{"_index":826,"t":{"49":{"position":[[483,13]]}}}],["complet",{"_index":53,"t":{"2":{"position":[[657,9]]},"4":{"position":[[113,10],[737,10],[837,10],[938,10]]},"19":{"position":[[3912,13],[4067,9],[6397,13],[8244,8]]},"37":{"position":[[1323,8]]},"88":{"position":[[378,8],[510,9]]},"108":{"position":[[797,9]]}}}],["completedef",{"_index":479,"t":{"19":{"position":[[3694,11],[6179,11]]}}}],["complex",{"_index":655,"t":{"37":{"position":[[1342,7]]}}}],["compon",{"_index":108,"t":{"4":{"position":[[382,11]]}}}],["compos",{"_index":880,"t":{"63":{"position":[[241,7]]},"114":{"position":[[156,7],[239,7],[335,7]]}}}],["comprehens",{"_index":35,"t":{"2":{"position":[[387,13],[1065,13]]},"10":{"position":[[116,13]]},"14":{"position":[[164,13]]}}}],["compress",{"_index":1508,"t":{"220":{"position":[[1380,8]]}}}],["comput",{"_index":1501,"t":{"220":{"position":[[1247,8]]}}}],["concern",{"_index":924,"t":{"72":{"position":[[247,9]]}}}],["conclud",{"_index":128,"t":{"4":{"position":[[858,9]]},"10":{"position":[[221,9]]}}}],["conclus",{"_index":15,"t":{"2":{"position":[[151,11]]},"14":{"position":[[868,12]]},"76":{"position":[[1229,10]]},"78":{"position":[[709,11]]},"86":{"position":[[18,11],[548,11],[656,11]]},"93":{"position":[[206,11]]},"104":{"position":[[651,10]]},"220":{"position":[[3100,15]]}}}],["condit",{"_index":1039,"t":{"80":{"position":[[438,10],[1205,9],[1286,11],[1916,11]]}}}],["conduc",{"_index":874,"t":{"59":{"position":[[1187,9]]}}}],["conduct",{"_index":34,"t":{"2":{"position":[[379,7]]},"35":{"position":[[370,7]]},"72":{"position":[[433,7]]},"74":{"position":[[295,8]]},"78":{"position":[[1721,7]]},"97":{"position":[[251,7]]},"99":{"position":[[349,8]]},"108":{"position":[[606,7]]},"208":{"position":[[129,7]]},"210":{"position":[[72,10]]},"212":{"position":[[492,7]]},"214":{"position":[[37,7]]},"220":{"position":[[3291,7]]},"222":{"position":[[827,7]]}}}],["confer",{"_index":1194,"t":{"127":{"position":[[24,11]]}}}],["config",{"_index":1582,"t":{"228":{"position":[[170,6]]}}}],["config(metaclass=singleton",{"_index":1578,"t":{"228":{"position":[[6,27]]}}}],["config.json",{"_index":677,"t":{"39":{"position":[[877,11],[3777,11]]}}}],["config.pi",{"_index":658,"t":{"39":{"position":[[17,9],[698,9],[952,9]]},"230":{"position":[[81,9],[225,9],[376,9],[520,9],[658,9],[787,9]]}}}],["config_fil",{"_index":679,"t":{"39":{"position":[[915,11],[3815,11]]}}}],["config_path=non",{"_index":625,"t":{"35":{"position":[[350,17]]},"37":{"position":[[607,17]]},"78":{"position":[[1701,17]]}}}],["configur",{"_index":760,"t":{"39":{"position":[[3202,15]]},"57":{"position":[[62,14]]},"166":{"position":[[30,9]]},"228":{"position":[[39,13]]}}}],["conflict",{"_index":859,"t":{"59":{"position":[[487,9],[969,9]]},"198":{"position":[[605,10]]}}}],["connect",{"_index":569,"t":{"23":{"position":[[66,7]]},"78":{"position":
[[3197,7]]}}}],["connection=connection_str",{"_index":1574,"t":{"222":{"position":[[475,29]]}}}],["connection=engin",{"_index":1448,"t":{"216":{"position":[[700,18]]}}}],["consid",{"_index":725,"t":{"39":{"position":[[2376,8],[2745,9]]},"78":{"position":[[392,11]]},"86":{"position":[[468,8]]},"222":{"position":[[611,10]]}}}],["consist",{"_index":106,"t":{"4":{"position":[[366,8]]},"59":{"position":[[339,10]]},"74":{"position":[[18,8]]},"78":{"position":[[3055,8]]}}}],["consol",{"_index":772,"t":{"39":{"position":[[3615,7]]},"108":{"position":[[1018,8]]}}}],["constant",{"_index":637,"t":{"37":{"position":[[298,9]]}}}],["constantli",{"_index":48,"t":{"2":{"position":[[592,10]]},"27":{"position":[[98,10]]},"31":{"position":[[6,10]]},"148":{"position":[[14,10]]}}}],["consum",{"_index":1221,"t":{"142":{"position":[[31,8]]}}}],["contact",{"_index":609,"t":{"31":{"position":[[450,7]]}}}],["contain",{"_index":1135,"t":{"108":{"position":[[112,8]]},"222":{"position":[[256,8]]}}}],["content",{"_index":160,"t":{"6":{"position":[[386,7],[745,8],[795,7],[923,7],[1070,7]]},"14":{"position":[[538,7]]},"29":{"position":[[449,7]]},"37":{"position":[[1080,7],[1120,8]]},"78":{"position":[[858,8]]},"86":{"position":[[499,7]]},"140":{"position":[[108,8]]},"186":{"position":[[164,8],[239,8]]}}}],["content=user_input",{"_index":453,"t":{"19":{"position":[[2991,20],[8073,19]]}}}],["context",{"_index":55,"t":{"2":{"position":[[710,7]]},"10":{"position":[[335,7]]},"19":{"position":[[2198,9]]},"29":{"position":[[380,7]]},"37":{"position":[[1048,7]]},"78":{"position":[[321,7]]},"88":{"position":[[366,8]]},"93":{"position":[[340,7]]},"140":{"position":[[0,7]]}}}],["continu",{"_index":232,"t":{"8":{"position":[[901,8]]},"14":{"position":[[85,9]]},"19":{"position":[[8346,8]]},"39":{"position":[[616,8]]},"234":{"position":[[239,12]]}}}],["contribut",{"_index":329,"t":{"17":{"position":[[0,10],[29,14],[61,12]]},"39":{"position":[[143,14]]},"148":{"position":[[558,13],[586,12]]},"186":{"position":[[388,10]]}}}],["control",{"_index":873,"t":{"59":{"position":[[1164,10]]},"70":{"position":[[266,15]]},"82":{"position":[[382,7]]}}}],["convers",{"_index":397,"t":{"19":{"position":[[1117,13],[1368,12],[7877,12]]},"148":{"position":[[454,14]]}}}],["coordin",{"_index":934,"t":{"74":{"position":[[147,11]]},"99":{"position":[[237,11]]}}}],["copi",{"_index":228,"t":{"8":{"position":[[801,4]]},"10":{"position":[[966,4]]},"19":{"position":[[2100,4],[2741,4],[3012,4],[3203,4],[3967,4],[4795,4],[5207,4],[8610,4]]},"35":{"position":[[562,4]]},"37":{"position":[[976,4]]},"39":{"position":[[3452,4]]},"43":{"position":[[170,4],[220,4]]},"45":{"position":[[172,4],[229,4],[287,4],[330,4]]},"53":{"position":[[64,4],[133,4]]},"57":{"position":[[267,4],[396,4],[517,4],[735,4]]},"59":{"position":[[630,4],[1248,4]]},"61":{"position":[[150,4]]},"63":{"position":[[198,4],[252,4]]},"78":{"position":[[756,4],[1194,4],[1867,4],[2977,4]]},"80":{"position":[[991,4],[1895,4],[2842,4]]},"106":{"position":[[59,4],[182,4],[223,4]]},"108":{"position":[[1435,4]]},"110":{"position":[[38,4]]},"114":{"position":[[258,4]]},"116":{"position":[[88,4]]},"120":{"position":[[195,4],[336,4],[390,4]]},"122":{"position":[[492,4]]},"125":{"position":[[94,4]]},"127":{"position":[[76,4]]},"129":{"position":[[96,4]]},"132":{"position":[[400,4]]},"134":{"position":[[78,4],[480,4],[510,4],[630,4]]},"138":{"position":[[119,4]]},"140":{"position":[[170,4]]},"142":{"position":[[109,4]]},"144":{"position":[[85,4]]},"146":{"position":[[126,4]]},"154":{"position":[[325,4]]},"156":{"pos
ition":[[286,4]]},"158":{"position":[[471,4]]},"160":{"position":[[484,4],[662,4]]},"166":{"position":[[387,4]]},"168":{"position":[[203,4],[335,4],[467,4]]},"170":{"position":[[199,4]]},"172":{"position":[[281,4]]},"174":{"position":[[285,4]]},"176":{"position":[[218,4]]},"180":{"position":[[283,4],[456,4]]},"184":{"position":[[97,4]]},"186":{"position":[[262,4]]},"202":{"position":[[35,4],[148,4]]},"204":{"position":[[54,4]]},"208":{"position":[[761,4]]},"210":{"position":[[812,4]]},"212":{"position":[[354,4],[1137,4]]},"216":{"position":[[1752,4]]},"220":{"position":[[3404,4]]},"222":{"position":[[940,4]]},"228":{"position":[[34,4],[150,4],[246,4],[346,4],[449,4],[555,4],[674,4],[797,4],[888,4]]},"230":{"position":[[29,4],[182,4],[326,4],[477,4],[617,4],[749,4]]}}}],["corn",{"_index":1503,"t":{"220":{"position":[[1282,4]]}}}],["coroutin",{"_index":216,"t":{"8":{"position":[[587,9]]}}}],["corpor",{"_index":1531,"t":{"220":{"position":[[1930,9],[2438,9],[2571,9]]}}}],["correct",{"_index":872,"t":{"59":{"position":[[1107,7]]},"74":{"position":[[435,11]]},"76":{"position":[[973,11]]},"82":{"position":[[360,12]]},"99":{"position":[[489,11]]},"104":{"position":[[421,11]]}}}],["correctli",{"_index":1340,"t":{"186":{"position":[[33,10]]}}}],["correspond",{"_index":1218,"t":{"140":{"position":[[94,13]]},"170":{"position":[[47,13]]},"172":{"position":[[105,13]]},"176":{"position":[[138,13]]},"198":{"position":[[452,13]]}}}],["cost",{"_index":591,"t":{"27":{"position":[[37,5],[140,5]]},"88":{"position":[[420,5],[524,5]]},"142":{"position":[[0,5]]},"146":{"position":[[17,5],[72,5]]}}}],["count",{"_index":661,"t":{"39":{"position":[[302,6],[2230,5]]}}}],["coupl",{"_index":1300,"t":{"172":{"position":[[71,6]]}}}],["cover",{"_index":1287,"t":{"168":{"position":[[88,6]]}}}],["crawl",{"_index":752,"t":{"39":{"position":[[3041,8]]}}}],["crawler",{"_index":286,"t":{"12":{"position":[[269,7]]},"88":{"position":[[754,7]]}}}],["creat",{"_index":28,"t":{"2":{"position":[[290,7],[997,7]]},"4":{"position":[[556,6]]},"6":{"position":[[1405,6]]},"8":{"position":[[85,6],[561,6]]},"19":{"position":[[911,6],[1080,6],[2137,6],[2265,6],[7247,6],[7789,6],[7975,6],[8097,6]]},"57":{"position":[[77,6]]},"59":{"position":[[137,6]]},"63":{"position":[[96,6]]},"70":{"position":[[48,8],[112,6],[197,8],[303,8],[476,6]]},"78":{"position":[[1900,7],[2016,8],[2997,8]]},"80":{"position":[[518,6],[1185,6]]},"86":{"position":[[619,6]]},"88":{"position":[[285,7],[556,6]]},"120":{"position":[[223,6]]},"152":{"position":[[0,6]]},"158":{"position":[[96,6]]},"212":{"position":[[376,6]]},"220":{"position":[[3116,6]]},"222":{"position":[[534,6],[651,6]]}}}],["create_engine(connection_str",{"_index":1441,"t":{"216":{"position":[[526,32]]}}}],["create_engineimport",{"_index":1435,"t":{"216":{"position":[[338,19]]}}}],["creation",{"_index":1560,"t":{"220":{"position":[[3037,9]]}}}],["creativ",{"_index":716,"t":{"39":{"position":[[2103,11]]}}}],["creator",{"_index":1285,"t":{"168":{"position":[[59,7]]}}}],["criteria",{"_index":153,"t":{"6":{"position":[[274,9]]},"74":{"position":[[486,9]]},"99":{"position":[[540,9]]},"104":{"position":[[461,8]]}}}],["crucial",{"_index":150,"t":{"6":{"position":[[234,7]]},"76":{"position":[[351,7]]},"198":{"position":[[544,7]]},"234":{"position":[[63,7]]}}}],["csv",{"_index":372,"t":{"19":{"position":[[610,3]]},"212":{"position":[[164,4]]}}}],["curl",{"_index":1212,"t":{"134":{"position":[[531,4]]}}}],["current",{"_index":146,"t":{"6":{"position":[[144,9]]},"10":{"position":[[257,9]]},"19":{"position":[[338,9]]}
,"31":{"position":[[71,9]]},"39":{"position":[[1017,7]]},"68":{"position":[[204,7],[713,7]]},"86":{"position":[[132,7],[277,7]]},"112":{"position":[[463,10]]},"198":{"position":[[570,7]]},"212":{"position":[[109,9]]}}}],["custom",{"_index":389,"t":{"19":{"position":[[958,6],[8644,10]]},"39":{"position":[[42,9],[786,11],[1322,7],[2604,9],[3008,6]]},"70":{"position":[[312,6],[373,10],[493,10]]},"80":{"position":[[2346,10]]},"108":{"position":[[33,9]]},"134":{"position":[[692,9]]},"154":{"position":[[8,6],[69,6],[142,6],[202,6],[272,6]]},"156":{"position":[[8,6],[81,6],[154,6],[214,6]]},"182":{"position":[[21,6],[97,6],[374,6]]},"186":{"position":[[8,6]]}}}],["custom_report",{"_index":1406,"t":{"210":{"position":[[255,15],[596,15]]}}}],["customiz",{"_index":589,"t":{"25":{"position":[[395,12]]},"49":{"position":[[265,12]]},"72":{"position":[[257,16]]}}}],["cycl",{"_index":1067,"t":{"80":{"position":[[2022,5],[2433,8]]},"108":{"position":[[1069,8]]}}}],["cyclic",{"_index":911,"t":{"70":{"position":[[119,8]]}}}],["data",{"_index":371,"t":{"19":{"position":[[581,4]]},"31":{"position":[[164,4]]},"39":{"position":[[3155,5]]},"76":{"position":[[95,4],[478,4]]},"78":{"position":[[408,4],[870,4],[951,4]]},"80":{"position":[[482,4]]},"82":{"position":[[562,4]]},"101":{"position":[[72,4]]}}}],["dataset",{"_index":147,"t":{"6":{"position":[[165,8]]}}}],["date",{"_index":144,"t":{"6":{"position":[[117,4]]},"76":{"position":[[412,4]]},"78":{"position":[[658,5]]},"220":{"position":[[559,5]]}}}],["day",{"_index":296,"t":{"14":{"position":[[208,3],[215,3]]}}}],["deactiv",{"_index":847,"t":{"57":{"position":[[404,10],[458,12],[506,10]]}}}],["deal",{"_index":472,"t":{"19":{"position":[[3489,7]]}}}],["debug",{"_index":1600,"t":{"228":{"position":[[901,5]]}}}],["decreas",{"_index":671,"t":{"39":{"position":[[628,9]]}}}],["dedic",{"_index":866,"t":{"59":{"position":[[832,9]]}}}],["def",{"_index":614,"t":{"35":{"position":[[60,3]]},"37":{"position":[[427,3],[721,3]]},"78":{"position":[[1514,3],[1545,3],[2086,3]]},"80":{"position":[[1311,3]]},"122":{"position":[[114,3]]},"132":{"position":[[135,3]]},"134":{"position":[[234,3]]},"202":{"position":[[113,3]]},"208":{"position":[[231,3]]},"210":{"position":[[344,3]]},"212":{"position":[[596,3]]},"216":{"position":[[781,3]]},"228":{"position":[[127,3],[203,3],[302,3],[404,3],[509,3],[622,3],[754,3],[848,3]]},"230":{"position":[[144,3],[288,3],[439,3],[581,3],[716,3]]}}}],["default",{"_index":663,"t":{"39":{"position":[[356,8],[690,7],[1102,8],[1258,8],[1358,8],[1490,8],[1612,8],[1694,8],[1776,8],[1875,8],[1960,8],[2188,8],[2287,8],[2359,8],[2536,8],[2672,7],[2755,8],[2825,8],[2938,8],[3161,8],[3194,7]]},"114":{"position":[[275,8]]},"160":{"position":[[451,8]]},"166":{"position":[[354,8]]},"180":{"position":[[15,8],[548,7]]}}}],["defin",{"_index":388,"t":{"19":{"position":[[945,8],[1714,6]]},"37":{"position":[[284,6]]},"78":{"position":[[147,6],[474,6],[2288,6]]},"80":{"position":[[737,6],[855,6],[1903,8],[2999,7],[3048,7]]},"168":{"position":[[221,6],[353,6]]}}}],["degre",{"_index":914,"t":{"70":{"position":[[256,6]]}}}],["delet",{"_index":1152,"t":{"112":{"position":[[257,8]]}}}],["deliv",{"_index":1260,"t":{"162":{"position":[[75,7]]}}}],["demo",{"_index":574,"t":{"23":{"position":[[336,5]]},"43":{"position":[[257,5]]},"45":{"position":[[440,5]]},"91":{"position":[[33,4]]}}}],["demonstr",{"_index":375,"t":{"19":{"position":[[636,11]]}}}],["dep",{"_index":805,"t":{"45":{"position":[[282,4]]}}}],["depend",{"_index":567,"t":{"23":{"position":[[10,7]]},"45":{"position":[[2
42,13]]},"53":{"position":[[17,12]]},"57":{"position":[[534,12],[620,12]]},"59":{"position":[[24,12],[105,12],[295,12],[462,13],[557,10],[929,12],[1127,12]]}}}],["deploy",{"_index":573,"t":{"23":{"position":[[207,6]]},"47":{"position":[[42,11]]},"68":{"position":[[108,9]]},"158":{"position":[[103,11]]}}}],["depsnpm",{"_index":1175,"t":{"116":{"position":[[72,7]]}}}],["depth",{"_index":261,"t":{"10":{"position":[[711,6]]},"39":{"position":[[329,6]]},"72":{"position":[[79,5]]},"74":{"position":[[307,5]]},"76":{"position":[[772,5],[913,5]]},"97":{"position":[[74,5]]},"99":{"position":[[361,5]]},"104":{"position":[[344,5]]}}}],["descript",{"_index":435,"t":{"19":{"position":[[2466,14],[2609,14],[7448,14],[7591,14]]}}}],["design",{"_index":603,"t":{"31":{"position":[[131,6]]},"49":{"position":[[350,7]]},"78":{"position":[[384,7]]},"162":{"position":[[63,8]]}}}],["desir",{"_index":1389,"t":{"198":{"position":[[422,7]]}}}],["detail",{"_index":254,"t":{"10":{"position":[[402,8],[595,8]]},"45":{"position":[[418,8]]},"86":{"position":[[359,8]]},"93":{"position":[[87,8]]},"108":{"position":[[997,8]]},"136":{"position":[[37,7]]},"144":{"position":[[41,8]]},"210":{"position":[[675,8]]}}}],["detergent?th",{"_index":1506,"t":{"220":{"position":[[1302,13]]}}}],["determin",{"_index":854,"t":{"59":{"position":[[272,9]]},"86":{"position":[[634,11]]}}}],["determinist",{"_index":85,"t":{"4":{"position":[[58,13],[629,17],[783,13]]},"6":{"position":[[1417,14]]},"39":{"position":[[2163,13]]},"82":{"position":[[394,13]]}}}],["dev",{"_index":806,"t":{"45":{"position":[[326,3]]},"116":{"position":[[84,3]]},"192":{"position":[[477,3]]}}}],["devday",{"_index":344,"t":{"19":{"position":[[47,6]]}}}],["develop",{"_index":355,"t":{"19":{"position":[[220,10]]},"45":{"position":[[298,11]]},"59":{"position":[[363,11],[852,11],[1210,11]]},"70":{"position":[[233,10],[541,9]]},"72":{"position":[[286,11]]},"78":{"position":[[136,10],[1296,11]]},"125":{"position":[[16,12]]},"148":{"position":[[277,10]]},"220":{"position":[[1168,10]]}}}],["devic",{"_index":824,"t":{"49":{"position":[[392,8]]}}}],["devis",{"_index":109,"t":{"4":{"position":[[401,8]]}}}],["diagram",{"_index":1130,"t":{"104":{"position":[[47,8]]}}}],["dict",{"_index":972,"t":{"78":{"position":[[544,4],[653,4]]},"80":{"position":[[931,4],[954,4],[1359,6]]},"132":{"position":[[183,5]]}}}],["dict[str",{"_index":1440,"t":{"216":{"position":[[500,9]]}}}],["dictfrom",{"_index":1428,"t":{"216":{"position":[[174,8]]}}}],["differ",{"_index":1580,"t":{"228":{"position":[[91,9]]}}}],["direct",{"_index":1066,"t":{"80":{"position":[[1951,6]]},"210":{"position":[[135,9]]}}}],["directori",{"_index":765,"t":{"39":{"position":[[3325,10]]},"45":{"position":[[151,10]]},"108":{"position":[[91,10],[559,10]]},"112":{"position":[[35,10]]}}}],["discord",{"_index":339,"t":{"17":{"position":[[158,7]]},"82":{"position":[[1396,7]]}}}],["discuss",{"_index":1047,"t":{"80":{"position":[[1054,10]]}}}],["display",{"_index":820,"t":{"49":{"position":[[213,8]]},"112":{"position":[[98,7]]}}}],["disrupt",{"_index":292,"t":{"14":{"position":[[67,11],[242,7]]},"82":{"position":[[1226,10]]}}}],["dive",{"_index":955,"t":{"76":{"position":[[1399,4]]},"78":{"position":[[3423,4]]}}}],["divid",{"_index":110,"t":{"4":{"position":[[420,6]]},"78":{"position":[[789,7]]}}}],["do",{"_index":919,"t":{"70":{"position":[[566,5]]}}}],["doc",{"_index":774,"t":{"39":{"position":[[3717,4]]},"166":{"position":[[424,3]]},"212":{"position":[[348,5]]},"214":{"position":[[340,3]]}}}],["doc_path",{"_index":745,"t":{"39":{"position
":[[2885,9]]},"108":{"position":[[673,8]]},"212":{"position":[[247,8]]}}}],["doc_path=\"./mi",{"_index":1414,"t":{"212":{"position":[[333,14]]}}}],["docker",{"_index":877,"t":{"63":{"position":[[17,6],[234,6]]},"114":{"position":[[17,6],[149,6],[222,7],[232,6],[328,6]]}}}],["document",{"_index":572,"t":{"23":{"position":[[161,14],[298,14]]},"25":{"position":[[502,13]]},"37":{"position":[[1229,14]]},"39":{"position":[[2246,8],[2927,10]]},"110":{"position":[[59,13]]},"112":{"position":[[292,9]]},"158":{"position":[[13,13]]},"182":{"position":[[183,9]]},"212":{"position":[[57,9],[98,10],[207,10],[290,9],[525,10],[936,11]]},"214":{"position":[[153,10],[359,9],[442,14]]},"216":{"position":[[71,8],[846,10],[1173,11],[1300,9],[1544,9],[1592,9]]},"220":{"position":[[229,9]]},"222":{"position":[[278,10]]},"234":{"position":[[610,13]]}}}],["document(page_content=essay)]text_splitt",{"_index":1551,"t":{"220":{"position":[[2686,43]]}}}],["documentfrom",{"_index":1427,"t":{"216":{"position":[[141,12]]}}}],["documents=docu",{"_index":1451,"t":{"216":{"position":[[975,20],[1716,21]]}}}],["docx",{"_index":954,"t":{"76":{"position":[[1366,5]]},"80":{"position":[[2513,7],[3235,5]]},"97":{"position":[[401,4]]},"104":{"position":[[788,5]]},"108":{"position":[[1168,7]]}}}],["doesn't",{"_index":1379,"t":{"198":{"position":[[91,7]]}}}],["doesn’t",{"_index":463,"t":{"19":{"position":[[3316,7]]}}}],["domain",{"_index":1117,"t":{"88":{"position":[[565,6]]}}}],["don't",{"_index":1167,"t":{"114":{"position":[[199,5]]},"194":{"position":[[202,5]]},"220":{"position":[[909,5]]}}}],["done",{"_index":231,"t":{"8":{"position":[[895,5]]},"19":{"position":[[11,4]]},"78":{"position":[[1966,4]]},"80":{"position":[[718,4]]},"82":{"position":[[659,4]]},"220":{"position":[[2008,4]]}}}],["down",{"_index":280,"t":{"12":{"position":[[75,4]]},"78":{"position":[[1032,4]]},"198":{"position":[[244,4]]},"220":{"position":[[2602,4]]}}}],["downgrad",{"_index":1382,"t":{"198":{"position":[[139,9],[659,9]]}}}],["download",{"_index":1087,"t":{"80":{"position":[[3244,8]]},"198":{"position":[[474,8]]}}}],["draft",{"_index":945,"t":{"76":{"position":[[817,6],[992,5],[1093,5]]},"80":{"position":[[842,6],[877,5],[947,6],[1798,6],[2053,6]]},"104":{"position":[[389,6],[440,5],[515,5]]}}}],["draft['review",{"_index":1065,"t":{"80":{"position":[[1817,15]]}}}],["draftstat",{"_index":1046,"t":{"80":{"position":[[1011,11]]}}}],["draftstate(typeddict",{"_index":1044,"t":{"80":{"position":[[902,22]]}}}],["drag",{"_index":1149,"t":{"112":{"position":[[212,4]]},"220":{"position":[[2096,4]]}}}],["drive",{"_index":896,"t":{"68":{"position":[[456,5]]}}}],["driver",{"_index":1381,"t":{"198":{"position":[[124,6],[725,6]]}}}],["drop",{"_index":564,"t":{"19":{"position":[[8846,4]]},"112":{"position":[[219,4]]}}}],["drug",{"_index":1500,"t":{"220":{"position":[[1238,5]]}}}],["duckduckgo",{"_index":680,"t":{"39":{"position":[[1131,11]]},"180":{"position":[[708,10]]}}}],["dure",{"_index":57,"t":{"2":{"position":[[727,6]]},"142":{"position":[[40,6]]},"222":{"position":[[627,6]]}}}],["dynam",{"_index":966,"t":{"78":{"position":[[281,7]]},"82":{"position":[[900,11]]}}}],["e.g",{"_index":1337,"t":{"182":{"position":[[478,6]]}}}],["each",{"_index":204,"t":{"8":{"position":[[208,4]]},"12":{"position":[[235,4],[361,4]]},"14":{"position":[[327,4]]},"19":{"position":[[3502,4]]},"55":{"position":[[45,5]]},"76":{"position":[[697,4]]},"78":{"position":[[222,4],[912,4],[1320,4],[1984,4],[2394,4]]},"80":{"position":[[152,4],[640,4],[1129,4],[2641,5]]},"88":{"position":[[179,4],[432,
4],[720,4],[846,4]]},"104":{"position":[[269,4]]},"108":{"position":[[287,4]]},"158":{"position":[[119,4]]},"160":{"position":[[72,4]]},"180":{"position":[[185,4],[381,4]]},"222":{"position":[[634,4]]}}}],["earn",{"_index":1515,"t":{"220":{"position":[[1582,4]]}}}],["easi",{"_index":821,"t":{"49":{"position":[[222,4]]},"93":{"position":[[234,4]]},"120":{"position":[[13,4]]}}}],["easier",{"_index":354,"t":{"19":{"position":[[209,6]]},"80":{"position":[[2336,6]]}}}],["easili",{"_index":633,"t":{"37":{"position":[[90,6]]}}}],["econom",{"_index":1520,"t":{"220":{"position":[[1644,8]]}}}],["edg",{"_index":1029,"t":{"78":{"position":[[3209,5]]},"80":{"position":[[1298,6],[1574,5],[1928,6]]},"220":{"position":[[990,4]]}}}],["edit",{"_index":1134,"t":{"108":{"position":[[55,4]]}}}],["editor",{"_index":931,"t":{"74":{"position":[[54,6],[340,6]]},"76":{"position":[[510,6],[593,6]]},"99":{"position":[[144,6],[394,6]]},"104":{"position":[[186,6]]}}}],["editor_ag",{"_index":1001,"t":{"78":{"position":[[2136,12]]}}}],["editor_agent.plan_research",{"_index":1012,"t":{"78":{"position":[[2500,27]]}}}],["editor_agent.run_parallel_research",{"_index":1014,"t":{"78":{"position":[[2560,35]]}}}],["editoragent(self.task",{"_index":1002,"t":{"78":{"position":[[2151,22]]}}}],["education\"report_typ",{"_index":1198,"t":{"129":{"position":[[55,21]]}}}],["effici",{"_index":587,"t":{"25":{"position":[[358,9]]},"49":{"position":[[454,9]]},"59":{"position":[[1200,9]]}}}],["effort",{"_index":1094,"t":{"82":{"position":[[644,7]]}}}],["elif",{"_index":559,"t":{"19":{"position":[[8355,4]]}}}],["elimin",{"_index":122,"t":{"4":{"position":[[702,10]]},"220":{"position":[[2080,11]]}}}],["email",{"_index":651,"t":{"37":{"position":[[1129,5]]}}}],["emb",{"_index":1255,"t":{"160":{"position":[[651,5]]}}}],["embed",{"_index":584,"t":{"25":{"position":[[309,11]]},"39":{"position":[[1241,9]]},"156":{"position":[[26,9],[232,9]]},"160":{"position":[[45,11],[179,9],[528,10],[585,9]]},"216":{"position":[[559,10]]},"222":{"position":[[303,10]]}}}],["embedding=embed",{"_index":1446,"t":{"216":{"position":[[645,21]]}}}],["embedding=openaiembed",{"_index":1572,"t":{"222":{"position":[[405,29]]}}}],["embedding_provid",{"_index":685,"t":{"39":{"position":[[1208,19]]}}}],["embedding_provider=\"azure_openai\"azure_openai_api_key=\"your",{"_index":1240,"t":{"158":{"position":[[219,59]]}}}],["embedding_provider=ollama",{"_index":1253,"t":{"160":{"position":[[539,26]]}}}],["employe",{"_index":1532,"t":{"220":{"position":[[1940,9]]}}}],["empti",{"_index":748,"t":{"39":{"position":[[2953,5]]}}}],["enabl",{"_index":393,"t":{"19":{"position":[[1011,6]]},"39":{"position":[[27,7]]},"59":{"position":[[527,8]]},"86":{"position":[[413,6]]},"112":{"position":[[85,6]]}}}],["encapsul",{"_index":868,"t":{"59":{"position":[[946,13]]},"78":{"position":[[172,12]]}}}],["end",{"_index":42,"t":{"2":{"position":[[527,6]]},"10":{"position":[[850,3]]},"19":{"position":[[2076,3],[5943,3]]},"78":{"position":[[2879,3],[2956,4]]},"80":{"position":[[1868,4],[2034,3]]},"190":{"position":[[112,3]]}}}],["endpoint",{"_index":1143,"t":{"110":{"position":[[116,10]]},"160":{"position":[[225,8]]},"182":{"position":[[354,8]]},"186":{"position":[[66,8]]}}}],["endpoint>.openai.azure.com/\"openai_api_version=\"2024",{"_index":1242,"t":{"158":{"position":[[320,52]]}}}],["energi",{"_index":1192,"t":{"125":{"position":[[42,6]]}}}],["engin",{"_index":581,"t":{"25":{"position":[[283,12]]},"31":{"position":[[191,7]]},"39":{"position":[[1066,6],[3379,6]]},"68":{"position":[[377,14]]},"180":{"po
sition":[[51,6],[123,7],[197,6],[530,8]]},"182":{"position":[[143,6]]},"216":{"position":[[517,6]]},"234":{"position":[[133,7]]}}}],["english",{"_index":1140,"t":{"108":{"position":[[1406,8]]}}}],["enhanc",{"_index":648,"t":{"37":{"position":[[997,7]]},"45":{"position":[[28,8]]},"49":{"position":[[13,8]]},"82":{"position":[[228,7]]}}}],["enjoy",{"_index":833,"t":{"53":{"position":[[194,5]]},"63":{"position":[[313,5]]},"114":{"position":[[507,5]]}}}],["enough",{"_index":187,"t":{"6":{"position":[[1136,6],[1284,6]]},"70":{"position":[[576,6]]}}}],["ensur",{"_index":595,"t":{"29":{"position":[[18,6],[333,6]]},"59":{"position":[[328,8],[904,7],[1090,7]]},"82":{"position":[[352,7]]},"120":{"position":[[55,6]]},"202":{"position":[[64,8]]},"204":{"position":[[88,8]]}}}],["enter",{"_index":864,"t":{"59":{"position":[[736,6]]}}}],["enterpris",{"_index":1332,"t":{"182":{"position":[[216,10]]}}}],["entir",{"_index":111,"t":{"4":{"position":[[431,6]]},"78":{"position":[[189,6]]}}}],["entrepeneur",{"_index":1561,"t":{"220":{"position":[[3080,12]]}}}],["env",{"_index":762,"t":{"39":{"position":[[3237,3],[3259,4],[3510,3]]},"57":{"position":[[181,4],[263,3],[595,3]]},"63":{"position":[[103,4]]},"106":{"position":[[71,3]]},"108":{"position":[[682,3]]},"114":{"position":[[122,6]]},"120":{"position":[[232,4]]},"158":{"position":[[208,4]]},"170":{"position":[[61,3]]},"172":{"position":[[119,3]]},"174":{"position":[[133,3]]},"176":{"position":[[76,3]]},"180":{"position":[[159,3],[563,4],[592,4],[625,4],[657,4],[687,4],[721,4],[755,4],[782,4],[817,4]]},"182":{"position":[[88,3],[280,4],[320,3]]},"212":{"position":[[234,3]]}}}],["env.exampl",{"_index":1165,"t":{"114":{"position":[[43,14]]}}}],["env\\scripts\\activ",{"_index":846,"t":{"57":{"position":[[373,22]]}}}],["environ",{"_index":838,"t":{"57":{"position":[[25,11],[94,11],[138,11],[296,12],[427,12],[561,12],[599,12]]},"59":{"position":[[49,11],[154,11],[375,12],[400,11],[656,11],[779,11],[892,11],[1175,11]]},"61":{"position":[[50,11]]},"112":{"position":[[348,11]]},"120":{"position":[[200,11]]},"160":{"position":[[126,11]]},"164":{"position":[[201,11]]},"230":{"position":[[100,11],[244,11],[395,11],[539,11],[677,11],[806,11]]}}}],["eras",{"_index":1077,"t":{"80":{"position":[[2724,5]]}}}],["especi",{"_index":165,"t":{"6":{"position":[[428,10]]},"220":{"position":[[1541,10]]}}}],["essay",{"_index":1469,"t":{"220":{"position":[[318,5],[2951,5]]}}}],["essenti",{"_index":870,"t":{"59":{"position":[[1037,9]]}}}],["establish",{"_index":836,"t":{"57":{"position":[[0,12]]},"59":{"position":[[0,12]]}}}],["etc",{"_index":585,"t":{"25":{"position":[[321,5]]},"37":{"position":[[1159,4]]},"39":{"position":[[336,4],[2429,4]]},"76":{"position":[[1382,4]]},"104":{"position":[[804,4]]}}}],["even",{"_index":247,"t":{"10":{"position":[[198,4]]},"14":{"position":[[727,4]]},"19":{"position":[[8819,4]]},"68":{"position":[[502,4]]},"220":{"position":[[1876,5]]}}}],["event",{"_index":436,"t":{"19":{"position":[[2508,6],[7490,6]]}}}],["eventu",{"_index":308,"t":{"14":{"position":[[485,10],[752,10]]}}}],["everyon",{"_index":901,"t":{"68":{"position":[[598,8]]}}}],["evolv",{"_index":886,"t":{"68":{"position":[[141,7],[293,7]]},"78":{"position":[[312,8]]}}}],["exa",{"_index":1326,"t":{"180":{"position":[[776,3]]}}}],["exact",{"_index":152,"t":{"6":{"position":[[264,5]]}}}],["exampl",{"_index":229,"t":{"8":{"position":[[813,7]]},"19":{"position":[[1536,8],[2654,8],[7636,8]]},"23":{"position":[[346,8]]},"37":{"position":[[6,7],[154,7],[1010,7]]},"39":{"position":[[3340,8]]},"57":{"posi
tion":[[172,8]]},"72":{"position":[[370,7],[503,7]]},"78":{"position":[[1384,7]]},"80":{"position":[[3265,7]]},"97":{"position":[[188,7],[438,7]]},"108":{"position":[[1031,9]]},"134":{"position":[[515,7],[674,9]]},"168":{"position":[[262,8],[394,8]]},"170":{"position":[[75,8]]},"172":{"position":[[133,8]]},"180":{"position":[[259,8],[423,8]]},"212":{"position":[[317,8]]},"214":{"position":[[66,8]]},"216":{"position":[[95,8]]}}}],["excel",{"_index":174,"t":{"6":{"position":[[691,5]]},"212":{"position":[[169,6]]}}}],["except",{"_index":1602,"t":{"230":{"position":[[34,9]]}}}],["excit",{"_index":1035,"t":{"80":{"position":[[18,8],[77,8]]},"82":{"position":[[31,8]]},"148":{"position":[[315,7]]}}}],["execut",{"_index":29,"t":{"2":{"position":[[308,8]]},"4":{"position":[[647,7]]},"8":{"position":[[117,7]]},"37":{"position":[[786,8]]},"57":{"position":[[186,7]]},"59":{"position":[[196,9],[868,10]]},"61":{"position":[[81,9]]},"88":{"position":[[38,11],[119,9]]}}}],["exerpt",{"_index":1465,"t":{"220":{"position":[[239,6]]}}}],["exist",{"_index":634,"t":{"37":{"position":[[116,8]]},"78":{"position":[[969,8]]},"80":{"position":[[1978,6],[2717,6]]},"222":{"position":[[245,6]]}}}],["exit",{"_index":556,"t":{"19":{"position":[[7959,7]]}}}],["expans",{"_index":732,"t":{"39":{"position":[[2504,9]]}}}],["expect",{"_index":366,"t":{"19":{"position":[[449,6]]},"220":{"position":[[2260,7],[2411,8]]}}}],["experi",{"_index":170,"t":{"6":{"position":[[596,13]]},"10":{"position":[[153,13]]},"49":{"position":[[366,10]]},"70":{"position":[[551,10]]},"80":{"position":[[38,10]]},"82":{"position":[[105,12],[260,11]]}}}],["expert",{"_index":418,"t":{"19":{"position":[[1803,7],[5670,7]]}}}],["explanatori",{"_index":1082,"t":{"80":{"position":[[2878,12]]}}}],["explicit",{"_index":1616,"t":{"234":{"position":[[188,8]]}}}],["explor",{"_index":876,"t":{"61":{"position":[[206,7]]},"234":{"position":[[262,7]]}}}],["export",{"_index":764,"t":{"39":{"position":[[3287,6],[3405,6],[3492,6]]},"63":{"position":[[144,6],[156,6]]},"93":{"position":[[383,6]]},"106":{"position":[[85,6]]},"120":{"position":[[277,6],[287,6],[341,6]]},"212":{"position":[[326,6]]}}}],["extens",{"_index":908,"t":{"70":{"position":[[16,9]]}}}],["extern",{"_index":675,"t":{"39":{"position":[[858,8],[3758,8]]},"146":{"position":[[83,8]]}}}],["extra",{"_index":1204,"t":{"134":{"position":[[44,6]]},"174":{"position":[[35,5]]}}}],["extract",{"_index":513,"t":{"19":{"position":[[4885,7]]}}}],["face",{"_index":1357,"t":{"192":{"position":[[328,4]]}}}],["facilit",{"_index":961,"t":{"78":{"position":[[88,11]]}}}],["fact",{"_index":262,"t":{"10":{"position":[[723,5]]}}}],["factual",{"_index":140,"t":{"6":{"position":[[47,10],[284,10],[502,7],[567,10],[1432,8]]},"14":{"position":[[369,7]]},"29":{"position":[[60,7]]},"37":{"position":[[1369,7]]},"76":{"position":[[923,7]]},"93":{"position":[[198,7]]},"234":{"position":[[179,8]]}}}],["fail",{"_index":488,"t":{"19":{"position":[[3926,9],[6411,9],[8319,9]]},"196":{"position":[[51,4]]}}}],["fair",{"_index":276,"t":{"10":{"position":[[1066,4]]}}}],["fairli",{"_index":1546,"t":{"220":{"position":[[2460,6]]}}}],["faiss.from_documents(docu",{"_index":1556,"t":{"220":{"position":[[2874,31]]}}}],["faissfrom",{"_index":1464,"t":{"220":{"position":[[187,9]]}}}],["fals",{"_index":1074,"t":{"80":{"position":[[2550,6],[2939,5]]},"108":{"position":[[419,6],[810,6],[1210,6]]}}}],["familiar",{"_index":835,"t":{"55":{"position":[[28,11]]}}}],["far",{"_index":456,"t":{"19":{"position":[[3211,3]]}}}],["fast",{"_index":692,"t":{"39":{"positio
n":[[1454,4],[1674,4]]},"162":{"position":[[93,4]]},"168":{"position":[[232,4],[364,4]]},"176":{"position":[[113,4]]},"228":{"position":[[259,4],[462,4]]}}}],["fast.her",{"_index":1517,"t":{"220":{"position":[[1609,9]]}}}],["fast_llm_model",{"_index":691,"t":{"39":{"position":[[1423,15]]}}}],["fast_llm_model=\"gpt",{"_index":1234,"t":{"154":{"position":[[230,19]]}}}],["fast_llm_model=claud",{"_index":1289,"t":{"168":{"position":[[271,21],[403,21]]}}}],["fast_token_limit",{"_index":698,"t":{"39":{"position":[[1632,17]]}}}],["fastapi",{"_index":776,"t":{"43":{"position":[[29,7]]},"53":{"position":[[97,7]]},"61":{"position":[[11,7]]},"132":{"position":[[5,7]]}}}],["fastapi()@app.get(\"/report/{report_type}\")async",{"_index":1202,"t":{"132":{"position":[[87,47]]}}}],["fastapifrom",{"_index":1200,"t":{"132":{"position":[[20,11]]}}}],["faster",{"_index":239,"t":{"8":{"position":[[1015,6]]},"108":{"position":[[846,6]]}}}],["favorit",{"_index":960,"t":{"78":{"position":[[10,8]]}}}],["featur",{"_index":606,"t":{"31":{"position":[[282,8]]},"45":{"position":[[37,8]]},"47":{"position":[[71,7]]},"49":{"position":[[407,8]]},"78":{"position":[[19,8]]},"80":{"position":[[86,7]]},"112":{"position":[[191,9]]},"148":{"position":[[47,8]]}}}],["feedback",{"_index":818,"t":{"49":{"position":[[157,8]]},"74":{"position":[[548,8]]},"76":{"position":[[1037,8],[1146,9]]},"82":{"position":[[1446,8]]},"99":{"position":[[114,8],[602,8]]},"104":{"position":[[483,9],[568,9]]},"108":{"position":[[392,8]]}}}],["feel",{"_index":378,"t":{"19":{"position":[[775,4],[8833,4]]},"31":{"position":[[437,4]]},"68":{"position":[[846,4]]},"186":{"position":[[375,4]]}}}],["fetch",{"_index":470,"t":{"19":{"position":[[3467,8]]},"37":{"position":[[469,5]]}}}],["fetch_report(queri",{"_index":642,"t":{"37":{"position":[[431,19],[863,19]]}}}],["few",{"_index":273,"t":{"10":{"position":[[1044,3]]},"68":{"position":[[261,3]]},"220":{"position":[[1420,3]]}}}],["field",{"_index":1084,"t":{"80":{"position":[[3042,5]]},"108":{"position":[[135,8]]}}}],["file",{"_index":373,"t":{"19":{"position":[[614,6]]},"39":{"position":[[708,4],[872,4],[962,4],[3264,4],[3772,4]]},"43":{"position":[[53,6]]},"57":{"position":[[660,4]]},"59":{"position":[[264,4]]},"63":{"position":[[108,4]]},"80":{"position":[[2399,5]]},"108":{"position":[[74,4]]},"112":{"position":[[266,5]]},"114":{"position":[[58,5],[96,4],[114,4],[164,4],[343,5]]},"120":{"position":[[237,4]]},"158":{"position":[[213,5]]},"212":{"position":[[129,4]]}}}],["filter",{"_index":287,"t":{"12":{"position":[[396,7]]},"25":{"position":[[136,9]]},"29":{"position":[[240,6]]},"88":{"position":[[234,7],[950,6]]},"234":{"position":[[485,9]]}}}],["final",{"_index":240,"t":{"10":{"position":[[0,8]]},"12":{"position":[[84,5],[460,8],[517,5]]},"19":{"position":[[2117,8],[3017,8],[4029,9]]},"68":{"position":[[340,7]]},"74":{"position":[[627,5],[684,5]]},"76":{"position":[[169,7],[1189,5],[1323,5]]},"78":{"position":[[3219,7]]},"80":{"position":[[476,5],[1143,8],[2047,5],[2242,10],[3176,5]]},"82":{"position":[[728,5]]},"88":{"position":[[213,8],[941,8],[1009,5]]},"99":{"position":[[681,5],[738,5]]},"104":{"position":[[611,5],[745,5]]},"122":{"position":[[384,8]]},"134":{"position":[[621,8]]},"166":{"position":[[4,8]]}}}],["financ",{"_index":298,"t":{"14":{"position":[[259,8]]},"19":{"position":[[1561,7],[1795,7],[5662,7]]}}}],["financi",{"_index":411,"t":{"19":{"position":[[1605,9],[5289,9]]},"37":{"position":[[1263,9]]}}}],["find",{"_index":22,"t":{"2":{"position":[[224,4]]},"19":{"position":[[1939,4],[5806,4]]},"
49":{"position":[[255,9]]},"76":{"position":[[1287,9]]},"86":{"position":[[90,4]]},"104":{"position":[[709,9]]},"198":{"position":[[252,4]]}}}],["finit",{"_index":125,"t":{"4":{"position":[[797,6]]}}}],["first",{"_index":5,"t":{"2":{"position":[[68,5]]},"4":{"position":[[4,5],[394,6],[550,5]]},"19":{"position":[[1688,5],[1701,6]]},"78":{"position":[[3163,5]]},"198":{"position":[[208,6]]}}}],["fix",{"_index":89,"t":{"4":{"position":[[151,5]]},"112":{"position":[[501,5]]}}}],["flask",{"_index":1203,"t":{"134":{"position":[[23,5],[88,5],[101,6],[470,5],[500,5]]}}}],["flask(__name__)@app.route('/report/reviewer->reviser->reviewer... workflow.set_entry_point(\"researcher\") workflow.add_edge('researcher', 'reviewer') workflow.add_edge('reviser', 'reviewer') workflow.add_conditional_edges('reviewer', (lambda draft: \"accept\" if draft['review'] is None else \"revise\"), {\"accept\": END, \"revise\": \"reviser\"}) Copy By defining the conditional edges, the graph would direct to reviser if there exists review notes by the reviewer, or the cycle would end with the final draft. If you go back to the main graph we’ve built, you’ll see that this parallel work is under a node named “researcher” called by ChiefEditor agent. Running the Research Assistant After finalizing the agents, states and graphs, it’s time to run our research assistant! To make it easier to customize, the assistant runs with a given task.json file: { \"query\": \"Is AI in a hype cycle?\", \"max_sections\": 3, \"publish_formats\": { \"markdown\": true, \"pdf\": true, \"docx\": true }, \"follow_guidelines\": false, \"model\": \"gpt-4-turbo\", \"guidelines\": [ \"The report MUST be written in APA format\", \"Each sub section MUST include supporting sources using hyperlinks. If none exist, erase the sub section or rewrite it to be a part of the previous section\", \"The report MUST be written in spanish\" ]} Copy The task object is pretty self explanatory, however please notice that follow_guidelines if false would cause the graph to ignore the revision step and defined guidelines. Also, the max_sections field defines how many subheaders to research for. Having less will generate a shorter report. Running the assistant will result in a final research report in formats such as Markdown, PDF and Docx. To download and run the example check out the GPT Researcher x LangGraph open source page.","s":"A Graph within a Graph to support stateful Parallelization","u":"/blog/gptr-langgraph","h":"#a-graph-within-a-graph-to-support-stateful-parallelization","p":20},{"i":38,"t":"Going forward, there are super exciting things to think about. Human in the loop is key for optimized AI experiences. Having a human help the assistant revise and focus on just the right research plan, topics and outline, would enhance the overall quality and experience. Also generally, aiming for relying on human intervention throughout the AI flow ensures correctness, sense of control and deterministic results. Happy to see that LangGraph already supports this out of the box as seen here. In addition, having support for research about both web and local data would be key for many types of business and personal use cases. Lastly, more efforts can be done to improve the quality of retrieved sources and making sure the final report is built in the optimal storyline. A step forward in LangGraph and multi-agent collaboration in a whole would be where assistants can plan and generate graphs dynamically based on given tasks. 
This vision would allow assistants to choose only a subset of agents for a given task and plan their strategy based on the graph fundamentals presented in this article, opening a whole new world of possibilities. Given the pace of innovation in the AI space, it won’t be long before a new disruptive version of GPT Researcher is launched. Looking forward to what the future brings! To keep track of this project’s ongoing progress and updates, please join our Discord community. And as always, if you have any feedback or further questions, please comment below!","s":"What’s Next?","u":"/blog/gptr-langgraph","h":"#whats-next","p":20},{"i":40,"t":"After AutoGPT was published, we immediately took it for a spin. The first use case that came to mind was autonomous online research. Forming objective conclusions for manual research tasks can take time, sometimes weeks, to find the right resources and information. Seeing how well AutoGPT created tasks and executed them got me thinking about the great potential of using AI to conduct comprehensive research and what it meant for the future of online research. But the problem with AutoGPT was that it usually ran into never-ending loops, required human interference for almost every step, constantly lost track of its progress, and almost never actually completed the task. Moreover, the information and context gathered during the research task were lost (such as keeping track of sources), and sometimes hallucinated. The passion for leveraging AI for online research and the limitations I found put me on a mission to try and solve these problems while sharing my work with the world. This is when I created GPT Researcher — an open source autonomous agent for online comprehensive research. In this article, I will share the steps that guided me toward the proposed solution.","s":"How we built GPT Researcher","u":"/blog/building-gpt-researcher","h":"","p":39},{"i":42,"t":"The first step in solving these issues was to seek a more deterministic solution that could ultimately guarantee completing any research task within a fixed time frame, without human interference. This is when we stumbled upon the recent paper Plan and Solve. The paper aims to provide a better solution for the challenges stated above. The idea is quite simple and consists of two components: first, devising a plan to divide the entire task into smaller subtasks, and then carrying out the subtasks according to the plan. As it relates to research, first create an outline of questions to research related to the task, and then deterministically execute an agent for every outline item. This approach eliminates the uncertainty in task completion by breaking the agent steps into a deterministic, finite set of tasks. Once all tasks are completed, the agent concludes the research. Following this strategy has improved the reliability of completing research tasks to 100%. Now the challenge is: how to improve quality and speed?","s":"Moving from infinite loops to deterministic results","u":"/blog/building-gpt-researcher","h":"#moving-from-infinite-loops-to-deterministic-results","p":39},{"i":44,"t":"The biggest challenge with LLMs is the lack of factual, unbiased responses, caused by hallucinations and out-of-date training sets (GPT is currently trained on datasets from 2021). But the irony is that for research tasks, it is crucial to optimize for these exact two criteria: factuality and bias. 
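To pin down the plan-and-execute flow from the previous section, here is a minimal sketch; create_outline and run_research_agent are hypothetical stand-ins for the real planner and researcher agents, not GPT Researcher’s actual API:

```python
# A minimal sketch of the plan-and-execute loop, assuming a planner LLM
# produces a fixed outline and one agent is executed per outline item.
# Both helpers are hypothetical stand-ins, not GPT Researcher's actual API.

def create_outline(task: str) -> list[str]:
    """Plan: ask an LLM for a finite set of research questions about the task."""
    raise NotImplementedError  # stand-in for the planner LLM call

def run_research_agent(question: str) -> str:
    """Solve: scrape and summarize sources for a single research question."""
    raise NotImplementedError  # stand-in for a single research agent

def research(task: str) -> list[str]:
    questions = create_outline(task)  # the plan is fixed up front...
    # ...so execution is a deterministic, finite set of subtasks
    return [run_research_agent(q) for q in questions]
```

Because the outline is fixed before any agent runs, the loop always terminates, which is exactly the determinism guarantee described above.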
To tackle these challenges, we assumed the following: The law of large numbers — more content will lead to less biased results, especially if gathered properly. Leveraging LLMs for the summarization of factual information can significantly improve the overall factuality of results. After experimenting with LLMs for quite some time, we can say that the areas where foundation models excel are the summarization and rewriting of given content. So, in theory, if LLMs only review given content and summarize and rewrite it, hallucinations would potentially be reduced significantly. In addition, assuming the given content is unbiased, or at least holds opinions and information from all sides of a topic, the rewritten result would also be unbiased. So how can content be unbiased? The law of large numbers. In other words, if enough sites that hold relevant information are scraped, the possibility of biased information is greatly reduced. So the idea would be to scrape just enough sites together to form an objective opinion on any topic. Great! Sounds like, for now, we have an idea for how to create deterministic, factual, and unbiased results. But what about the speed problem?","s":"Aiming for objective and unbiased results","u":"/blog/building-gpt-researcher","h":"#aiming-for-objective-and-unbiased-results","p":39},{"i":46,"t":"Another issue with AutoGPT is that it works synchronously. Its main idea is to create a list of tasks and then execute them one by one. So if, let’s say, a research task requires visiting 20 sites, and each site takes around one minute to scrape and summarize, the overall research task would take a minimum of 20 minutes. That’s assuming it ever stops. But what if we could parallelize agent work? By leveraging Python libraries such as asyncio, the agent tasks have been optimized to work in parallel, thus significantly reducing the time to research. # Create a list to hold the coroutine agent tasks tasks = [async_browse(url, query, self.websocket) for url in await new_search_urls] # Gather the results as they become available responses = await asyncio.gather(*tasks, return_exceptions=True) In the example above, we trigger scraping for all URLs in parallel, and only once all of them are done, we continue with the task. Based on many tests, an average research task takes around three minutes (!!). That’s 85% faster than AutoGPT.","s":"Speeding up the research process","u":"/blog/building-gpt-researcher","h":"#speeding-up-the-research-process","p":39},{"i":48,"t":"Finally, after aggregating as much information as possible about a given research task, the challenge is to write a comprehensive report about it. After experimenting with several OpenAI models and even open source ones, I’ve concluded that the best results are currently achieved with GPT-4. The task is straightforward — provide GPT-4 with all the aggregated information as context, and ask it to write a detailed report given the original research task. The prompt is as follows: \"{research_summary}\" Using the above information, answer the following question or topic: \"{question}\" in a detailed report — The report should focus on the answer to the question, should be well structured, informative, in depth, with facts and numbers if available, a minimum of 1,200 words and with markdown syntax and apa format. Write all source urls at the end of the report in apa format. You should write your report only based on the given information and nothing else. 
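Wiring this prompt into a model call takes only a few lines. Below is a minimal sketch assuming the OpenAI Python SDK’s chat completions endpoint; the model name and the write_report helper are illustrative, not the project’s actual code:

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

def write_report(research_summary: str, question: str) -> str:
    # The template is the prompt quoted above, filled with the aggregated
    # summaries and the original research question (shortened here).
    prompt = (
        f'"{research_summary}" Using the above information, answer the '
        f'following question or topic: "{question}" in a detailed report'
    )
    response = client.chat.completions.create(
        model="gpt-4",  # illustrative; any GPT-4 variant could be configured
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content
```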
The results are quite impressive, with some minor hallucinations in very few samples, but it’s fair to assume that as GPT improves over time, results will only get better.","s":"Finalizing the research report","u":"/blog/building-gpt-researcher","h":"#finalizing-the-research-report","p":39},{"i":50,"t":"Now that we’ve reviewed the necessary steps of GPT Researcher, let’s break down the final architecture, as shown below: More specifically: Generate an outline of research questions that form an objective opinion on any given task. For each research question, trigger a crawler agent that scrapes online resources for information relevant to the given task. For each scraped resource, keep track of it, filter it, and summarize it only if it includes relevant information. Finally, aggregate all summarized sources and generate a final research report.","s":"The final architecture","u":"/blog/building-gpt-researcher","h":"#the-final-architecture","p":39},{"i":52,"t":"The future of online research automation is heading toward a major disruption. As AI continues to improve, it is only a matter of time before AI agents can perform comprehensive research tasks for any of our day-to-day needs. AI research can disrupt areas of finance, legal, academia, health, and retail, reducing the time we spend on each research task by 95% while optimizing for factual and unbiased reports within an influx and overload of ever-growing online information. Imagine if an AI could eventually understand and analyze any form of online content — videos, images, graphs, tables, reviews, text, audio. And imagine if it could support and analyze hundreds of thousands of words of aggregated information within a single prompt. Even imagine that AI could eventually improve in reasoning and analysis, making it much more suitable for reaching new and innovative research conclusions. And that it can do all that in minutes, if not seconds. It’s all a matter of time, and it’s what GPT Researcher is all about.","s":"Going forward","u":"/blog/building-gpt-researcher","h":"#going-forward","p":39},{"i":54,"t":"OpenAI has done it again with a groundbreaking DevDay showcasing some of the latest improvements to the OpenAI suite of tools, products and services. One major release was the new Assistants API that makes it easier for developers to build their own assistive AI apps that have goals and can call models and tools. The new Assistants API currently supports three types of tools: Code Interpreter, Retrieval, and Function calling. Although you might expect the Retrieval tool to support online information retrieval (such as search APIs or ChatGPT plugins), for now it only supports raw data such as text or CSV files. This blog will demonstrate how to leverage the latest Assistants API with online information using the function calling tool. To skip the tutorial below, feel free to check out the full Github Gist here. At a high level, a typical integration of the Assistants API has the following steps: Create an Assistant in the API by defining its custom instructions and picking a model. If helpful, enable tools like Code Interpreter, Retrieval, and Function calling. Create a Thread when a user starts a conversation. Add Messages to the Thread as the user asks questions. Run the Assistant on the Thread to trigger responses. This automatically calls the relevant tools. As you can see below, an Assistant object includes Threads for storing and handling conversation sessions between the assistant and users, and a Run for each invocation of an Assistant on a Thread. 
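Condensed into code, that lifecycle looks roughly as follows; this is a sketch built from the same beta endpoints used throughout the tutorial below, with placeholder instructions and message content:

```python
from openai import OpenAI

client = OpenAI()

# 1. Create an Assistant with instructions, a model, and (optionally) tools.
assistant = client.beta.assistants.create(
    instructions="...", model="gpt-4-1106-preview", tools=[])

# 2. Create a Thread when a user starts a conversation.
thread = client.beta.threads.create()

# 3. Add the user's Messages to the Thread.
client.beta.threads.messages.create(
    thread_id=thread.id, role="user", content="...")

# 4. Run the Assistant on the Thread; relevant tools are called automatically.
run = client.beta.threads.runs.create(
    thread_id=thread.id, assistant_id=assistant.id)

# 5. Once the Run completes, read the Assistant's reply from the Thread.
messages = client.beta.threads.messages.list(thread_id=thread.id)
```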
Let’s go ahead and implement these steps one by one! For the example, we will build a finance GPT that can provide insights about financial questions. We will use the OpenAI Python SDK v1.2 and Tavily Search API. First things first, let’s define the assistant’s instructions: assistant_prompt_instruction = \"\"\"You are a finance expert. Your goal is to provide answers based on information from the internet. You must use the provided Tavily search API function to find relevant online information. You should never use your own knowledge to answer questions.Please include relevant url sources in the end of your answers.\"\"\" Copy Next, let’s finalize step 1 and create an assistant using the latest GPT-4 Turbo model (128K context), along with the function tool that calls the Tavily web search API: # Create an assistantassistant = client.beta.assistants.create( instructions=assistant_prompt_instruction, model=\"gpt-4-1106-preview\", tools=[{ \"type\": \"function\", \"function\": { \"name\": \"tavily_search\", \"description\": \"Get information on recent events from the web.\", \"parameters\": { \"type\": \"object\", \"properties\": { \"query\": {\"type\": \"string\", \"description\": \"The search query to use. For example: 'Latest news on Nvidia stock performance'\"}, }, \"required\": [\"query\"] } } }]) Copy Steps 2 and 3 are quite straightforward: we’ll initiate a new thread and update it with a user message: thread = client.beta.threads.create()user_input = input(\"You: \")message = client.beta.threads.messages.create( thread_id=thread.id, role=\"user\", content=user_input,) Copy Finally, we’ll run the assistant on the thread to trigger the function call and get the response: run = client.beta.threads.runs.create( thread_id=thread.id, assistant_id=assistant_id,) Copy So far so good! But this is where it gets a bit messy. Unlike with the regular GPT APIs, the Assistants API doesn’t return a synchronous response, but returns a status. This allows for asynchronous operations across assistants, but requires more overhead for fetching statuses and dealing with each manually. To manage this status lifecycle, let’s build a function that can be reused and handles waiting for various statuses (such as ‘requires_action’): # Function to wait for a run to completedef wait_for_run_completion(thread_id, run_id): while True: time.sleep(1) run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id) print(f\"Current run status: {run.status}\") if run.status in ['completed', 'failed', 'requires_action']: return run Copy This function will keep polling and sleeping until the run is finalized, for example when it’s completed or requires an action from a function call. We’re almost there! Lastly, let’s take care of when the assistant wants to call the web search API: # Function to handle tool output submissiondef submit_tool_outputs(thread_id, run_id, tools_to_call): tool_output_array = [] for tool in tools_to_call: output = None tool_call_id = tool.id function_name = tool.function.name function_args = tool.function.arguments if function_name == \"tavily_search\": output = tavily_search(query=json.loads(function_args)[\"query\"]) if output: tool_output_array.append({\"tool_call_id\": tool_call_id, \"output\": output}) return client.beta.threads.runs.submit_tool_outputs( thread_id=thread_id, run_id=run_id, tool_outputs=tool_output_array ) Copy As seen above, if the assistant has reasoned that a function call should trigger, we extract the required function params and pass the tool outputs back to the running thread. 
We catch this status and call our functions as seen below: if run.status == 'requires_action': run = submit_tool_outputs(thread.id, run.id, run.required_action.submit_tool_outputs.tool_calls) run = wait_for_run_completion(thread.id, run.id) Copy That’s it! We now have a working OpenAI Assistant that can be used to answer financial questions using real time online information. Below is the full runnable code: import osimport jsonimport timefrom openai import OpenAIfrom tavily import TavilyClient# Initialize clients with API keysclient = OpenAI(api_key=os.environ[\"OPENAI_API_KEY\"])tavily_client = TavilyClient(api_key=os.environ[\"TAVILY_API_KEY\"])assistant_prompt_instruction = \"\"\"You are a finance expert. Your goal is to provide answers based on information from the internet. You must use the provided Tavily search API function to find relevant online information. You should never use your own knowledge to answer questions.Please include relevant url sources in the end of your answers.\"\"\"# Function to perform a Tavily searchdef tavily_search(query): search_result = tavily_client.get_search_context(query, search_depth=\"advanced\", max_tokens=8000) return search_result# Function to wait for a run to completedef wait_for_run_completion(thread_id, run_id): while True: time.sleep(1) run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id) print(f\"Current run status: {run.status}\") if run.status in ['completed', 'failed', 'requires_action']: return run# Function to handle tool output submissiondef submit_tool_outputs(thread_id, run_id, tools_to_call): tool_output_array = [] for tool in tools_to_call: output = None tool_call_id = tool.id function_name = tool.function.name function_args = tool.function.arguments if function_name == \"tavily_search\": output = tavily_search(query=json.loads(function_args)[\"query\"]) if output: tool_output_array.append({\"tool_call_id\": tool_call_id, \"output\": output}) return client.beta.threads.runs.submit_tool_outputs( thread_id=thread_id, run_id=run_id, tool_outputs=tool_output_array )# Function to print messages from a threaddef print_messages_from_thread(thread_id): messages = client.beta.threads.messages.list(thread_id=thread_id) for msg in messages: print(f\"{msg.role}: {msg.content[0].text.value}\")# Create an assistantassistant = client.beta.assistants.create( instructions=assistant_prompt_instruction, model=\"gpt-4-1106-preview\", tools=[{ \"type\": \"function\", \"function\": { \"name\": \"tavily_search\", \"description\": \"Get information on recent events from the web.\", \"parameters\": { \"type\": \"object\", \"properties\": { \"query\": {\"type\": \"string\", \"description\": \"The search query to use. 
For example: 'Latest news on Nvidia stock performance'\"}, \"required\": [\"query\"] } } }])assistant_id = assistant.idprint(f\"Assistant ID: {assistant_id}\")# Create a threadthread = client.beta.threads.create()print(f\"Thread: {thread}\")# Ongoing conversation loopwhile True: user_input = input(\"You: \") if user_input.lower() == 'exit': break # Create a message message = client.beta.threads.messages.create( thread_id=thread.id, role=\"user\", content=user_input, ) # Create a run run = client.beta.threads.runs.create( thread_id=thread.id, assistant_id=assistant_id, ) print(f\"Run ID: {run.id}\") # Wait for run to complete run = wait_for_run_completion(thread.id, run.id) if run.status == 'failed': print(run.error) continue elif run.status == 'requires_action': run = submit_tool_outputs(thread.id, run.id, run.required_action.submit_tool_outputs.tool_calls) run = wait_for_run_completion(thread.id, run.id) # Print messages from the thread print_messages_from_thread(thread.id) Copy The assistant can be further customized and improved using additional retrieval information, OpenAI’s Code Interpreter and more. Also, you can go ahead and add more function tools to make the assistant even smarter. Feel free to drop a comment below if you have any further questions!","s":"How to build an OpenAI Assistant with Internet access","u":"/blog/building-openai-assistant","h":"","p":53},{"i":56,"t":"Introduction The config.py enables you to customize GPT Researcher to your specific needs and preferences. Thanks to our amazing community and contributions, GPT Researcher supports multiple LLMs and Retrievers. In addition, GPT Researcher can be tailored to various report formats (such as APA), word count, research iteration depth, etc. GPT Researcher defaults to our recommended suite of integrations: OpenAI for LLM calls and Tavily API for retrieving real-time online information. As seen below, OpenAI still stands as the superior LLM. We assume it will stay this way for some time, and that prices will only continue to decrease, while performance and speed increase over time. The default config.py file can be found in /gpt_researcher/config/. It supports various options for customizing GPT Researcher to your needs. You can also include your own external JSON file config.json by adding the path in the config_file param. Please follow the config.py file for additional future support. Below is a list of currently supported options: RETRIEVER: Web search engine used for retrieving sources. Defaults to tavily. Options: duckduckgo, bing, google, serper, searx. Check here for supported retrievers EMBEDDING_PROVIDER: Provider for embedding model. Defaults to openai. Options: ollama, huggingface, azure_openai, custom. LLM_PROVIDER: LLM provider. Defaults to openai. Options: google, ollama, groq and much more! FAST_LLM_MODEL: Model name for fast LLM operations such as summaries. Defaults to gpt-4o-mini. SMART_LLM_MODEL: Model name for smart operations like generating research reports and reasoning. Defaults to gpt-4o. FAST_TOKEN_LIMIT: Maximum token limit for fast LLM responses. Defaults to 2000. SMART_TOKEN_LIMIT: Maximum token limit for smart LLM responses. Defaults to 4000. BROWSE_CHUNK_MAX_LENGTH: Maximum length of text chunks to browse in web sources. Defaults to 8192. SUMMARY_TOKEN_LIMIT: Maximum token limit for generating summaries. Defaults to 700. TEMPERATURE: Sampling temperature for LLM responses, typically between 0 and 1. 
A higher value results in more randomness and creativity, while a lower value results in more focused and deterministic responses. Defaults to 0.55. TOTAL_WORDS: Total word count limit for document generation or processing tasks. Defaults to 800. REPORT_FORMAT: Preferred format for report generation. Defaults to APA. Consider formats like MLA, CMS, Harvard style, IEEE, etc. MAX_ITERATIONS: Maximum number of iterations for processes like query expansion or search refinement. Defaults to 3. AGENT_ROLE: Role of the agent. This might be used to customize the behavior of the agent based on its assigned roles. No default value. MAX_SUBTOPICS: Maximum number of subtopics to generate or consider. Defaults to 3. SCRAPER: Web scraper to use for gathering information. Defaults to bs (BeautifulSoup). You can also use newspaper. DOC_PATH: Path to read and research local documents. Defaults to an empty string indicating no path specified. USER_AGENT: Custom User-Agent string for web crawling and web requests. MEMORY_BACKEND: Backend used for memory operations, such as local storage of temporary data. Defaults to local. To change the default configurations, you can simply add env variables to your .env file as named above or export manually in your local project directory. For example, to manually change the search engine and report format: export RETRIEVER=bingexport REPORT_FORMAT=IEEE Copy Please note that you might need to export additional env vars and obtain API keys for other supported search retrievers and LLM providers. Please follow your console logs for further assistance. To learn more about additional LLM support you can check out the docs here.","s":"Introduction","u":"/docs/gpt-researcher/config","h":"","p":55},{"i":58,"t":"On this page","s":"Frontend Application","u":"/docs/gpt-researcher/frontend","h":"","p":57},{"i":60,"t":"A lightweight solution using FastAPI to serve static files. Prerequisites​ Python 3.11+ pip Setup and Running​ Install required packages: pip install -r requirements.txt Copy Start the server: python -m uvicorn main:app Copy Access at http://localhost:8000 Demo​","s":"Option 1: Static Frontend (FastAPI)","u":"/docs/gpt-researcher/frontend","h":"#option-1-static-frontend-fastapi","p":57},{"i":62,"t":"A more robust solution with enhanced features and performance. Prerequisites​ Node.js (v18.17.0 recommended) npm Setup and Running​ Navigate to NextJS directory: cd nextjs Copy Set up Node.js: nvm install 18.17.0nvm use v18.17.0 Copy Install dependencies: npm install --legacy-peer-deps Copy Start development server: npm run dev Copy Access at http://localhost:3000 Note: Requires backend server on localhost:8000 as detailed in option 1. Demo​","s":"Option 2: NextJS Frontend","u":"/docs/gpt-researcher/frontend","h":"#option-2-nextjs-frontend","p":57},{"i":64,"t":"Static Frontend: Quick setup, lightweight deployment. NextJS Frontend: Feature-rich, scalable, better performance and SEO. For production, NextJS is recommended.","s":"Choosing an Option","u":"/docs/gpt-researcher/frontend","h":"#choosing-an-option","p":57},{"i":66,"t":"Our frontend enhances GPT-Researcher by providing: Intuitive Research Interface: Streamlined input for research queries. Real-time Progress Tracking: Visual feedback on ongoing research tasks. Interactive Results Display: Easy-to-navigate presentation of findings. Customizable Settings: Adjust research parameters to suit specific needs. 
Responsive Design: Optimal experience across various devices. These features aim to make the research process more efficient and user-friendly, complementing GPT-Researcher's powerful agent capabilities.","s":"Frontend Features","u":"/docs/gpt-researcher/frontend","h":"#frontend-features","p":57},{"i":68,"t":"Agent Example If you're interested in using GPT Researcher as a standalone agent, you can easily import it into any existing Python project. Below is an example of calling the agent to generate a research report: from gpt_researcher import GPTResearcherimport asyncio# It is best to define global constants at the top of your scriptQUERY = \"What happened in the latest burning man floods?\"REPORT_TYPE = \"research_report\"async def fetch_report(query, report_type): \"\"\" Fetch a research report based on the provided query and report type. \"\"\" researcher = GPTResearcher(query=query, report_type=report_type, config_path=None) await researcher.conduct_research() report = await researcher.write_report() return reportasync def generate_research_report(): \"\"\" This is a sample script that executes an async main function to run a research report. \"\"\" report = await fetch_report(QUERY, REPORT_TYPE) print(report)if __name__ == \"__main__\": asyncio.run(generate_research_report()) Copy You can further enhance this example to use the returned report as context for generating valuable content such as news articles, marketing content, email templates, newsletters, etc. You can also use GPT Researcher to gather information about code documentation, business analysis, financial information and more, all of which can be used to complete much more complex tasks that require factual, high-quality real-time information.","s":"Agent Example","u":"/docs/gpt-researcher/example","h":"","p":67},{"i":70,"t":"On this page","s":"Getting Started","u":"/docs/gpt-researcher/getting-started","h":"","p":69},{"i":72,"t":"Step 1 - Install dependencies $ pip install -r requirements.txt Copy Step 2 - Run the agent with FastAPI $ uvicorn main:app --reload Copy Step 3 - Go to http://localhost:8000 on any browser and enjoy researching!","s":"Quickstart","u":"/docs/gpt-researcher/getting-started","h":"#quickstart","p":69},{"i":74,"t":"Select either based on your familiarity with each:","s":"Using Virtual Environment or Poetry","u":"/docs/gpt-researcher/getting-started","h":"#using-virtual-environment-or-poetry","p":69},{"i":76,"t":"Establishing the Virtual Environment with Activate/Deactivate configuration​ Create a virtual environment using the venv package with an environment name of your choice, for example, env. Execute the following command in the PowerShell/CMD terminal: python -m venv env Copy To activate the virtual environment, use the following activation script in PowerShell/CMD terminal: .\\env\\Scripts\\activate Copy To deactivate the virtual environment, run the following deactivation script in PowerShell/CMD terminal: deactivate Copy Install the dependencies for a Virtual environment​ After activating the env environment, install dependencies using the requirements.txt file with the following command: python -m pip install -r requirements.txt Copy","s":"Virtual Environment","u":"/docs/gpt-researcher/getting-started","h":"#virtual-environment","p":69},{"i":78,"t":"Establishing the Poetry dependencies and virtual environment with Poetry version ~1.7.1​ Install project dependencies and simultaneously create a virtual environment for the specified project. 
By executing this command, Poetry reads the project's \"pyproject.toml\" file to determine the required dependencies and their versions, ensuring a consistent and isolated development environment. The virtual environment allows for a clean separation of project-specific dependencies, preventing conflicts with system-wide packages and enabling more straightforward dependency management throughout the project's lifecycle. poetry install Copy Activate the virtual environment associated with a Poetry project​ By running this command, the user enters a shell session within the isolated environment associated with the project, providing a dedicated space for development and execution. This virtual environment ensures that the project dependencies are encapsulated, avoiding conflicts with system-wide packages. Activating the Poetry shell is essential for seamlessly working on a project, as it ensures that the correct versions of dependencies are used and provides a controlled environment conducive to efficient development and testing. poetry shell Copy","s":"Poetry","u":"/docs/gpt-researcher/getting-started","h":"#poetry","p":69},{"i":80,"t":"Launch the FastAPI application agent on a Virtual Environment or Poetry setup by executing the following command: python -m uvicorn main:app --reload Copy Visit http://localhost:8000 in any web browser and explore your research!","s":"Run the app","u":"/docs/gpt-researcher/getting-started","h":"#run-the-app","p":69},{"i":82,"t":"Step 1 - Install Docker Follow instructions at https://docs.docker.com/engine/install/ Step 2 - Create .env file with your OpenAI Key or simply export it $ export OPENAI_API_KEY={Your API Key here} Copy Step 3 - Run the application $ docker-compose up Copy Step 4 - Go to http://localhost:8000 on any browser and enjoy researching!","s":"Try it with Docker","u":"/docs/gpt-researcher/getting-started","h":"#try-it-with-docker","p":69},{"i":84,"t":"On this page","s":"Introduction","u":"/docs/gpt-researcher/introduction","h":"","p":83},{"i":86,"t":"Forming objective conclusions for manual research tasks can take time, sometimes weeks, to find the right resources and information. Current LLMs are trained on past and outdated information, with heavy risks of hallucinations, making them almost irrelevant for research tasks. Current LLMs are limited to short token outputs which are not sufficient for long detailed research reports (2k+ words). Solutions that enable web search (such as ChatGPT + Web Plugin) only consider limited resources and content that in some cases result in superficial conclusions or biased answers. Using only a selection of resources can create bias in determining the right conclusions for research questions or tasks.","s":"Why GPT Researcher?","u":"/docs/gpt-researcher/introduction","h":"#why-gpt-researcher","p":83},{"i":88,"t":"The main idea is to run \"planner\" and \"execution\" agents, where the planner generates questions to research, and the execution agents seek the most related information based on each generated research question. Finally, the planner filters and aggregates all related information and creates a research report. The agents leverage both gpt-4o-mini and gpt-4o (128K context) to complete a research task. We optimize for costs using each only when necessary. The average research task takes around 3 minutes to complete, and costs ~$0.1. More specifically: Create a domain-specific agent based on the research query or task. 
Generate a set of research questions that together form an objective opinion on any given task. For each research question, trigger a crawler agent that scrapes online resources for information relevant to the given task. For each scraped resource, summarize it based on relevant information and keep track of its sources. Finally, filter and aggregate all summarized sources and generate a final research report.","s":"Architecture","u":"/docs/gpt-researcher/introduction","h":"#architecture","p":83},{"i":91,"t":"How it Works How to Install Live Demo Homepage","s":"Tutorials","u":"/docs/gpt-researcher/introduction","h":"#tutorials","p":83},{"i":93,"t":"📝 Generate research, outlines, resources and lesson reports 📜 Can generate long and detailed research reports (over 2K words) 🌐 Aggregates over 20 web sources per research task to form objective and factual conclusions 🖥️ Includes an easy-to-use web interface (HTML/CSS/JS) 🔍 Scrapes web sources with javascript support 📂 Keeps track and context of visited and used web sources 📄 Export research reports to PDF, Word and more... Let's get started here!","s":"Features","u":"/docs/gpt-researcher/introduction","h":"#features","p":83},{"i":95,"t":"On this page","s":"Retrievers","u":"/docs/gpt-researcher/retrievers","h":"","p":94},{"i":97,"t":"GPT Researcher defaults to using the Tavily search engine for retrieving search results. But you can also use other search engines by specifying the RETRIEVER env var. Please note that each search engine has its own API Key requirements and usage limits. For example: RETRIEVER=bing Copy You can also specify multiple retrievers by separating them with commas. The system will use each specified retriever in sequence. For example: RETRIEVER=tavily, arxiv Copy Thanks to our community, we have integrated the following web search engines: Tavily - Default Bing - Env: RETRIEVER=bing Google - Env: RETRIEVER=google Serp API - Env: RETRIEVER=serpapi Serper - Env: RETRIEVER=serper Searx - Env: RETRIEVER=searx Duckduckgo - Env: RETRIEVER=duckduckgo Arxiv - Env: RETRIEVER=arxiv Exa - Env: RETRIEVER=exa PubMedCentral - Env: RETRIEVER=pubmed_central","s":"Web Search Engines","u":"/docs/gpt-researcher/retrievers","h":"#web-search-engines","p":94},{"i":99,"t":"You can also use any custom retriever of your choice by specifying the RETRIEVER=custom env var. Custom retrievers allow you to use any search engine that provides an API to retrieve documents, and are widely used for enterprise research tasks. In addition to setting the RETRIEVER env, you also need to set the following env vars: RETRIEVER_ENDPOINT: The endpoint URL of the custom retriever. Additional arguments required by the retriever should be prefixed with RETRIEVER_ARG_ (e.g., RETRIEVER_ARG_API_KEY).","s":"Custom Retrievers","u":"/docs/gpt-researcher/retrievers","h":"#custom-retrievers","p":94},{"i":101,"t":"RETRIEVER=customRETRIEVER_ENDPOINT=https://api.myretriever.comRETRIEVER_ARG_API_KEY=YOUR_API_KEY Copy","s":"Example","u":"/docs/gpt-researcher/retrievers","h":"#example","p":94},{"i":103,"t":"For the custom retriever to work correctly, the response from the endpoint should be in the following format: [ { \"url\": \"http://example.com/page1\", \"raw_content\": \"Content of page 1\" }, { \"url\": \"http://example.com/page2\", \"raw_content\": \"Content of page 2\" }] Copy The system assumes this response format and processes the list of sources accordingly. Missing a retriever? 
Feel free to contribute to this project by submitting issues or pull requests on our GitHub page.","s":"Response Format","u":"/docs/gpt-researcher/retrievers","h":"#response-format","p":94},{"i":105,"t":"On this page","s":"LangGraph","u":"/docs/gpt-researcher/langgraph","h":"","p":104},{"i":107,"t":"By using LangGraph, the research process can be significantly improved in depth and quality, as it leverages multiple agents with specialized skills. Inspired by the recent STORM paper, this example showcases how a team of AI agents can work together to conduct research on a given topic, from planning to publication. An average run generates a 5-6 page research report in multiple formats such as PDF, Docx and Markdown. Please note: This example uses the OpenAI API only for optimized performance.","s":"Use case","u":"/docs/gpt-researcher/langgraph","h":"#use-case","p":104},{"i":109,"t":"The research team is made up of 7 AI agents: Human - The human in the loop that oversees the process and provides feedback to the agents. Chief Editor - Oversees the research process and manages the team. This is the \"master\" agent that coordinates the other agents using LangGraph. Researcher (gpt-researcher) - A specialized autonomous agent that conducts in-depth research on a given topic. Editor - Responsible for planning the research outline and structure. Reviewer - Validates the correctness of the research results given a set of criteria. Revisor - Revises the research results based on the feedback from the reviewer. Writer - Responsible for compiling and writing the final report. Publisher - Responsible for publishing the final report in various formats.","s":"The Multi Agent Team","u":"/docs/gpt-researcher/langgraph","h":"#the-multi-agent-team","p":104},{"i":111,"t":"Generally, the process is based on the following stages: Planning stage Data collection and analysis Review and revision Writing and submission Publication","s":"How it works","u":"/docs/gpt-researcher/langgraph","h":"#how-it-works","p":104},{"i":114,"t":"More specifically (as seen in the architecture diagram), the process is as follows: Browser (gpt-researcher) - Browses the internet for initial research based on the given research task. Editor - Plans the report outline and structure based on the initial research. For each outline topic (in parallel): Researcher (gpt-researcher) - Runs in-depth research on the subtopics and writes a draft. Reviewer - Validates the correctness of the draft given a set of criteria and provides feedback. Revisor - Revises the draft until it is satisfactory based on the reviewer feedback. Writer - Compiles and writes the final report including an introduction, conclusion and references section from the given research findings. Publisher - Publishes the final report in multiple formats such as PDF, Docx, Markdown, etc.","s":"Steps","u":"/docs/gpt-researcher/langgraph","h":"#steps","p":104},{"i":116,"t":"Install required packages: pip install -r requirements.txt Copy Update env variables export OPENAI_API_KEY={Your OpenAI API Key here}export TAVILY_API_KEY={Your Tavily API Key here} Copy Run the application: python main.py Copy","s":"How to run","u":"/docs/gpt-researcher/langgraph","h":"#how-to-run","p":104},{"i":118,"t":"To change the research query and customize the report, edit the task.json file in the main directory. Task.json contains the following fields:​ query - The research query or task. model - The OpenAI LLM to use for the agents. max_sections - The maximum number of sections in the report. 
Each section is a subtopic of the research query. include_human_feedback - If true, the user can provide feedback to the agents. If false, the agents will work autonomously. publish_formats - The formats to publish the report in. The reports will be written in the output directory. source - The location from which to conduct the research. Options: web or local. For local, please add DOC_PATH env var. follow_guidelines - If true, the research report will follow the guidelines below. It will take longer to complete. If false, the report will be generated faster but may not follow the guidelines. guidelines - A list of guidelines that the report must follow. verbose - If true, the application will print detailed logs to the console. For example:​ { \"query\": \"Is AI in a hype cycle?\", \"model\": \"gpt-4o\", \"max_sections\": 3, \"publish_formats\": { \"markdown\": true, \"pdf\": true, \"docx\": true }, \"include_human_feedback\": false, \"source\": \"web\", \"follow_guidelines\": true, \"guidelines\": [ \"The report MUST fully answer the original question\", \"The report MUST be written in apa format\", \"The report MUST be written in english\" ], \"verbose\": true} Copy","s":"Usage","u":"/docs/gpt-researcher/langgraph","h":"#usage","p":104},{"i":120,"t":"pip install langgraph-clilanggraph up Copy From there, see documentation here on how to use the streaming and async endpoints, as well as the playground.","s":"To Deploy","u":"/docs/gpt-researcher/langgraph","h":"#to-deploy","p":104},{"i":122,"t":"The React app (located in frontend directory) is our Frontend 2.0 which we hope will enable us to display the robustness of the backend on the frontend, as well. It comes with loads of added features, such as: a drag-n-drop user interface for uploading and deleting files to be used as local documents by GPTResearcher. a GUI for setting your GPTR environment variables. the ability to trigger the multi_agents flow via the Backend Module or Langgraph Cloud Host (currently in closed beta). stability fixes and more coming soon!","s":"NextJS Frontend App","u":"/docs/gpt-researcher/langgraph","h":"#nextjs-frontend-app","p":104},{"i":124,"t":"Step 1 - Install Docker Step 2 - Clone the '.env.example' file, add your API Keys to the cloned file and save the file as '.env' Step 3 - Within the docker-compose file comment out services that you don't want to run with Docker. $ docker-compose up --build Copy Step 4 - By default, if you haven't uncommented anything in your docker-compose file, this flow will start 2 processes: the Python server running on localhost:8000 the React app running on localhost:3000 Visit localhost:3000 on any browser and enjoy researching!","s":"Run the NextJS React App with Docker","u":"/docs/gpt-researcher/langgraph","h":"#run-the-nextjs-react-app-with-docker","p":104},{"i":126,"t":"cd frontendnvm install 18.17.0nvm use v18.17.0npm install --legacy-peer-depsnpm run dev Copy","s":"Run the NextJS React App with NPM","u":"/docs/gpt-researcher/langgraph","h":"#run-the-nextjs-react-app-with-npm","p":104},{"i":128,"t":"Roadmap We're constantly working on additional features and improvements to our products and services. We're also working on new products and services to help you build better AI applications using GPT Researcher. Our vision is to build the #1 autonomous research agent for AI developers and researchers, and we're excited to have you join us on this journey! 
The roadmap is prioritized based on the following goals: Performance, Quality, Modularity and Conversational flexibility. The roadmap is public and can be found here. Interested in collaborating or contributing? Check out our contributing page for more information.","s":"Roadmap","u":"/docs/gpt-researcher/roadmap","h":"","p":127},{"i":130,"t":"On this page","s":"Troubleshooting","u":"/docs/gpt-researcher/troubleshooting","h":"","p":129},{"i":132,"t":"This relates to not having permission to use gpt-4 yet. According to OpenAI, it will be widely available to all by the end of July.","s":"model: gpt-4 does not exist","u":"/docs/gpt-researcher/troubleshooting","h":"#model-gpt-4-does-not-exist","p":129},{"i":134,"t":"The issue relates to the library WeasyPrint (which is used to generate PDFs from the research report). Please follow this guide to resolve it: https://doc.courtbouillon.org/weasyprint/stable/first_steps.html Or you can install this package manually. In case of MacOS you can install this lib using brew install glib pango If you face an issue with linking afterward, you can try running brew link glib In case of Linux you can install this lib using sudo apt install libglib2.0-dev","s":"cannot load library 'gobject-2.0-0'","u":"/docs/gpt-researcher/troubleshooting","h":"#cannot-load-library-gobject-20-0","p":129},{"i":136,"t":"In case of MacOS you can install this lib using brew install pango In case of Linux you can install this lib using sudo apt install libpango-1.0-0 Workaround for Mac M chip users If the above solutions don't work, you can try the following: Install a fresh version of Python 3.11 pointed to brew: brew install python@3.11 Install the required libraries: brew install pango glib gobject-introspection Install the required GPT Researcher Python packages: pip3.11 install -r requirements.txt Run the app with Python 3.11 (using brew): python3.11 -m uvicorn main:app --reload","s":"cannot load library 'pango'","u":"/docs/gpt-researcher/troubleshooting","h":"#cannot-load-library-pango","p":129},{"i":138,"t":"We're using Selenium for site scraping. Some sites fail to be scraped. In these cases, restart and try running again.","s":"Error processing the url","u":"/docs/gpt-researcher/troubleshooting","h":"#error-processing-the-url","p":129},{"i":140,"t":"Many users have an issue with their chromedriver because the latest chrome browser version doesn't have a compatible chrome driver yet. To downgrade your Chrome web browser using slimjet, follow these steps. First, visit the website and scroll down to find the list of available older Chrome versions. Choose the version you wish to install, making sure it's compatible with your operating system. Once you've selected the desired version, click on the corresponding link to download the installer. Before proceeding with the installation, it's crucial to uninstall your current version of Chrome to avoid conflicts. It's important to check that the version you downgrade to has a chromedriver available on the official chromedriver website. If none of the above work, you can try out our hosted beta","s":"Chrome version issues","u":"/docs/gpt-researcher/troubleshooting","h":"#chrome-version-issues","p":129},{"i":142,"t":"On this page","s":"PIP Package","u":"/docs/gpt-researcher/pip-package","h":"","p":141},{"i":144,"t":"Follow these easy steps to get started: Pre-requisite: Ensure Python 3.10+ is installed on your machine 💻 Install gpt-researcher: Grab the official package from PyPi. 
pip install gpt-researcher Copy Environment Variables: Create a .env file with your OpenAI API key or simply export it export OPENAI_API_KEY={Your OpenAI API Key here} Copy export TAVILY_API_KEY={Your Tavily API Key here} Copy Start using GPT Researcher in your own codebase","s":"Steps to Install GPT Researcher","u":"/docs/gpt-researcher/pip-package","h":"#steps-to-install-gpt-researcher","p":141},{"i":146,"t":"from gpt_researcher import GPTResearcherimport asyncioasync def get_report(query: str, report_type: str) -> str: researcher = GPTResearcher(query, report_type) research_result = await researcher.conduct_research() report = await researcher.write_report() return reportif __name__ == \"__main__\": query = \"what team may win the NBA finals?\" report_type = \"research_report\" report = asyncio.run(get_report(query, report_type)) print(report) Copy","s":"Example Usage 📝","u":"/docs/gpt-researcher/pip-package","h":"#example-usage-","p":141},{"i":149,"t":"query = \"Latest developments in renewable energy technologies\"report_type = \"research_report\" Copy","s":"Example 1: Research Report 📚","u":"/docs/gpt-researcher/pip-package","h":"#example-1-research-report-","p":141},{"i":151,"t":"query = \"List of top AI conferences in 2023\"report_type = \"resource_report\" Copy","s":"Example 2: Resource Report 📋","u":"/docs/gpt-researcher/pip-package","h":"#example-2-resource-report-","p":141},{"i":153,"t":"query = \"Outline for an article on the impact of AI in education\"report_type = \"outline_report\" Copy","s":"Example 3: Outline Report 📝","u":"/docs/gpt-researcher/pip-package","h":"#example-3-outline-report-","p":141},{"i":156,"t":"from fastapi import FastAPIfrom gpt_researcher import GPTResearcherimport asyncioapp = FastAPI()@app.get(\"/report/{report_type}\")async def get_report(query: str, report_type: str) -> dict: researcher = GPTResearcher(query, report_type) research_result = await researcher.conduct_research() report = await researcher.write_report() return {\"report\": report}# Run the server# uvicorn main:app --reload Copy","s":"FastAPI Example","u":"/docs/gpt-researcher/pip-package","h":"#fastapi-example","p":141},{"i":158,"t":"Pre-requisite: Install flask with the async extra. pip install 'flask[async]' Copy from flask import Flask, requestfrom gpt_researcher import GPTResearcherapp = Flask(__name__)@app.route('/report/', methods=['GET'])async def get_report(report_type): query = request.args.get('query') researcher = GPTResearcher(query, report_type) research_result = await researcher.conduct_research() report = await researcher.write_report() return report# Run the server# flask run Copy Run the server flask run Copy Example Request curl -X GET \"http://localhost:5000/report/research_report?query=what team may win the nba finals?\" Copy Note: The above code snippets are just examples. You can customize them as per your requirements.","s":"Flask Example","u":"/docs/gpt-researcher/pip-package","h":"#flask-example","p":141},{"i":160,"t":"If you're interested in getting more details about the research, you can use the following getters:","s":"Getters and Setters","u":"/docs/gpt-researcher/pip-package","h":"#getters-and-setters","p":141},{"i":162,"t":"Sources are the URLs that were used to gather information for the research. 
source_urls = researcher.get_source_urls() Copy","s":"Get Research Sources","u":"/docs/gpt-researcher/pip-package","h":"#get-research-sources","p":141},{"i":164,"t":"Context is all the retrieved information from the research. It includes the sources and their corresponding content. research_context = researcher.get_research_context() Copy","s":"Get Research Context","u":"/docs/gpt-researcher/pip-package","h":"#get-research-context","p":141},{"i":166,"t":"Costs are the number of tokens consumed during the research process. research_costs = researcher.get_costs() Copy","s":"Get Research Costs","u":"/docs/gpt-researcher/pip-package","h":"#get-research-costs","p":141},{"i":168,"t":"You can set the verbose mode to get more detailed logs. researcher.set_verbose(True) Copy","s":"Set Verbose","u":"/docs/gpt-researcher/pip-package","h":"#set-verbose","p":141},{"i":170,"t":"You can also add costs to the research process if you want to track the costs from external usage. researcher.add_costs(0.22) Copy","s":"Add Costs","u":"/docs/gpt-researcher/pip-package","h":"#add-costs","p":141},{"i":172,"t":"On this page","s":"Vector Stores","u":"/docs/gpt-researcher/vector-stores","h":"","p":171},{"i":174,"t":"from gpt_researcher import GPTResearcherfrom langchain.text_splitter import CharacterTextSplitterfrom langchain_openai import OpenAIEmbeddingsfrom langchain_community.vectorstores import FAISSfrom langchain_core.documents import Document# excerpt taken from - https://paulgraham.com/wealth.htmlessay = \"\"\"May 2004(This essay was originally published in Hackers & Painters.)If you wanted to get rich, how would you do it? I think your best bet would be to start or join a startup.That's been a reliable way to get rich for hundreds of years. The word \"startup\" dates from the 1960s,but what happens in one is very similar to the venture-backed trading voyages of the Middle Ages.Startups usually involve technology, so much so that the phrase \"high-tech startup\" is almost redundant.A startup is a small company that takes on a hard technical problem.Lots of people get rich knowing nothing more than that. You don't have to know physics to be a good pitcher.But I think it could give you an edge to understand the underlying principles. Why do startups have to be small?Will a startup inevitably stop being a startup as it grows larger?And why do they so often work on developing new technology? Why are there so many startups selling new drugs or computer software,and none selling corn oil or laundry detergent?The PropositionEconomically, you can think of a startup as a way to compress your whole working life into a few years.Instead of working at a low intensity for forty years, you work as hard as you possibly can for four.This pays especially well in technology, where you earn a premium for working fast.Here is a brief sketch of the economic proposition. If you're a good hacker in your mid twenties,you can get a job paying about $80,000 per year. So on average such a hacker must be able to do atleast $80,000 worth of work per year for the company just to break even. You could probably work twiceas many hours as a corporate employee, and if you focus you can probably get three times as much done in an hour.[1]You should get another multiple of two, at least, by eliminating the drag of the pointy-haired middle manager whowould be your boss in a big company. Then there is one more multiple: how much smarter are you than your jobdescription expects you to be? Suppose another multiple of three. 
Combine all these multipliers,and I'm claiming you could be 36 times more productive than you're expected to be in a random corporate job.[2]If a fairly good hacker is worth $80,000 a year at a big company, then a smart hacker working very hard without any corporate bullshit to slow him down should be able to do work worth about $3 million a year..........\"\"\"document = [Document(page_content=essay)]text_splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=30, separator=\"\\n\")docs = text_splitter.split_documents(documents=document)vector_store = FAISS.from_documents(docs, OpenAIEmbeddings())query = \"\"\" Summarize the essay into 3 or 4 succinct sections. Make sure to include key points regarding wealth creation. Include some recommendations for entrepreneurs in the conclusion.\"\"\"# Create an instance of GPTResearcherresearcher = GPTResearcher( query=query, report_type=\"research_report\", report_source=\"langchain_vectorstore\", vector_store=vector_store,)# Conduct research and write the reportawait researcher.conduct_research()report = await researcher.write_report() Copy","s":"Faiss","u":"/docs/gpt-researcher/vector-stores","h":"#faiss","p":171},{"i":176,"t":"from gpt_researcher import GPTResearcherfrom langchain_postgres.vectorstores import PGVectorfrom langchain_openai import OpenAIEmbeddingsCONNECTION_STRING = 'postgresql://someuser:somepass@localhost:5432/somedatabase'# assuming the vector store exists and contains the relevant documents# also assuming embeddings have been or will be generatedvector_store = PGVector.from_existing_index( use_jsonb=True, embedding=OpenAIEmbeddings(), collection_name='some collection name', connection=CONNECTION_STRING, async_mode=True,)query = \"\"\" Create a short report about apples. Include a section about which apples are considered best during each season.\"\"\"# Create an instance of GPTResearcherresearcher = GPTResearcher( query=query, report_type=\"research_report\", report_source=\"langchain_vectorstore\", vector_store=vector_store, )# Conduct research and write the reportawait researcher.conduct_research()report = await researcher.write_report() Copy","s":"PGVector","u":"/docs/gpt-researcher/vector-stores","h":"#pgvector","p":171},{"i":178,"t":"On this page","s":"Tailored Research","u":"/docs/gpt-researcher/tailored-research","h":"","p":177},{"i":180,"t":"You can specify the sources you want the GPT Researcher to research on by providing a list of URLs. GPT Researcher will then conduct research on the provided sources only. Simply pass the sources as the source_urls argument to the GPTResearcher class and the \"static\" report_source. 
from gpt_researcher import GPTResearcherimport asyncioasync def get_report(query: str, report_source: str, sources: list) -> str: researcher = GPTResearcher(query=query, report_source=report_source, source_urls=sources) research_context = await researcher.conduct_research() return await researcher.write_report()if __name__ == \"__main__\": query = \"What are the biggest trends in AI lately?\" report_source = \"static\" sources = [ \"https://en.wikipedia.org/wiki/Artificial_intelligence\", \"https://www.ibm.com/think/insights/artificial-intelligence-trends\", \"https://www.forbes.com/advisor/business/ai-statistics\" ] report = asyncio.run(get_report(query=query, report_source=report_source, sources=sources)) print(report) Copy","s":"Research on Specific Sources 📚","u":"/docs/gpt-researcher/tailored-research","h":"#research-on-specific-sources-","p":177},{"i":182,"t":"You can specify the agent prompt instruction upon which the research is conducted. This allows you to guide the research in a specific direction and tailor the report layout. Simply pass the prompt as the query argument to the GPTResearcher class and the \"custom_report\" report_type. from gpt_researcher import GPTResearcherimport asyncioasync def get_report(prompt: str, report_type: str) -> str: researcher = GPTResearcher(query=prompt, report_type=report_type) await researcher.conduct_research() report = await researcher.write_report() return report if __name__ == \"__main__\": report_type = \"custom_report\" prompt = \"Research the latest advancements in AI and provide a detailed report in APA format including sources.\" report = asyncio.run(get_report(prompt=prompt, report_type=report_type)) print(report) Copy","s":"Specify Agent Prompt 📝","u":"/docs/gpt-researcher/tailored-research","h":"#specify-agent-prompt-","p":177},{"i":184,"t":"You can instruct the GPT Researcher to research on local documents by providing the path to those documents. Currently supported file formats are: PDF, plain text, CSV, Excel, Markdown, PowerPoint, and Word documents. Step 1: Add the env variable DOC_PATH pointing to the folder where your documents are located. For example: export DOC_PATH=\"./my-docs\" Copy Step 2: When you create an instance of the GPTResearcher class, pass the report_source argument as \"local\". GPT Researcher will then conduct research on the provided documents. from gpt_researcher import GPTResearcherimport asyncioasync def get_report(query: str, report_source: str) -> str: researcher = GPTResearcher(query=query, report_source=report_source) await researcher.conduct_research() report = await researcher.write_report() return report if __name__ == \"__main__\": query = \"What can you tell me about myself based on my documents?\" report_source = \"local\" # \"local\" or \"web\" report = asyncio.run(get_report(query=query, report_source=report_source)) print(report) Copy","s":"Research on Local Documents 📄","u":"/docs/gpt-researcher/tailored-research","h":"#research-on-local-documents-","p":177},{"i":186,"t":"You can combine the above methods to conduct hybrid research. For example, you can instruct the GPT Researcher to research on both web sources and local documents. Simply provide the sources and set the report_source argument as \"hybrid\" and watch the magic happen. Please note! You should set the proper retrievers for the web sources and doc path for local documents for this to work. 
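As a minimal sketch of what hybrid research could look like (assuming the same GPTResearcher interface as the examples above; the Tavily key, the DOC_PATH folder and the query are placeholder assumptions, not values from this documentation):

```python
import os
import asyncio
from gpt_researcher import GPTResearcher

# Assumed setup: a web retriever key and a folder of local documents
os.environ["TAVILY_API_KEY"] = "your-tavily-key"  # placeholder
os.environ["DOC_PATH"] = "./my-docs"              # placeholder local docs path

async def get_report(query: str) -> str:
    # "hybrid" combines web sources with the local documents under DOC_PATH
    researcher = GPTResearcher(query=query, report_source="hybrid")
    await researcher.conduct_research()
    return await researcher.write_report()

if __name__ == "__main__":
    query = "How do the latest AI trends compare to what my documents describe?"
    print(asyncio.run(get_report(query)))
```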
To learn more about retrievers, check out the Retrievers documentation.","s":"Hybrid Research 🔄","u":"/docs/gpt-researcher/tailored-research","h":"#hybrid-research-","p":177},{"i":188,"t":"You can instruct the GPT Researcher to research on a list of LangChain document instances. For example: from langchain_core.documents import Documentfrom typing import List, Dictfrom gpt_researcher import GPTResearcherfrom langchain_postgres.vectorstores import PGVectorfrom langchain_openai import OpenAIEmbeddingsfrom sqlalchemy import create_engineimport asyncioCONNECTION_STRING = 'postgresql://someuser:somepass@localhost:5432/somedatabase'def get_retriever(collection_name: str, search_kwargs: Dict[str, str]): engine = create_engine(CONNECTION_STRING) embeddings = OpenAIEmbeddings() index = PGVector.from_existing_index( use_jsonb=True, embedding=embeddings, collection_name=collection_name, connection=engine, ) return index.as_retriever(search_kwargs=search_kwargs)async def get_report(query: str, report_type: str, report_source: str, documents: List[Document]) -> str: researcher = GPTResearcher(query=query, report_type=report_type, report_source=report_source, documents=documents) await researcher.conduct_research() report = await researcher.write_report() return reportif __name__ == \"__main__\": query = \"What can you tell me about blue cheese based on my documents?\" report_type = \"research_report\" report_source = \"langchain_documents\" # using a LangChain retriever to get all the documents regarding cheese # https://api.python.langchain.com/en/latest/retrievers/langchain_core.retrievers.BaseRetriever.html#langchain_core.retrievers.BaseRetriever.invoke langchain_retriever = get_retriever(\"cheese_collection\", { \"k\": 3 }) documents = langchain_retriever.invoke(\"All the documents about cheese\") report = asyncio.run(get_report(query=query, report_type=report_type, report_source=report_source, documents=documents)) print(report) Copy","s":"Research on LangChain Documents 🦜️🔗","u":"/docs/gpt-researcher/tailored-research","h":"#research-on-langchain-documents-️","p":177},{"i":190,"t":"On this page","s":"config.singleton","u":"/docs/reference/config/singleton","h":"","p":189},{"i":192,"t":"class Singleton(abc.ABCMeta, type) Copy Singleton metaclass for ensuring only one instance of a class. __call__​ def __call__(cls, *args, **kwargs) Copy Call method for the singleton metaclass.","s":"Singleton Objects","u":"/docs/reference/config/singleton","h":"#singleton-objects","p":189},{"i":194,"t":"class AbstractSingleton(abc.ABC, metaclass=Singleton) Copy Abstract singleton class for ensuring only one instance of a class.","s":"AbstractSingleton Objects","u":"/docs/reference/config/singleton","h":"#abstractsingleton-objects","p":189},{"i":196,"t":"On this page","s":"processing.html","u":"/docs/reference/processing/html","h":"","p":195},{"i":198,"t":"On this page","s":"processing.text","u":"/docs/reference/processing/text","h":"","p":197},{"i":200,"t":"On this page","s":"Welcome","u":"/docs/welcome","h":"","p":199},{"i":202,"t":"Quickly accessing relevant and trustworthy information is more crucial than ever. However, we've learned that none of today's search engines provides a suitable tool that delivers factual, explicit and objective answers without the need to continuously click and explore multiple sites for a given research task. This is why we've built the trending open source GPT Researcher. 
GPT Researcher is an autonomous agent that takes care of the tedious task of research for you, by scraping, filtering and aggregating 20+ web sources per research task. To learn more about GPT Researcher, check out the documentation page.","s":"GPT Researcher","u":"/docs/welcome","h":"#gpt-researcher","p":199},{"i":204,"t":"On this page","s":"Configure LLM","u":"/docs/gpt-researcher/llms","h":"","p":203},{"i":206,"t":"Create a local OpenAI API using llama.cpp Server.","s":"Custom OpenAI","u":"/docs/gpt-researcher/llms","h":"#custom-openai","p":203},{"i":208,"t":"# use a custom OpenAI API LLM providerLLM_PROVIDER=\"openai\"# set the custom OpenAI API urlOPENAI_BASE_URL=\"http://localhost:1234/v1\"# set the custom OpenAI API keyOPENAI_API_KEY=\"Your Key\"# specify the custom OpenAI API llm model FAST_LLM_MODEL=\"gpt-4o-mini\"# specify the custom OpenAI API llm model SMART_LLM_MODEL=\"gpt-4o\" Copy","s":"Custom OpenAI API LLM","u":"/docs/gpt-researcher/llms","h":"#custom-openai-api-llm","p":203},{"i":210,"t":"# use a custom OpenAI API EMBEDDING providerEMBEDDING_PROVIDER=\"custom\"# set the custom OpenAI API urlOPENAI_BASE_URL=\"http://localhost:1234/v1\"# set the custom OpenAI API keyOPENAI_API_KEY=\"Your Key\"# specify the custom OpenAI API embedding model OPENAI_EMBEDDING_MODEL=\"custom_model\" Copy","s":"Custom OpenAI API Embedding","u":"/docs/gpt-researcher/llms","h":"#custom-openai-api-embedding","p":203},{"i":212,"t":"See also the documentation in the Langchain Azure OpenAI page. On Azure OpenAI you will need to create deployments for each model you want to use. Please also specify the model names/deployment names in your .env file: EMBEDDING_PROVIDER=\"azure_openai\"AZURE_OPENAI_API_KEY=\"Your key\"AZURE_OPENAI_ENDPOINT=\"https://.openai.azure.com/\"OPENAI_API_VERSION=\"2024-05-01-preview\"FAST_LLM_MODEL=\"gpt-4o-mini\"DEFAULT_LLM_MODEL=\"gpt-4o-mini\"SMART_LLM_MODEL=\"gpt-4o\" Copy","s":"Azure OpenAI","u":"/docs/gpt-researcher/llms","h":"#azure-openai","p":203},{"i":214,"t":"GPT Researcher supports both Ollama LLMs and embeddings. You can choose each or both. To use Ollama you can set the following environment variables: # Use ollama for both, LLM and EMBEDDING providerLLM_PROVIDER=ollama# Ollama endpoint to useOLLAMA_BASE_URL=http://localhost:11434# Specify one of the LLM models supported by OllamaFAST_LLM_MODEL=llama3# Specify one of the LLM models supported by Ollama SMART_LLM_MODEL=llama3 # The temperature to use, defaults to 0.55TEMPERATURE=0.55 Copy Optional - You can also use ollama for embeddings EMBEDDING_PROVIDER=ollama# Specify one of the embedding models supported by Ollama OLLAMA_EMBEDDING_MODEL=nomic-embed-text Copy","s":"Ollama","u":"/docs/gpt-researcher/llms","h":"#ollama","p":203},{"i":216,"t":"GroqCloud provides advanced AI hardware and software solutions designed to deliver amazingly fast AI inference performance. To leverage Groq in GPT-Researcher, you will need a GroqCloud account and an API Key. 
(NOTE: Groq has a very generous free tier.)","s":"Groq","u":"/docs/gpt-researcher/llms","h":"#groq","p":203},{"i":218,"t":"You can sign up here: https://console.groq.com/login Once you are logged in, you can get an API Key here: https://console.groq.com/keys Once you have an API key, you will need to add it to your system's environment using the variable name: GROQ_API_KEY=\"*********************\"","s":"Sign up","u":"/docs/gpt-researcher/llms","h":"#sign-up","p":203},{"i":220,"t":"And finally, you will need to configure the GPT-Researcher Provider and Model variables: # To use Groq set the llm provider to groqLLM_PROVIDER=groqGROQ_API_KEY=[Your Key]# Set one of the LLM models supported by GroqFAST_LLM_MODEL=Mixtral-8x7b-32768# Set one of the LLM models supported by GroqSMART_LLM_MODEL=Mixtral-8x7b-32768 # The temperature to use defaults to 0.55TEMPERATURE=0.55 Copy NOTE: As of the writing of this Doc (May 2024), the available Language Models from Groq are: Llama3-70b-8192 Llama3-8b-8192 Mixtral-8x7b-32768 Gemma-7b-it","s":"Update env vars","u":"/docs/gpt-researcher/llms","h":"#update-env-vars","p":203},{"i":222,"t":"Anthropic is an AI safety and research company, and is the creator of Claude. This page covers all integrations between Anthropic models and LangChain. LLM_PROVIDER=anthropicANTHROPIC_API_KEY=[Your key] Copy You can then define the fast and smart LLM models, for example: FAST_LLM_MODEL=claude-2.1SMART_LLM_MODEL=claude-3-opus-20240229 Copy","s":"Anthropic","u":"/docs/gpt-researcher/llms","h":"#anthropic","p":203},{"i":224,"t":"Sign up for a Mistral API key. Then update the corresponding env vars, for example: LLM_PROVIDER=mistralMISTRAL_API_KEY=[Your key]FAST_LLM_MODEL=open-mistral-7bSMART_LLM_MODEL=mistral-large-latest Copy","s":"Mistral","u":"/docs/gpt-researcher/llms","h":"#mistral","p":203},{"i":226,"t":"Together AI offers an API to query 50+ leading open-source models in a couple of lines of code. Then update the corresponding env vars, for example: LLM_PROVIDER=togetherTOGETHER_API_KEY=[Your key]FAST_LLM_MODEL=meta-llama/Llama-3-8b-chat-hfSMART_LLM_MODEL=meta-llama/Llama-3-70b-chat-hf Copy","s":"Together AI","u":"/docs/gpt-researcher/llms","h":"#together-ai","p":203},{"i":228,"t":"This integration requires a bit of extra work. Follow this guide to learn more. After you've followed the tutorial above, update the env vars: LLM_PROVIDER=huggingfaceHUGGINGFACE_API_KEY=[Your key]FAST_LLM_MODEL=HuggingFaceH4/zephyr-7b-betaSMART_LLM_MODEL=HuggingFaceH4/zephyr-7b-beta Copy","s":"HuggingFace","u":"/docs/gpt-researcher/llms","h":"#huggingface","p":203},{"i":230,"t":"Sign up here to obtain a Google Gemini API Key and update the following env vars: Please make sure to update fast and smart models to corresponding valid Gemini models. LLM_PROVIDER=googleGEMINI_API_KEY=[Your key] Copy","s":"Google Gemini","u":"/docs/gpt-researcher/llms","h":"#google-gemini","p":203},{"i":232,"t":"On this page","s":"config.config","u":"/docs/reference/config/config","h":"","p":231},{"i":234,"t":"class Config(metaclass=Singleton) Copy Configuration class to store the state of bools for different scripts access. __init__​ def __init__() -> None Copy Initialize the Config class set_fast_llm_model​ def set_fast_llm_model(value: str) -> None Copy Set the fast LLM model value. set_smart_llm_model​ def set_smart_llm_model(value: str) -> None Copy Set the smart LLM model value. 
set_fast_token_limit def set_fast_token_limit(value: int) -> None Set the fast token limit value. set_smart_token_limit def set_smart_token_limit(value: int) -> None Set the smart token limit value. set_browse_chunk_max_length def set_browse_chunk_max_length(value: int) -> None Set the browse_website command chunk max length value. set_openai_api_key def set_openai_api_key(value: str) -> None Set the OpenAI API key value. set_debug_mode def set_debug_mode(value: bool) -> None Set the debug mode value.","s":"Config Objects","u":"/docs/reference/config/config","h":"#config-objects","p":231},{"i":236,"t":"class APIKeyError(Exception) Exception raised when an API key is not set in config.py or as an environment variable. check_openai_api_key def check_openai_api_key(cfg) -> None Check if the OpenAI API key is set in config.py or as an environment variable. check_tavily_api_key def check_tavily_api_key(cfg) -> None Check if the Tavily Search API key is set in config.py or as an environment variable. check_google_api_key def check_google_api_key(cfg) -> None Check if the Google API key is set in config.py or as an environment variable. check_serp_api_key def check_serp_api_key(cfg) -> None Check if the SERP API key is set in config.py or as an environment variable. check_searx_url def check_searx_url(cfg) -> None Check if the Searx URL is set in config.py or as an environment variable.","s":"APIKeyError Objects","u":"/docs/reference/config/config","h":"#apikeyerror-objects","p":231}],"index":{"version":"2.3.9","fields":["t"],"fieldVectors":[["t/3",[0,5.865,1,5.773,2,5.773,3,3.689,4,3.444,5,3.414,6,4.772,7,4.772,8,4.648,9,1.911,10,5.095,11,5.095,12,5.095,13,3.826,14,4.314,15,5.095]],["t/5",[9,2.408]],["t/7",[16,2.266,17,1.924,18,2.786,19,3.101,20,2.115,21,2.659,22,1.967,23,3.305,24,3.305,25,4.423,26,1.624,27,1.082,28,2.453,29,2.748,30,3.305,31,2.786,32,3.903,33,3.903,34,4.423,35,2.931,36,2.786,37,3.305,38,2.786,39,3.101,40,3.305,41,3.561,42,2.115,43,1.984,44,2.992,45,2.444,46,2.351,47,2.351,48,2.351,49,3.561,50,2.786,51,2.786,52,4.423,53,0.75]],["t/9",[9,2.408]],["t/11",[3,2.586,4,3.16,5,2.875,6,4.883,27,1.075,54,5.048,55,3.54,56,3.54,57,5.321,58,4.455,59,2.012,60,3.179,61,3.773,62,4.162,63,3.773,64,2.562,65,3.16,66,3.179,67,3.773,68,2.905,69,1.854,70,2.683,71,1.59,72,1.676,73,3.034,74,3.773,75,2.07,76,4.064,77,2.586]],["t/13",[4,2.175,5,1.978,9,1.506,27,1.206,63,3.399,65,2.175,68,2.617,69,2.264,70,2.418,71,1.432,72,1.774,78,4.548,79,2.418,80,3.399,81,4.014,82,2.264,83,2.734,84,3.189,85,2.865,86,3.189,87,3.399,88,2.25,89,3.014,90,3.399,91,3.189,92,2.865,93,4.548,94,2.513,95,2.25,96,3.189,97,2.734,98,3.662,99,4.548,100,3.014,101,3.189,102,2.33,103,4.014,104,3.662,105,4.014,106,3.662,107,2.418,108,2.513,109,3.189,110,1.506]],["t/15",[26,2.139,27,1.057,72,1.837,82,2.139,89,3.861,90,4.353,95,2.882,102,2.985,111,1.254,112,5.412,113,4.69,114,5.825,115,3.501,116,4.69,117,4.353,118,4.353,119,3.669,120,4.085,121,3.096,122,3.352]],["t/17",[5,2.145,27,0.72,59,2.904,69,2.39,82,1.811,84,3.458,92,3.106,98,3.971,110,1.633,111,1.667,120,3.458,123,4.313,124,3.37,125,1.912,126,3.106,127,7.284,128,3.106,129,6.508,130,4.932,131,4.932,132,3.912,133,4.352,134,3.971,135,4.932,136,2.838,137,3.686,138,2.283,139,2.964,140,3.458]],["t/19",[4,2.222,5,2.021,6,3.08,7,3.08,8,5.036,9,1.538,13,3.08,27,0.913,57,3.741,64,1.801,68,2.674,69,2.297,71,1.463,72,1.179,97,2.793,110,2.071,117,5.284,118,3.473,121,3.758,141,2.985,142,3.08,143,3.473,144,2.567,145,3.76,146,3.08,1
47,3.473,148,4.647,149,5.036,150,2.793,151,2.793,152,3.473,153,4.101,154,3.741,155,3.473,156,3.258,157,4.647,158,2.927,159,2.674,160,4.647]],["t/21",[27,1.005,71,2.168,72,1.747,161,6.886,162,4.337,163,4.564]],["t/24",[4,1.659,5,1.509,17,1.509,22,0.782,27,0.88,38,2.185,59,2.821,66,3.207,67,2.592,70,1.844,71,2.33,72,1.291,92,2.185,96,2.432,97,2.085,98,2.793,102,1.777,109,2.432,110,1.148,111,0.747,119,2.185,138,1.606,141,1.465,144,2.813,155,2.592,156,2.432,162,3.207,163,2.299,164,4.493,165,3.061,166,2.793,167,2.793,168,3.061,169,4.493,170,2.432,171,2.793,172,3.469,173,2.432,174,3.805,175,1.916,176,3.061,177,3.469,178,5.091,179,2.793,180,3.061,181,2.592,182,2.299,183,1.606,184,4.856,185,3.469,186,2.432,187,3.469,188,3.469,189,2.432,190,2.432,191,2.592,192,3.469,193,3.469,194,3.469,195,2.432,196,3.061,197,3.061,198,1.556,199,2.793,200,3.469,201,3.469,202,3.061,203,3.469,204,2.432,205,3.469,206,2.706,207,3.061,208,2.793,209,2.793,210,3.469,211,2.793]],["t/26",[17,2.595,22,0.978,56,3.039,66,2.73,71,2.508,91,3.039,107,2.304,111,0.933,125,2.312,142,2.872,162,2.73,175,2.394,186,4.783,206,3.626,212,4.334,213,3.039,214,3.16,215,2.494,216,3.824,217,4.334,218,3.824,219,3.824,220,2.872,221,4.334,222,3.433,223,3.239,224,4.334,225,3.489,226,3.768,227,4.334,228,3.489,229,2.494,230,2.872,231,3.239,232,3.824,233,3.824,234,4.334,235,3.824,236,2.872,237,4.334,238,3.824,239,4.334,240,2.872]],["t/28",[27,1.094,42,2.175,43,2.04,59,1.813,70,2.418,71,2.361,72,1.154,75,2.528,76,3.662,94,3.864,106,3.662,107,2.418,121,2.418,128,2.865,137,3.399,141,1.92,146,3.014,152,3.399,158,2.865,170,3.189,189,3.189,206,2.418,222,2.617,241,2.105,242,2.734,243,3.014,244,4.323,245,4.964,246,3.399,247,3.014,248,2.865,249,3.014,250,3.662,251,4.548,252,4.548,253,3.189,254,4.014,255,3.662,256,3.662,257,3.014,258,2.513,259,2.617,260,3.189]],["t/30",[21,2.38,22,1.795,27,1.161,28,1.854,42,1.894,43,2.512,46,2.105,70,2.105,71,2.348,72,1.004,95,1.959,111,0.852,183,2.593,206,2.977,241,1.833,242,2.38,244,2.777,257,3.712,258,2.188,259,2.279,261,2.959,262,3.96,263,3.495,264,4.186,265,3.495,266,2.625,267,3.495,268,3.495,269,3.96,270,2.959,271,3.74,272,2.279,273,2.625,274,3.095,275,2.777,276,2.625,277,2.977,278,1.535,279,2.959,280,3.367,281,1.624,282,2.494,283,2.777,284,2.959,285,3.367,286,2.777,287,2.029]],["t/32",[7,1.751,22,1.57,26,0.97,27,1.162,28,2.078,38,2.608,43,2.592,46,2.714,61,1.975,71,0.832,72,1.295,82,1.875,94,1.46,95,1.307,101,1.853,102,1.354,107,1.404,110,0.875,124,1.149,125,1.024,126,1.664,132,1.588,150,2.489,162,1.664,183,2.364,241,2.364,242,2.489,247,1.751,258,2.288,259,3.326,260,1.853,264,3.094,271,1.521,272,2.938,273,1.751,274,2.821,275,1.853,276,1.751,278,1.024,280,3.474,281,2.73,282,2.608,283,1.853,284,1.975,285,2.489,287,1.354,288,1.664,289,1.664,290,2.332,291,1.443,292,2.332,293,2.127,294,1.975,295,1.664,296,2.127,297,1.307,298,1.975,299,1.46,300,1.975,301,2.332,302,2.642,303,2.127,304,2.127,305,1.664,306,3.094,307,1.223,308,1.853,309,4.111,310,1.354,311,1.521,312,2.332,313,2.642,314,2.127,315,2.332,316,2.332,317,1.404,318,1.975,319,1.588,320,2.332,321,1.664,322,1.664,323,1.354,324,1.588,325,1.853,326,1.664,327,2.332,328,1.751,329,2.642,330,2.642,331,2.332,332,2.332,333,2.332,334,1.975,335,2.332,336,2.332]],["t/34",[4,0.78,16,0.835,17,1.212,20,1.745,21,2.193,22,1.655,27,0.868,28,1.748,29,1.111,38,3.327,41,1.313,42,0.78,43,0.731,44,1.378,45,0.901,46,0.867,47,0.867,48,1.481,53,0.732,58,1.439,60,1.755,66,1.027,69,0.599,71,2.143,72,0.414,73,0.98,75,0.669,79,0.867,82,1.023,111,0.351,136,0.938,138,0.755,139,0.98,147,1.219,150,2.193,151
,0.98,158,1.027,162,1.027,169,1.439,175,2.015,181,2.082,182,1.081,183,0.755,184,2.243,190,3.957,206,2.577,213,1.143,214,1.496,215,1.603,222,1.603,229,0.938,240,2.418,241,0.755,244,1.143,245,1.313,247,1.081,248,1.027,261,1.219,266,1.081,271,0.938,273,1.081,274,0.901,278,1.08,280,0.98,281,1.143,283,1.143,285,0.98,288,1.755,291,0.971,299,0.901,300,1.219,307,2.244,310,0.835,311,0.938,318,1.219,319,0.98,321,1.027,327,1.439,328,1.081,337,1.603,338,1.631,339,1.631,340,2.786,341,1.439,342,2.558,343,1.631,344,1.439,345,1.439,346,3.808,347,4.445,348,1.631,349,1.439,350,1.439,351,1.439,352,1.219,353,1.631,354,1.313,355,0.65,356,1.313,357,2.786,358,0.938,359,1.631,360,2.243,361,1.631,362,3.423,363,2.786,364,1.631,365,1.631,366,2.459,367,1.631,368,1.631,369,1.631,370,2.577,371,1.439,372,1.219,373,1.313,374,1.631,375,1.631,376,1.027,377,1.219,378,0.98,379,1.631,380,1.219,381,1.631,382,1.631,383,2.786,384,1.313,385,1.313,386,1.631,387,1.631,388,2.786,389,1.631,390,1.143,391,1.143,392,1.631,393,1.631,394,1.313,395,1.631,396,2.243,397,0.98,398,1.027,399,1.081,400,1.439,401,1.631,402,1.631,403,1.631,404,1.631,405,1.631,406,1.631,407,1.631,408,1.631,409,1.631,410,1.631,411,1.631,412,1.631,413,1.631,414,1.631,415,1.631,416,1.439,417,1.631,418,1.631,419,1.631,420,1.631,421,1.631,422,1.631,423,1.439,424,1.631,425,1.439,426,1.631,427,1.847,428,1.631,429,1.631,430,1.219,431,1.439,432,1.313,433,1.219,434,1.631,435,1.631,436,1.631,437,1.143,438,1.313,439,1.219,440,1.631,441,1.313,442,1.439,443,1.439,444,1.439,445,1.439]],["t/36",[3,0.819,4,0.764,5,0.695,9,0.529,13,1.059,17,0.695,20,0.764,21,1.646,22,1.26,26,2.051,27,0.932,28,1.849,29,0.637,43,0.717,53,0.609,59,0.637,68,0.92,69,1.006,70,0.849,71,1.508,72,0.695,73,0.961,75,0.655,79,0.849,80,1.194,82,1.32,91,1.121,94,0.883,102,0.819,111,0.344,115,0.961,119,1.007,124,0.695,138,0.74,141,1.798,150,0.961,151,0.961,163,3.175,173,1.121,183,2.217,184,1.287,190,3.359,198,0.717,206,2.546,211,1.287,214,1.123,226,0.883,231,2.047,236,1.059,240,1.059,241,0.74,246,2.047,258,1.513,274,3.527,277,0.849,278,1.062,280,3.7,281,0.655,287,1.403,288,1.007,297,0.791,299,0.883,305,1.725,306,3.58,307,1.971,308,1.121,309,3.856,312,1.41,314,2.893,317,0.849,321,2.263,323,0.819,324,1.646,325,1.92,326,1.725,331,1.41,332,2.417,337,0.92,342,3.359,346,1.41,347,4.175,355,0.637,356,2.205,358,0.92,360,2.893,362,2.068,377,2.047,391,1.121,396,2.205,397,0.961,399,1.059,400,1.41,416,1.41,425,1.41,427,1.815,438,2.893,443,1.41,444,1.41,446,2.205,447,1.598,448,1.41,449,1.121,450,2.417,451,1.598,452,4.258,453,1.598,454,4.79,455,1.287,456,1.059,457,1.41,458,1.194,459,1.598,460,1.598,461,1.598,462,1.598,463,1.598,464,1.576,465,1.194,466,1.598,467,1.598,468,1.598,469,1.598,470,1.598,471,1.598,472,1.598,473,1.598,474,1.598,475,1.598,476,1.598,477,1.598,478,1.598,479,1.598,480,1.598,481,1.598,482,2.739,483,1.598,484,1.725,485,1.41,486,2.417,487,1.121,488,1.287,489,1.007,490,1.598,491,1.194,492,0.819,493,0.92,494,1.41,495,1.41,496,0.791,497,1.41,498,2.417,499,0.849,500,1.41,501,2.893,502,2.417,503,2.417,504,1.41,505,2.417,506,1.121,507,0.92,508,1.598,509,1.598,510,1.41,511,1.598,512,1.598,513,0.92,514,1.598,515,1.598,516,1.598,517,1.598,518,1.41,519,1.121,520,1.598,521,1.41,522,1.598,523,1.41]],["t/38",[3,2.163,5,1.178,12,2.391,13,1.795,14,2.024,27,0.757,28,0.897,35,1.795,43,2.327,56,1.899,57,2.181,59,2.068,68,1.559,69,0.995,71,1.33,72,0.687,73,1.628,77,1.388,82,1.55,88,1.34,91,1.899,102,2.163,110,0.897,111,0.583,121,1.44,123,1.795,134,2.181,138,1.954,150,1.628,154,2.181,158,1.706,159,1.559,162,1.706,163,3.439,168,2.39
1,174,2.024,183,1.254,186,1.899,191,2.024,204,1.899,206,2.244,220,1.795,225,2.181,229,1.559,236,2.798,243,2.798,246,3.155,247,1.795,248,1.706,258,1.497,259,2.986,272,1.559,276,1.795,277,1.44,280,1.628,281,1.732,282,1.706,288,1.706,289,1.706,297,1.34,301,2.391,347,3.155,349,2.391,354,2.181,355,1.683,376,2.659,378,1.628,385,2.181,399,1.795,432,4.177,446,2.181,458,2.024,487,1.899,493,1.559,507,2.43,519,1.899,524,2.709,525,3.877,526,2.181,527,2.181,528,2.181,529,2.024,530,2.181,531,2.709,532,2.709,533,2.024,534,2.709,535,2.709,536,1.795,537,1.706,538,2.391,539,2.709,540,2.709,541,1.559,542,2.024,543,2.709,544,3.726,545,2.391,546,2.181,547,2.709,548,2.391,549,2.709,550,2.391,551,2.391,552,2.024,553,2.709,554,2.391,555,2.391,556,2.709,557,2.024,558,2.391,559,2.024,560,2.391,561,2.024,562,2.709,563,1.899,564,1.706,565,2.709,566,2.181,567,2.709,568,2.181]],["t/40",[15,2.875,22,0.735,27,1.124,42,1.558,59,1.937,68,1.875,69,1.785,70,2.584,71,1.026,72,0.826,73,1.958,79,1.732,82,2.367,94,1.8,111,1.046,118,2.435,124,2.114,136,1.875,140,2.284,141,1.375,174,2.435,191,2.435,204,2.284,214,1.993,228,2.623,229,1.875,230,2.159,231,2.435,285,1.958,297,2.404,319,1.958,322,2.052,323,1.669,334,2.435,336,4.289,384,2.623,427,2.159,437,2.284,492,1.669,513,1.875,525,2.435,526,2.623,528,2.623,551,2.875,561,2.435,563,2.284,564,3.061,569,5.131,570,3.258,571,3.258,572,3.258,573,3.258,574,3.258,575,4.272,576,2.052,577,2.284,578,4.289,579,2.875,580,2.284,581,2.284,582,1.958,583,2.875,584,3.913,585,3.258,586,2.875,587,2.875,588,3.258,589,4.289,590,1.612,591,2.875,592,4.86,593,3.258,594,2.159,595,3.258,596,2.159,597,2.623,598,3.258,599,2.284,600,3.258,601,2.435,602,3.258,603,2.052]],["t/42",[5,1.509,27,1.033,56,2.432,71,1.899,82,2.808,110,1.148,121,2.706,125,1.345,159,1.996,173,2.432,176,3.061,182,2.299,189,2.432,190,2.432,214,1.423,231,3.805,243,2.299,249,2.299,255,4.099,259,3.471,261,2.592,272,2.93,278,1.345,291,1.209,297,2.519,341,3.061,345,3.061,370,1.844,371,3.061,372,2.592,437,4.229,449,2.432,456,2.299,492,1.777,525,2.592,533,4.508,548,3.061,582,2.085,591,3.061,594,4.404,603,3.207,604,3.061,605,3.469,606,3.469,607,3.061,608,3.469,609,2.793,610,3.469,611,3.061,612,4.099,613,2.592,614,2.592,615,3.469,616,3.469,617,3.469,618,5.091,619,3.469,620,3.469,621,3.805,622,3.469,623,3.061,624,3.469,625,2.592,626,3.469,627,2.592,628,3.061,629,3.061,630,3.469]],["t/44",[5,1.287,22,0.667,27,0.432,43,2.459,72,0.75,82,1.086,83,2.716,94,1.634,95,3.036,102,1.516,110,0.979,120,3.169,121,1.572,124,2.67,126,4.165,132,1.778,137,2.211,139,3.974,140,3.169,144,1.634,146,1.961,170,3.169,173,2.074,182,1.961,197,2.611,198,1.327,214,1.213,220,1.961,236,1.961,238,3.988,249,1.961,258,2.497,271,1.702,274,1.634,277,3.263,278,1.147,279,2.211,291,1.031,298,2.211,300,2.211,304,3.638,372,2.211,373,2.382,384,2.382,398,2.846,442,2.611,450,2.611,455,3.638,492,1.516,510,3.988,513,1.702,521,2.611,530,2.382,533,2.211,536,1.961,552,2.211,576,1.863,583,2.611,586,2.611,596,1.961,612,3.638,613,3.377,614,2.211,631,2.611,632,2.958,633,5.835,634,2.958,635,2.958,636,2.958,637,2.958,638,2.611,639,2.958,640,4.519,641,3.988,642,3.169,643,3.988,644,2.611,645,3.845,646,2.611,647,2.958,648,3.638,649,2.958,650,2.958,651,1.778,652,3.377,653,2.958,654,2.958]],["t/46",[21,2.02,22,1.476,27,0.955,29,1.339,44,2.46,53,0.57,71,1.864,75,1.378,79,3.147,82,2.686,83,2.989,87,3.717,102,1.722,113,4.004,120,2.356,141,2.5,167,2.705,170,2.356,171,4.004,214,2.04,240,2.227,277,1.786,281,1.378,305,2.116,306,4.425,307,1.555,310,1.722,323,1.722,337,3.407,370,1.786,398,2.116,399,2.227,433,2.511,455,2.
705,456,2.227,492,1.722,519,2.356,530,2.705,569,4.389,582,2.02,590,1.662,596,2.227,613,2.511,627,2.511,645,2.356,652,3.717,655,2.965,656,2.965,657,2.989,658,2.356,659,4.767,660,2.965,661,2.965,662,3.36,663,2.705,664,2.965,665,3.36,666,3.36,667,3.36,668,3.36,669,3.36,670,2.989,671,3.36,672,3.36,673,3.36,674,3.36,675,3.36,676,2.356,677,2.511,678,3.36,679,2.965]],["t/48",[22,1.109,23,2.472,27,0.717,28,2.408,43,2.631,46,3.452,53,0.561,68,1.904,69,1.805,72,1.488,82,2.154,85,3.096,86,2.32,92,2.084,96,2.32,111,0.712,115,2.955,121,1.758,124,3.019,125,1.282,136,1.904,140,2.32,144,1.828,159,3.376,179,2.663,183,1.531,195,2.32,198,1.484,236,2.193,242,1.988,247,2.193,249,2.193,258,1.828,273,2.193,277,3.118,281,1.357,287,2.519,291,1.713,326,2.084,333,2.919,370,1.758,398,2.084,427,2.193,430,2.472,491,2.472,492,1.695,506,3.447,552,2.472,581,2.32,584,2.663,612,2.663,614,2.472,628,2.919,642,2.32,651,1.988,660,2.919,670,1.988,680,2.32,681,3.308,682,1.531,683,3.308,684,2.919,685,2.919,686,2.955,687,2.663,688,3.308,689,3.447,690,3.308,691,2.472,692,3.308,693,3.308,694,2.919,695,3.308,696,3.308,697,3.308]],["t/50",[27,1.088,28,1.485,43,2.739,69,1.647,71,1.412,72,1.138,82,2.242,83,3.67,84,3.145,85,2.825,107,2.384,110,1.485,124,2.656,132,3.67,138,2.827,159,3.514,182,2.972,183,3.214,240,2.972,272,2.581,274,2.478,289,2.825,297,2.218,305,2.825,307,2.827,317,2.384,380,3.352,396,3.611,441,3.611,513,2.581,563,3.145,564,2.825,575,2.972,576,2.825,580,4.282,625,3.352,645,4.282,648,3.611,698,3.958,699,4.485,700,3.958]],["t/52",[10,3.078,22,0.787,27,1.082,28,1.155,59,2.827,71,1.098,72,0.885,82,1.281,85,2.197,90,2.606,96,2.445,102,1.787,108,1.927,110,1.155,120,2.445,121,2.717,122,2.007,124,2.223,126,2.197,139,2.096,152,2.606,195,2.445,228,2.808,274,1.927,290,3.078,294,2.606,307,1.614,319,2.096,347,2.606,373,2.808,376,2.197,449,3.584,458,2.606,491,2.606,492,3.1,493,2.007,507,2.007,554,3.078,558,4.511,561,2.606,575,4.01,576,2.197,584,2.808,633,3.078,651,2.096,659,2.808,676,2.445,680,2.445,701,3.488,702,3.078,703,5.112,704,5.112,705,3.078,706,3.488,707,3.488,708,3.488,709,3.488,710,3.488,711,3.488,712,3.488,713,6.051,714,5.112,715,3.078,716,5.112,717,3.488,718,3.488,719,3.488,720,2.445,721,3.488,722,3.078,723,3.488,724,2.808,725,3.078,726,3.488]],["t/54",[4,0.35,5,0.318,17,0.829,22,1.837,26,2.231,29,1.266,31,1.765,35,2.328,38,0.857,43,0.328,48,1.867,53,0.647,59,0.292,61,1.016,62,2.001,64,1.992,66,1.2,69,0.499,72,0.483,73,0.44,75,0.781,77,0.375,79,0.389,80,0.547,88,1.178,94,0.404,110,0.631,111,1.068,115,1.145,119,2.873,121,0.723,122,1.096,124,1.886,125,1.086,132,1.685,134,0.589,136,0.421,141,0.309,142,0.485,143,0.547,144,0.404,145,2.292,150,0.44,155,1.016,156,0.954,158,0.857,159,1.371,163,3.877,165,0.646,171,0.589,175,0.404,181,4.757,182,0.901,183,0.882,189,0.954,195,0.513,196,0.646,198,0.854,207,0.646,208,0.589,209,0.589,214,1.564,215,0.783,222,0.421,223,0.547,226,0.751,229,0.421,240,1.857,248,0.461,256,0.589,266,0.485,271,1.096,281,0.558,286,0.513,288,0.857,289,1.765,291,0.255,297,1.178,305,1.2,307,0.339,310,0.375,317,1.012,322,0.857,323,1.22,328,1.578,337,1.096,342,0.954,356,1.095,370,0.389,376,1.765,378,0.44,385,0.589,390,0.513,399,0.485,427,0.901,431,0.646,432,0.589,433,0.547,437,0.954,465,0.547,484,0.857,487,0.954,488,0.589,489,0.857,491,0.547,492,0.375,493,0.783,494,0.646,496,0.362,501,1.534,504,0.646,507,1.096,513,1.096,527,0.589,536,0.485,541,1.613,557,0.547,566,0.589,568,0.589,575,1.857,577,0.513,589,1.2,590,1.386,594,1.578,614,0.547,625,0.547,656,0.646,670,0.818,676,0.513,682,1.297,685,0.646,689,2.463,702,0.646,705,1.6
81,720,0.513,724,0.589,727,0.646,728,0.732,729,0.732,730,0.589,731,4.534,732,0.547,733,1.681,734,1.905,735,0.732,736,0.646,737,0.646,738,0.646,739,0.732,740,0.646,741,0.732,742,0.732,743,1.36,744,0.732,745,0.732,746,0.646,747,0.513,748,0.901,749,0.732,750,0.513,751,4.562,752,1.432,753,1.681,754,4.087,755,0.732,756,0.589,757,2.382,758,0.646,759,0.732,760,1.36,761,0.732,762,0.732,763,1.2,764,0.732,765,0.732,766,0.732,767,0.732,768,1.36,769,1.36,770,1.36,771,0.421,772,0.646,773,1.36,774,1.36,775,1.36,776,1.36,777,1.36,778,1.36,779,2.382,780,2.382,781,1.36,782,1.2,783,1.36,784,1.2,785,1.36,786,1.36,787,0.732,788,0.732,789,1.36,790,1.36,791,2.382,792,1.36,793,1.36,794,1.36,795,1.36,796,0.732,797,0.646,798,0.646,799,0.646,800,0.732,801,0.732,802,0.732,803,0.732,804,2.803,805,0.732,806,0.589,807,0.732,808,0.646,809,1.36,810,0.732,811,0.646,812,0.732,813,2.382,814,2.803,815,1.36,816,1.36,817,2.382,818,1.36,819,1.36,820,2.382,821,1.36,822,3.512,823,1.681,824,0.732,825,0.732,826,0.732,827,3.84,828,1.36,829,1.36,830,2.382,831,1.36,832,2.382,833,1.36,834,2.382,835,1.36,836,1.36,837,1.36,838,1.36,839,1.36,840,1.36,841,1.36,842,1.36,843,0.732,844,0.646,845,1.36,846,0.732,847,1.36,848,3.177,849,1.36,850,1.905,851,0.646,852,0.732,853,0.732,854,0.732,855,0.732,856,0.732,857,0.732,858,0.732,859,0.732,860,0.732,861,0.732,862,0.732,863,1.36,864,0.732,865,0.732,866,0.732,867,1.2,868,0.732,869,0.732,870,0.732,871,0.732,872,0.732,873,0.732,874,1.36,875,0.732,876,1.36,877,0.732,878,0.732,879,0.732,880,0.732,881,0.732,882,0.732,883,0.732,884,0.732,885,0.732,886,0.732,887,0.732,888,0.646,889,0.646]],["t/56",[0,1,3,1.608,4,1.123,5,0.582,13,0.887,23,1,27,0.789,28,1.248,29,0.534,53,0.227,62,1.479,63,1,64,0.91,65,1.123,69,0.862,71,0.988,72,1.292,75,0.549,77,1.203,82,0.492,86,0.939,88,2.123,95,2.931,97,1.412,100,0.887,101,1.647,107,0.712,108,1.734,109,0.939,110,1.248,111,0.924,119,0.843,122,0.77,124,1.022,125,1.216,128,0.843,138,1.986,144,0.74,145,2.267,149,1.891,150,0.805,153,1.181,158,0.843,163,0.887,173,0.939,198,1.407,215,0.77,219,1.181,226,2.371,241,1.087,242,0.805,253,0.939,271,2.17,277,1.248,281,0.549,286,1.647,287,1.932,288,0.843,289,1.479,291,0.818,296,1.078,308,0.939,317,1.248,318,1,323,1.608,334,1,352,1.755,355,0.534,370,0.712,398,0.843,464,0.77,465,1,489,1.976,492,1.203,496,2.123,499,1.248,506,1.647,507,2.931,533,1,536,2.5,537,2.375,541,2.719,561,1,575,0.887,577,1.647,596,0.887,599,2.644,642,1.647,651,1.412,657,0.805,676,0.939,680,0.939,682,1.746,720,0.939,724,1.078,730,1.078,746,1.181,747,0.939,750,0.939,752,0.805,771,0.77,784,2.073,806,2.526,844,2.073,890,2.769,891,2.349,892,1.181,893,2.349,894,2.349,895,5.189,896,1,897,1.181,898,1.339,899,1.339,900,1.339,901,1.339,902,1.181,903,1.339,904,1.339,905,1.339,906,1.339,907,3.207,908,2.073,909,2.349,910,2.349,911,3.036,912,2.349,913,1.181,914,1.181,915,1.755,916,1.181,917,1.078,918,1.339,919,2.073,920,1.339,921,1.339,922,1.339,923,1.078,924,1.339,925,1.647,926,2.349,927,1.647,928,1.078,929,1.339,930,1.647,931,1.339,932,4.17,933,2.345,934,1.339,935,1.339,936,1.339,937,1.339,938,1.181,939,1.181,940,1.181,941,1.339,942,1.339,943,1.891,944,1.181,945,1.339,946,2.769,947,1.181,948,1.339,949,1.339,950,1.339,951,1.339,952,1.339,953,1.339,954,1.339,955,1.339,956,1.339,957,1.339,958,1.339,959,1.339,960,1.339,961,1.339,962,1.339,963,1.339,964,1.339,965,2.349,966,1.339,967,1.339,968,1.339,969,2.349,970,1.339,971,1.339,972,1.339,973,1.078,974,1.181,975,1.339,976,1.339,977,0.74,978,1.339,979,1.339,980,1.078,981,1.339,982,1.078,983,1.339,984,1.339,985,2.073,986,1,987,0.887,988,
1.452,989,0.77,990,2.08,991,1,992,1.339,993,1.339,994,0.805,995,1.181,996,1.181,997,1,998,1]],["t/58",[9,2.408]],["t/60",[26,1.994,53,1.175,74,4.059,111,1.169,175,3.001,310,3.55,311,3.126,496,2.687,590,2.687,603,3.421,999,4.794,1000,4.059,1001,5.432,1002,4.373,1003,4.794,1004,4.794,1005,4.364,1006,4.059,1007,3.108,1008,3.809,1009,3.809,1010,3.6,1011,4.059,1012,3.809,1013,3.809,1014,4.059,1015,4.059]],["t/62",[26,2.264,53,1.271,55,3.189,74,3.399,110,1.506,111,0.979,122,2.617,151,2.734,175,2.513,222,2.617,278,1.763,299,2.513,464,2.617,529,3.399,590,2.25,603,2.865,686,2.734,771,2.617,896,3.399,907,3.399,982,3.662,991,3.399,1003,4.014,1006,3.399,1007,3.137,1010,4.086,1014,3.399,1016,4.014,1017,6.165,1018,6.165,1019,6.994,1020,4.014,1021,5.441,1022,4.014,1023,4.548,1024,4.014,1025,4.014,1026,4.014,1027,4.548,1028,3.662,1029,4.548,1030,4.014]],["t/64",[67,4.604,103,5.436,122,3.545,142,4.083,151,3.703,249,4.083,896,4.604,999,5.436,1002,4.96,1006,4.604,1021,6.614,1031,6.034,1032,5.436,1033,6.16,1034,6.16]],["t/66",[27,1.172,29,1.775,56,3.123,71,1.402,72,1.542,82,1.635,102,2.282,104,3.586,106,3.586,107,2.367,108,2.461,110,1.474,125,1.726,147,3.328,151,2.677,174,3.328,235,3.93,236,2.952,241,2.062,270,3.328,271,2.563,277,2.367,278,1.726,282,2.805,286,3.123,322,2.805,350,3.93,492,2.282,493,2.563,529,3.328,550,3.93,564,2.805,566,3.586,730,3.586,752,2.677,782,3.93,851,3.93,1020,3.93,1031,3.586,1035,4.454,1036,4.454,1037,4.454,1038,3.93,1039,3.586,1040,4.454,1041,4.454,1042,4.454,1043,4.454,1044,4.454,1045,4.454,1046,4.454]],["t/68",[6,1.943,7,1.943,16,1.502,17,1.952,18,1.846,20,2.146,21,1.762,22,1.76,23,2.191,24,2.191,26,1.076,27,1.014,28,2.393,29,1.168,30,2.191,31,1.846,32,2.587,33,2.587,35,1.943,36,2.826,39,2.055,40,2.191,41,2.36,44,2.697,45,1.619,47,1.558,48,2.385,50,1.846,51,1.846,53,0.497,65,1.402,71,1.717,72,1.138,75,2.236,82,1.076,92,1.846,101,2.055,110,1.485,111,1.315,119,1.846,124,2.372,125,1.136,126,1.846,136,1.687,138,2.077,139,2.697,158,1.846,181,2.191,204,2.055,223,2.191,243,1.943,253,2.055,281,1.202,289,1.846,294,2.191,310,1.502,323,1.502,328,1.943,335,2.587,342,2.055,376,1.846,377,2.191,391,2.055,529,2.191,538,2.587,582,1.762,590,1.45,594,1.943,596,1.943,664,2.587,680,2.055,763,2.587,808,2.587,897,2.587,1047,2.931,1048,2.931,1049,2.931,1050,2.931,1051,2.587,1052,2.931,1053,2.931,1054,2.931,1055,4.487,1056,2.931,1057,2.931,1058,2.931,1059,2.931,1060,2.931,1061,2.931,1062,2.931,1063,2.931,1064,2.931]],["t/70",[9,2.408]],["t/72",[22,1.597,26,2.064,27,0.82,53,1.201,55,3.942,71,1.77,295,3.541,297,3.835,487,3.942,499,2.988,771,3.235,1000,4.201,1005,3.541,1007,3.176,1008,3.942,1009,3.942,1012,3.942,1013,3.942,1015,4.201,1065,3.942,1066,4.201,1067,4.526]],["t/74",[281,2.886,307,3.257,1068,5.665,1069,7.036]],["t/76",[24,4.298,26,1.51,53,1.218,55,4.033,75,1.686,111,1.428,214,1.686,291,2.503,310,2.947,311,2.366,489,2.59,496,2.034,582,2.472,986,3.073,988,3.071,1005,2.59,1007,2.975,1008,2.883,1009,4.033,1011,4.298,1070,3.629,1071,6.087,1072,4.834,1073,4.112,1074,5.752,1075,4.112,1076,4.298,1077,6.633,1078,6.633,1079,5.854,1080,4.112,1081,6.633]],["t/78",[26,1.184,53,0.818,55,5.237,104,2.597,107,1.714,110,1.068,111,0.694,123,3.83,125,1.87,141,1.362,167,2.597,214,1.323,222,3.326,225,2.597,248,2.031,250,2.597,253,5.237,261,2.41,266,2.138,276,2.138,311,2.776,344,2.846,354,2.597,449,2.261,496,1.595,555,2.846,559,4.319,582,2.9,590,1.595,684,2.846,750,2.261,752,1.939,758,2.846,811,2.846,974,2.846,977,1.782,1007,2.164,1070,2.846,1071,5.528,1072,4.615,1076,3.605,1079,4.257,1082,6.591,1083,3.225,1084,3.225,1085,
4.824,1086,3.225,1087,2.846,1088,4.824,1089,3.225,1090,3.225,1091,4.257,1092,3.382,1093,3.884,1094,4.824,1095,3.225,1096,5.779,1097,3.225,1098,2.846,1099,3.225,1100,3.225,1101,3.225]],["t/80",[27,0.85,53,0.988,60,3.669,71,1.834,88,2.882,291,2.03,295,3.669,310,2.985,560,5.141,582,3.501,658,4.085,1000,4.353,1006,4.353,1011,4.353,1012,4.085,1013,4.085,1015,4.353,1066,4.353,1071,4.69,1072,3.501,1076,4.353,1082,5.141,1102,5.141]],["t/82",[22,1.498,26,1.868,27,0.743,53,1.126,60,3.205,64,1.972,77,2.607,115,3.058,214,2.087,291,1.773,295,3.205,297,3.879,299,2.811,355,2.648,487,3.568,496,2.517,499,2.704,682,2.355,748,3.372,771,2.928,987,3.372,988,2.355,990,4.403,1007,2.282,1015,3.802,1065,3.568,1067,4.096,1103,5.862,1104,5.088,1105,4.096,1106,4.49]],["t/84",[9,2.408]],["t/86",[22,0.922,27,1.045,28,1.353,79,2.172,82,2.427,88,2.832,95,2.832,111,0.879,124,2.491,133,3.606,139,2.456,140,2.865,144,3.163,145,2.456,159,2.351,214,1.676,229,2.351,277,2.172,304,3.29,319,3.973,322,2.573,323,2.093,352,3.053,492,2.093,493,2.351,513,2.351,528,4.61,557,3.053,576,2.573,577,2.865,578,3.606,579,3.606,580,4.635,599,4.015,603,2.573,638,3.606,643,3.606,651,2.456,686,2.456,689,2.865,737,3.606,738,3.606,750,2.865,827,3.29,933,3.053,1068,3.29,1087,3.606,1107,4.086,1108,4.086,1109,4.086,1110,4.086,1111,3.606,1112,4.086,1113,3.606,1114,4.086]],["t/88",[21,1.958,26,1.196,27,1.149,28,1.609,29,1.299,43,2.18,69,1.785,71,2.171,72,1.233,79,1.732,82,2.532,83,2.921,84,3.408,85,3.061,94,1.8,102,1.669,107,2.584,110,1.078,111,0.701,112,3.632,113,2.623,124,2.804,132,2.921,136,1.875,138,2.984,146,2.159,159,3.709,183,2.691,214,1.993,220,2.159,278,1.263,281,2.384,305,2.052,307,2.984,423,5.131,499,1.732,513,1.875,563,2.284,564,2.052,575,2.159,576,2.052,580,3.408,582,2.921,594,3.221,604,2.875,613,2.435,621,3.632,645,3.408,648,2.623,659,2.623,677,2.435,698,2.875,700,2.875,772,2.875,927,3.408,928,2.623,1115,3.258,1116,3.258,1117,3.258]],["t/91",[74,5.202,141,2.939,1007,3.122,1118,6.96,1119,6.96]],["t/93",[22,1.847,27,1.079,28,2.274,69,2.522,77,2.251,83,2.64,85,2.767,86,4.222,87,3.283,88,3.656,89,2.911,110,1.454,111,1.296,126,2.767,136,2.528,138,2.787,175,2.427,270,3.283,272,2.528,317,2.335,319,2.64,324,2.64,507,2.528,513,2.528,557,3.283,563,3.08,564,2.767,576,2.767,580,3.08,651,3.619,658,3.08,686,2.64,990,2.911,1039,3.537,1113,3.877,1120,4.393,1121,4.393,1122,4.393,1123,4.393]],["t/95",[9,2.408]],["t/97",[3,1.945,13,2.516,27,0.554,53,0.922,62,3.424,64,2.107,72,0.963,75,2.229,88,1.878,97,4.167,111,1.367,116,3.057,128,2.391,145,4.409,250,3.057,277,2.018,291,1.323,307,2.516,355,1.513,464,2.185,541,3.989,590,1.878,599,2.662,747,2.662,892,3.35,895,3.811,913,3.35,914,3.35,915,2.837,916,3.35,917,3.057,977,3.508,988,3.844,994,2.282,1092,2.662,1124,5.436,1125,3.796,1126,3.796,1127,3.796,1128,5.436,1129,3.796,1130,3.35,1131,3.796,1132,3.796,1133,3.796,1134,3.796,1135,3.796,1136,3.796,1137,3.796,1138,3.796,1139,3.796]],["t/99",[27,0.709,64,1.883,65,2.323,82,1.784,97,2.92,108,2.684,111,1.556,125,1.883,145,2.92,226,3.994,248,3.059,278,2.498,291,1.692,536,4.27,541,4.739,590,2.403,670,2.92,977,2.684,988,3.347,994,3.873,1093,3.911,1140,4.857,1141,4.857,1142,4.857,1143,4.857,1144,3.63,1145,3.406,1146,4.857,1147,4.857,1148,4.857,1149,4.857]],["t/101",[53,1.22,1150,7.194]],["t/103",[0,3.603,9,2.384,22,1.803,53,0.817,69,1.77,139,3.853,141,2.035,155,3.603,156,3.38,209,3.881,226,2.663,241,2.232,253,3.38,271,3.689,287,3.285,291,1.68,398,3.036,445,4.255,456,3.195,541,3.689,657,2.898,670,3.853,771,2.774,980,3.881,1065,3.38,1092,3.38,1144,3.603,1151,4.821,1152,4.821,1153
,6.411,1154,4.821,1155,4.821,1156,4.821,1157,4.821]],["t/105",[9,2.408]],["t/107",[3,2.364,9,1.527,26,1.694,27,1.028,28,1.527,42,2.206,43,2.069,59,1.839,64,1.788,71,1.96,75,2.553,76,3.714,94,2.549,102,2.364,111,1.34,121,2.452,122,2.655,128,3.921,138,2.136,141,1.948,146,3.058,170,3.235,189,3.235,206,2.452,241,2.136,242,2.773,243,3.058,244,3.235,245,3.714,254,4.071,255,3.714,256,3.714,257,3.058,258,2.549,259,2.655,260,3.235,287,2.364,323,2.364,324,2.773,325,3.235,326,2.906,464,2.655,677,3.448,682,2.136,1158,4.613,1159,4.613]],["t/109",[27,1.19,28,1.871,42,1.917,43,2.534,46,2.131,59,1.598,70,2.131,71,2.359,72,1.017,111,0.863,125,1.554,183,2.616,206,2.131,241,2.616,242,2.41,244,2.811,257,3.745,258,2.215,259,2.307,263,3.538,264,4.223,265,4.987,266,2.657,267,3.538,268,3.538,271,3.766,272,2.307,273,2.657,274,3.122,275,2.811,276,2.657,277,3.004,278,1.554,279,2.996,280,2.41,281,1.644,282,3.559,283,2.811,284,2.996,285,3.396,286,2.811,287,2.054,299,2.215,525,4.223,526,3.228,1160,4.009,1161,4.009,1162,3.538]],["t/111",[46,3.338,138,2.907,150,3.775,241,2.907,259,3.614,260,4.404,274,3.47,280,3.775,281,2.576,291,2.188,292,6.692,293,5.057,294,4.694,1163,6.28]],["t/114",[26,1.411,27,1.177,28,2.116,38,3.453,43,2.867,46,2.914,61,2.871,72,1.391,82,1.411,101,2.694,107,2.042,110,1.272,125,1.489,162,2.42,183,2.538,241,1.778,242,2.309,258,2.122,259,2.211,264,2.871,272,3.155,273,2.546,274,3.029,275,2.694,276,2.546,278,1.489,279,2.871,280,2.309,281,2.621,282,3.453,283,2.694,284,2.871,285,3.295,287,1.968,288,2.42,291,1.338,295,2.42,296,3.093,306,2.871,307,1.778,308,2.694,309,5.146,315,3.39,316,3.39,317,2.042,318,2.871,319,2.309,320,3.39,321,2.42,322,2.42,323,1.968,324,2.309,325,2.694,326,2.42,441,3.093,1162,3.39,1164,3.842]],["t/116",[26,2.046,53,1.309,60,3.51,62,3.51,64,2.73,77,2.855,310,2.855,311,3.207,355,2.807,378,3.35,590,2.757,682,2.58,988,2.58,989,3.207,990,3.694,1005,3.51,1007,3.159,1008,3.908,1009,3.908,1105,4.487,1165,5.573,1166,4.918,1167,5.573]],["t/118",[3,1.475,21,1.73,22,1.473,27,0.953,28,2.57,29,2.603,42,1.377,53,0.488,59,1.147,60,1.813,69,1.625,70,1.53,71,1.698,72,0.73,75,1.181,79,1.53,82,1.057,88,2.189,95,1.424,105,2.54,111,0.62,125,1.116,138,1.333,141,1.215,159,1.657,198,1.985,215,1.657,226,1.59,282,1.813,285,1.73,287,2.268,289,1.813,291,2.108,307,1.333,308,2.018,314,5.259,321,2.787,324,1.73,325,2.018,326,1.813,486,2.54,495,3.906,496,1.424,497,2.54,498,3.906,499,1.53,500,3.906,501,5.969,502,3.906,503,4.758,505,4.758,506,2.018,518,2.54,537,2.787,594,1.908,642,2.018,657,1.73,679,2.54,682,1.333,686,1.73,687,2.318,689,2.018,752,1.73,827,2.318,867,2.54,907,2.151,927,2.018,932,2.54,973,2.318,985,2.54,988,1.333,991,3.307,994,1.73,996,2.54,997,2.151,1168,2.879,1169,2.54,1170,4.426,1171,2.318,1172,2.879,1173,3.906,1174,2.879]],["t/120",[53,1.065,65,3.004,73,3.775,77,3.218,111,1.352,206,3.338,299,3.47,391,4.404,581,4.404,1005,3.956,1007,2.817,1144,4.694,1175,6.28,1176,6.28,1177,6.28]],["t/122",[11,4.014,65,2.175,110,1.506,111,0.979,144,2.513,149,3.662,151,2.734,186,3.189,206,2.418,216,4.014,218,5.441,270,3.399,278,1.763,305,2.865,323,2.33,496,2.25,537,2.865,581,3.189,607,4.014,732,3.399,750,3.189,752,2.734,889,4.014,982,4.964,989,2.617,991,3.399,1016,4.014,1031,5.631,1038,4.014,1072,2.734,1171,3.662,1178,4.014,1179,4.548,1180,4.548,1181,4.548,1182,4.014,1183,4.548,1184,4.548,1185,4.548,1186,3.014,1187,4.548,1188,4.548,1189,4.548,1190,4.548,1191,4.548,1192,4.014,1193,4.548,1194,3.662,1195,4.548,1196,4.548]],["t/124",[5,1.872,22,0.971,26,2.496,27,0.628,53,0.73,64,1.669,66,2.711,115,2.587,143,3.217,175,2.378,18
6,3.018,215,2.477,241,1.993,295,2.711,297,3.624,299,2.378,310,2.206,355,1.716,449,3.018,496,3.802,499,2.288,568,3.466,658,3.018,732,3.217,771,2.477,895,3.018,988,1.993,1007,1.931,1010,2.853,1030,3.799,1065,4.163,1067,3.466,1103,6.784,1106,5.998,1178,3.799,1197,5.937,1198,4.305,1199,4.305,1200,3.466,1201,3.018,1202,4.305,1203,4.305,1204,4.305,1205,5.937]],["t/126",[26,2.352,53,1.086,111,1.379,1007,3.442,1022,5.653,1024,5.653,1025,5.653,1026,5.653,1028,5.157,1206,6.406,1207,6.406,1208,6.406]],["t/128",[0,4.521,4,2.115,5,1.924,7,2.931,8,5.551,9,1.464,14,3.305,27,1.006,59,2.411,60,2.786,66,3.81,70,2.351,71,1.393,72,1.122,77,2.266,110,1.464,111,0.952,117,5.152,118,3.305,121,2.351,122,2.545,124,1.924,141,2.554,142,4.009,143,4.521,151,2.659,154,3.561,222,2.545,233,3.903,243,2.931,249,2.931,260,3.101,281,1.814,291,1.541,334,3.305,376,2.786,446,3.561,527,3.561,536,2.931,545,3.903,733,3.903,753,3.903,771,2.545,1209,4.423,1210,4.423,1211,4.423]],["t/130",[9,2.408]],["t/132",[72,1.641,111,1.393,115,3.889,246,4.835,281,2.653,427,4.288,621,4.835,682,2.995,691,4.835,1093,5.209,1212,6.47,1213,6.47]],["t/134",[3,2.489,26,1.784,27,0.709,28,1.608,111,1.556,138,2.248,229,3.708,230,3.219,291,1.692,311,2.795,324,2.92,456,4.27,577,3.406,601,3.63,621,3.63,663,3.911,1007,3.594,1028,3.911,1214,4.857,1215,4.857,1216,4.857,1217,4.287,1218,5.686,1219,5.686,1220,5.686,1221,4.287,1222,4.857,1223,5.686,1224,4.857,1225,4.287,1226,4.287,1227,4.287,1228,4.857]],["t/136",[26,1.5,27,0.596,72,1.036,111,1.423,141,1.725,229,3.295,230,2.708,291,1.424,310,3.387,311,2.351,370,2.172,439,3.053,559,3.053,590,2.832,603,2.573,663,3.29,732,3.053,752,2.456,944,3.606,1004,5.053,1007,3.783,1008,2.865,1009,2.865,1011,4.279,1012,2.865,1013,2.865,1066,3.053,1200,3.29,1217,3.606,1218,5.053,1219,6.656,1220,3.606,1221,5.053,1225,3.606,1226,3.606,1227,3.606,1229,4.086,1230,4.086,1231,4.086,1232,4.086,1233,4.086,1234,4.086,1235,4.086,1236,4.086,1237,4.086,1238,4.086,1239,4.086]],["t/138",[26,2.352,83,4.612,111,1.379,117,4.787,229,3.686,230,4.245,652,5.734,727,5.653,823,5.653,1240,6.406,1241,6.406]],["t/140",[4,1.837,5,1.671,17,1.671,31,2.42,88,1.9,111,0.827,141,1.622,144,2.122,230,2.546,291,1.338,295,3.453,297,1.9,298,2.871,322,2.42,370,2.042,380,2.871,437,2.694,456,2.546,458,2.871,484,2.42,493,2.211,519,2.694,523,3.39,542,2.871,546,3.093,559,5.727,627,2.871,657,2.309,658,2.694,691,4.097,752,2.309,806,3.093,1007,2.867,1068,3.093,1091,3.39,1092,2.694,1098,3.39,1192,3.39,1194,3.093,1223,3.39,1242,5.482,1243,7.663,1244,3.842,1245,5.482,1246,5.482,1247,5.482,1248,3.842,1249,5.482,1250,3.842,1251,3.842,1252,3.842,1253,6.391,1254,3.39,1255,3.842,1256,3.39,1257,2.694,1258,3.842,1259,3.842,1260,3.39]],["t/142",[9,2.408]],["t/144",[22,1.033,27,1.024,53,1.19,62,2.885,64,2.721,72,1.781,77,3.174,111,0.986,123,3.036,175,3.422,214,1.878,291,1.596,297,2.266,303,3.688,310,2.347,311,2.636,355,2.798,496,2.266,682,2.868,987,3.036,988,2.12,989,2.636,990,4.652,1005,2.885,1007,3.148,1039,3.688,1072,2.753,1105,3.688,1166,4.042,1260,4.042,1261,4.042,1262,4.581,1263,4.581,1264,4.581,1265,4.581,1266,4.581]],["t/146",[16,3.268,17,2.775,18,4.018,19,3.355,20,2.288,22,1.918,27,0.698,28,2.112,29,1.907,36,4.821,37,3.576,44,3.156,45,2.644,47,2.543,48,2.543,49,3.853,50,3.014,51,3.014,53,0.811,183,2.215,257,3.171,362,4.13,1267,4.785,1268,3.355,1269,3.853,1270,3.853,1271,4.223,1272,4.223,1273,4.785,1274,3.355]],["t/149",[22,1.762,29,2.632,31,4.159,37,4.935,53,1.119,222,3.8,1275,6.603,1276,6.603,1277,6.603]],["t/151",[22,1.762,29,2.632,53,1.119,59,2.632,657,3.969,1051,5.827,1278,6.603,
1279,6.603,1280,6.603]],["t/153",[22,1.762,29,2.632,53,1.119,59,2.632,204,4.63,272,3.8,1281,6.603,1282,6.603,1283,6.603]],["t/156",[16,2.649,17,2.92,18,3.256,20,2.472,22,1.844,26,1.898,27,0.754,28,2.467,36,4.228,44,3.32,45,2.856,47,2.748,48,2.748,53,0.877,360,4.162,362,3.863,1000,3.864,1010,3.426,1012,3.625,1013,3.625,1066,3.864,1268,3.625,1269,4.162,1270,4.162,1284,5.17,1285,5.17,1286,5.17]],["t/158",[16,2.134,17,2.524,20,1.992,22,1.713,26,2.653,27,0.608,28,1.921,29,1.66,36,2.623,44,2.871,45,2.301,47,2.214,48,2.214,53,1.225,75,2.38,89,2.761,183,1.928,211,3.354,226,2.301,257,2.761,303,3.354,328,2.761,370,2.214,391,2.921,464,2.397,590,2.06,980,3.354,1005,2.623,1007,2.603,1010,3.846,1261,3.676,1269,3.354,1270,3.354,1271,3.676,1272,3.676,1287,7.596,1288,3.676,1289,4.165,1290,4.165,1291,4.165,1292,4.165,1293,4.165,1294,4.165,1295,4.165,1296,4.165,1297,4.165,1298,4.165]],["t/160",[6,4.422,7,4.422,27,0.974,110,2.208,111,1.436,291,2.324,686,4.01,798,5.888,1299,6.671]],["t/162",[22,1.489,27,0.964,53,1.119,69,2.425,111,1.421,124,2.872,596,4.376,670,3.969,1300,5.827,1301,6.603]],["t/164",[22,1.459,27,0.944,53,1.097,69,2.376,124,2.814,136,3.723,139,3.889,317,3.439,541,3.723,1257,4.537,1302,5.71,1303,6.47]],["t/166",[22,1.474,27,0.954,53,1.108,112,4.884,241,3.025,597,5.262,642,4.583,933,4.884,1304,6.536,1305,6.536,1306,6.536]],["t/168",[53,1.143,110,2.232,278,2.613,686,4.052,997,5.038,1173,5.949,1307,5.949,1308,6.741]],["t/170",[27,0.954,53,1.108,112,5.804,116,5.262,215,3.761,241,3.025,564,4.117,908,5.768,1201,4.583,1309,6.536]],["t/172",[9,2.408]],["t/174",[6,1.93,14,1.284,16,0.88,17,2.174,22,1.481,27,0.251,30,1.284,42,0.821,44,0.85,46,0.913,47,0.913,53,0.291,65,0.821,79,0.913,89,1.93,92,1.082,110,1.255,115,1.032,128,2.388,141,2.679,142,1.138,152,1.284,164,4.411,175,0.949,179,1.383,191,2.833,195,1.204,202,2.57,214,0.704,222,0.988,223,1.284,232,1.516,247,1.138,266,1.138,285,1.032,300,1.284,317,1.548,319,1.032,321,1.082,323,0.88,337,1.676,351,1.516,355,0.685,372,1.284,376,1.834,380,1.284,394,1.383,397,1.032,399,1.138,430,2.177,433,2.177,438,1.383,439,1.284,448,2.57,484,1.082,488,1.383,492,1.492,493,0.988,499,1.548,519,2.042,542,1.284,544,1.516,552,1.284,581,1.204,587,1.516,609,1.383,623,1.516,625,1.284,629,1.516,644,1.516,645,1.204,651,1.032,655,2.57,661,1.516,677,1.284,680,2.658,687,1.383,694,1.516,715,1.516,722,1.516,736,2.57,797,3.346,888,1.516,896,1.284,902,2.57,930,1.204,947,1.516,1032,3.346,1182,1.516,1186,1.138,1200,1.383,1201,1.204,1310,1.383,1311,1.718,1312,1.718,1313,1.383,1314,1.516,1315,1.718,1316,1.718,1317,1.516,1318,1.718,1319,1.718,1320,1.718,1321,1.718,1322,2.912,1323,4.998,1324,1.718,1325,1.718,1326,1.718,1327,6.088,1328,1.718,1329,1.718,1330,1.718,1331,1.718,1332,1.718,1333,2.912,1334,1.718,1335,1.718,1336,3.791,1337,1.718,1338,1.718,1339,1.718,1340,1.718,1341,3.941,1342,3.791,1343,1.718,1344,1.718,1345,1.718,1346,1.718,1347,1.718,1348,1.718,1349,1.718,1350,1.718,1351,1.718,1352,1.718,1353,2.912,1354,1.718,1355,1.718,1356,1.718,1357,1.718,1358,1.718,1359,1.718,1360,1.718,1361,1.718,1362,1.718,1363,1.718,1364,1.718,1365,1.718,1366,1.718,1367,1.718,1368,1.718,1369,1.718,1370,1.718,1371,1.718,1372,1.718,1373,1.718,1374,1.718,1375,1.718,1376,1.718,1377,1.718,1378,1.718,1379,3.791,1380,1.718,1381,3.791,1382,2.912,1383,1.718,1384,1.718,1385,3.791,1386,1.718,1387,1.718,1388,1.718,1389,1.718,1390,1.718,1391,1.718,1392,2.912,1393,1.718,1394,1.718,1395,1.516,1396,1.718,1397,1.718,1398,1.718,1399,1.718,1400,1.718,1401,1.718,1402,1.718,1403,1.718,1404,1.718,1405,1.718,1406,1.718,1407,1.718,1408,1
.718,1409,1.718,1410,1.718,1411,1.718,1412,1.718,1413,1.718,1414,1.516,1415,1.718,1416,1.718,1417,1.718,1418,1.516,1419,1.516,1420,1.516,1421,1.516,1422,1.516,1423,1.516]],["t/176",[16,2.191,17,2.946,22,1.834,27,0.624,28,1.416,42,2.045,44,2.115,46,2.273,47,2.273,53,0.725,65,2.045,92,2.693,100,2.834,132,2.57,214,2.424,293,3.443,307,1.979,317,2.273,321,2.693,352,3.196,377,3.196,394,3.443,397,2.57,398,3.723,489,2.693,597,3.443,756,3.443,1111,3.774,1169,3.774,1186,2.834,1310,3.443,1313,3.443,1418,3.774,1419,3.774,1420,3.774,1421,3.774,1422,3.774,1423,3.774,1424,3.774,1425,3.774,1426,4.276,1427,4.276,1428,4.276,1429,4.276,1430,3.774,1431,3.774,1432,4.276,1433,4.276,1434,4.276,1435,4.276,1436,5.91,1437,4.276]],["t/178",[9,2.408]],["t/180",[16,1.992,17,1.691,18,2.449,19,2.726,20,1.859,22,1.883,27,1.08,28,1.287,29,1.55,39,2.726,42,1.859,44,2.735,45,2.148,48,2.067,50,2.449,51,2.449,53,0.659,59,1.55,69,2.719,72,1.402,125,2.143,199,4.452,358,2.238,362,3.703,390,2.726,631,3.431,657,3.323,670,2.337,977,2.148,987,2.577,1002,4.452,1145,2.726,1186,2.577,1201,2.726,1268,2.726,1274,2.726,1300,3.431,1302,3.431,1438,4.808,1439,4.452,1440,3.888,1441,3.888,1442,3.888,1443,3.888,1444,3.888,1445,3.888,1446,3.888,1447,3.888,1448,3.131,1449,3.888]],["t/182",[16,2.067,17,1.755,18,2.541,19,2.829,20,1.929,22,1.804,27,1.04,28,2.485,29,1.608,31,2.541,36,4.135,40,4.241,42,1.929,44,2.807,45,2.229,47,2.145,48,2.145,50,2.541,51,2.541,53,0.684,59,1.608,63,3.015,69,1.481,71,1.27,96,4.604,107,2.145,125,1.564,248,2.541,287,2.067,317,2.145,358,2.322,362,3.778,366,3.561,390,2.829,485,3.561,506,2.829,601,3.015,611,3.561,686,2.425,748,2.674,977,2.229,987,2.674,1145,2.829,1186,2.674,1274,2.829,1450,5.675,1451,4.035,1452,4.035,1453,3.561,1454,4.035]],["t/184",[16,1.777,17,1.509,18,2.185,19,2.432,20,1.659,22,1.769,27,1.033,28,1.997,29,1.383,39,2.432,42,1.659,44,2.519,45,1.916,47,1.844,48,1.844,50,2.185,51,2.185,53,0.863,65,3.538,72,1.291,75,1.423,88,1.716,125,1.974,144,1.916,214,1.423,215,1.996,281,1.423,287,1.777,297,2.519,324,2.085,326,2.185,358,1.996,362,3.471,390,2.432,397,2.085,439,2.592,496,1.716,507,1.996,537,4.186,646,3.061,651,2.085,720,2.432,740,3.061,748,2.299,771,1.996,911,2.793,973,2.793,988,1.606,989,1.996,990,2.299,998,2.592,1065,2.432,1145,2.432,1171,2.793,1186,2.299,1268,2.432,1274,2.432,1438,4.508,1439,4.099,1448,2.793,1455,3.469,1456,3.469,1457,3.469,1458,3.469,1459,3.469,1460,3.061,1461,3.469]],["t/186",[3,2.489,4,2.323,5,2.113,27,1.055,30,3.63,42,2.323,65,3.458,69,2.655,72,1.232,75,1.992,88,3.187,110,1.608,125,1.883,141,2.051,166,3.911,220,3.219,278,2.498,370,2.582,464,2.795,537,4.058,541,4.161,748,3.219,911,3.911,987,3.219,998,3.63,1145,3.406,1395,4.287,1438,3.63,1462,6.443,1463,4.857,1464,4.857,1465,4.857,1466,4.857]],["t/188",[16,1.574,17,3.077,20,1.469,22,1.942,27,0.819,28,1.539,29,1.224,35,2.035,36,2.929,37,2.295,39,2.153,40,3.475,44,2.3,45,1.697,47,1.632,48,2.471,49,2.473,50,1.934,51,1.934,53,0.521,65,3.383,72,0.779,75,1.259,97,1.846,100,2.035,111,0.661,213,3.26,281,1.259,362,4.07,397,1.846,499,1.632,541,1.767,657,2.795,748,2.035,1268,2.153,1274,2.153,1310,2.473,1313,2.473,1314,2.71,1317,2.71,1414,2.71,1424,2.71,1425,2.71,1430,2.71,1431,2.71,1438,3.475,1439,3.744,1448,2.473,1460,2.71,1467,3.071,1468,3.071,1469,3.071,1470,3.071,1471,3.071,1472,3.071,1473,3.071,1474,3.071,1475,3.071,1476,3.071,1477,3.071,1478,3.071,1479,3.071,1480,3.071,1481,3.071,1482,3.071,1483,3.071,1484,4.65,1485,3.071,1486,5.611,1487,3.071,1488,3.071,1489,3.071,1490,3.071,1491,3.071,1492,3.071]],["t/190",[9,2.408]],["t/192",[20,2.864,35,3.
969,53,1.249,119,3.772,123,3.969,166,4.821,337,3.446,358,4.239,397,3.599,1493,5.988,1494,6.5,1495,7.365,1496,5.988,1497,5.988,1498,5.988,1499,5.988]],["t/194",[53,1.108,123,4.332,337,3.761,358,4.768,397,3.928,1494,5.768,1500,6.536,1501,6.536,1502,6.536]],["t/196",[9,2.408]],["t/198",[9,2.408]],["t/200",[9,2.408]],["t/202",[4,2.005,5,1.824,9,1.388,27,1.15,43,1.88,65,2.005,68,2.413,69,2.141,70,2.228,71,1.32,72,1.7,79,2.228,80,3.133,81,3.7,82,2.461,83,2.52,84,2.94,85,2.641,86,2.94,87,3.133,88,2.074,89,2.779,90,3.133,91,2.94,97,2.52,108,2.316,109,4.088,110,1.93,124,1.824,125,2.26,126,2.641,128,2.641,132,2.52,145,2.52,180,3.7,199,3.375,298,3.133,484,2.641,513,2.413,609,3.375,652,3.133,676,2.94,689,2.94,725,3.7,731,3.7,1014,3.133,1102,3.7,1256,3.7,1503,4.192,1504,5.83,1505,4.192,1506,4.192]],["t/204",[9,2.408]],["t/206",[64,2.641,111,1.466,214,2.794,537,4.291,682,3.154,1010,4.515,1507,6.813]],["t/208",[22,1.176,53,0.884,64,3.177,95,3.701,111,1.122,198,3.026,226,4.528,278,2.616,355,2.077,682,3.794,927,4.731,928,4.196,977,3.728,1508,5.212,1509,4.599,1510,4.599,1511,5.212,1512,5.212]],["t/210",[22,1.279,53,0.962,64,3.165,100,4.719,111,1.221,198,2.544,226,4.51,278,2.76,355,2.261,682,3.779,977,3.133,1509,5.005,1510,5.005,1513,5.671,1514,5.671]],["t/212",[3,2.714,9,1.754,53,0.898,65,2.533,67,3.959,73,3.184,108,2.927,111,1.14,198,3.058,213,3.714,214,2.172,307,2.452,489,3.337,496,2.621,682,3.156,927,5.287,977,2.927,988,2.452,1201,3.714,1515,6.819,1516,5.297,1517,5.297,1518,5.297,1519,5.297,1520,5.297,1521,5.297,1522,5.297,1523,5.297,1524,5.297]],["t/214",[22,1.352,27,0.637,53,1.016,72,1.107,95,3.645,100,4.884,111,1.586,198,3.07,220,4.537,278,1.691,291,1.52,307,2.02,337,3.939,507,4.241,546,3.513,720,3.059,895,3.059,907,3.261,919,7.213,943,3.513,977,3.782,989,2.511,1072,2.622,1144,3.261,1525,4.363,1526,4.363,1527,4.363,1528,4.363,1529,3.85,1530,4.363,1531,4.363,1532,4.363]],["t/216",[27,0.806,59,2.792,64,2.142,72,1.401,94,3.052,108,3.052,122,3.18,125,2.142,138,2.558,147,4.129,156,3.874,355,2.202,430,4.129,464,3.18,603,3.48,923,5.64,925,3.874,1453,4.876,1533,7.004,1534,5.525,1535,5.525,1536,5.525,1537,5.525,1538,5.525,1539,5.525,1540,5.525]],["t/218",[64,2.855,77,3.774,108,3.308,111,1.289,215,3.446,355,2.936,489,3.772,627,5.504,989,3.446,997,4.475,1072,3.599,1092,4.199,1541,5.988,1542,5.988,1543,5.988,1544,5.988]],["t/220",[22,1.371,27,0.65,46,2.367,53,0.755,72,1.13,95,3.422,108,2.461,111,1.308,125,2.356,183,2.062,198,3.334,278,2.682,337,3.498,355,1.775,464,2.563,507,3.498,691,3.328,895,3.123,923,4.893,940,5.364,943,3.586,986,3.328,989,2.563,998,3.328,1529,3.93,1545,4.454,1546,4.454,1547,6.919,1548,6.919,1549,4.454,1550,4.454,1551,4.454,1552,6.078,1553,3.93,1554,3.93,1555,4.454,1556,4.454,1557,3.93]],["t/222",[9,1.658,27,0.731,53,1.244,59,1.996,75,2.696,95,3.252,198,3.292,213,3.512,342,4.61,355,1.996,465,3.743,499,3.495,747,3.512,925,4.61,930,4.61,1341,4.42,1558,6.575,1559,5.009,1560,5.009,1561,5.009,1562,5.009,1563,5.009,1564,6.575,1565,6.575,1566,6.575,1567,6.575]],["t/224",[31,3.88,53,1.044,64,2.388,75,2.526,299,3.403,355,2.455,378,3.703,641,5.436,988,2.852,994,3.703,1257,4.319,1568,5.436,1569,7.494,1570,6.16,1571,6.16,1572,6.16]],["t/226",[29,2.165,53,0.921,59,2.165,64,2.106,68,3.126,69,1.994,75,2.228,137,4.059,146,3.6,198,2.436,328,3.6,378,3.265,499,3.683,988,2.514,994,3.265,1257,3.809,1553,4.794,1554,4.794,1573,5.432,1574,5.432,1575,5.432,1576,5.432,1577,5.432,1578,5.432,1579,6.929,1580,6.929,1581,5.432,1582,5.432]],["t/228",[53,0.988,109,4.085,110,1.928,141,2.459,208,4.69,291,2.523,370,3.096,378,3.50
1,590,2.882,601,4.353,747,4.085,799,5.141,988,2.697,994,3.501,1194,4.69,1254,5.141,1288,5.141,1557,6.39,1583,5.825,1584,5.825,1585,5.825]],["t/230",[3,2.906,53,0.962,64,2.198,77,2.906,198,3.194,275,3.977,291,1.976,299,3.133,355,2.838,378,4.28,493,3.264,542,4.238,915,4.238,925,3.977,930,3.977,988,2.625,994,3.409,995,5.005,1257,3.977,1568,5.005,1586,7.121,1587,5.671]],["t/232",[9,2.408]],["t/234",[20,3.711,22,1.75,24,2.498,38,2.105,53,1.344,64,1.296,95,2.451,190,2.344,198,2.222,278,2.929,355,1.332,358,3.397,362,3.397,484,4.888,599,3.474,682,1.547,756,2.691,925,3.474,930,3.474,933,3.702,938,2.95,939,2.95,946,6.668,986,2.498,1014,2.498,1076,2.498,1307,2.95,1588,3.343,1589,4.954,1590,3.343,1591,4.954,1592,3.343,1593,3.343,1594,3.343,1595,3.343,1596,3.343,1597,3.343,1598,3.343,1599,5.903,1600,3.343,1601,3.343,1602,3.343,1603,3.343,1604,3.343,1605,3.343,1606,3.343,1607,3.343,1608,3.343,1609,3.343,1610,3.343]],["t/236",[4,3.516,20,3.516,22,1.659,53,1.297,62,2.405,64,2.85,145,2.295,278,2.965,355,2.931,358,2.198,457,3.37,484,4.631,670,2.295,682,1.768,890,6.749,915,2.854,917,3.075,989,4.401,1072,4.597,1130,3.37,1611,3.819,1612,3.819,1613,3.819,1614,3.819,1615,3.819,1616,3.819,1617,3.819,1618,3.819,1619,3.819,1620,3.819,1621,3.819,1622,3.819]]],"invertedIndex":[["",{"_index":22,"t":{"7":{"position":[[72,3],[141,3],[145,1],[159,1],[211,1],[237,1],[257,1],[296,1],[368,1],[442,1],[468,1],[527,2]]},"24":{"position":[[492,1]]},"26":{"position":[[165,1]]},"30":{"position":[[61,1],[257,1],[347,1],[419,1],[504,1],[583,1],[651,1]]},"32":{"position":[[260,1],[517,1],[759,1],[957,1],[1079,1],[1163,1],[1307,1]]},"34":{"position":[[617,1],[1166,1],[1577,1],[1616,1],[1719,1],[1793,1],[1819,1],[2116,1],[2149,1],[2189,1],[2220,1],[2252,1],[2286,1],[2350,1],[2378,1],[2860,1]]},"36":{"position":[[1375,1],[1565,1],[2405,1],[2480,1],[2526,2],[2595,1],[2839,2]]},"40":{"position":[[1020,1]]},"44":{"position":[[379,1]]},"46":{"position":[[559,1],[614,1],[746,1],[998,5]]},"48":{"position":[[316,1],[611,1]]},"52":{"position":[[546,1]]},"54":{"position":[[1780,1],[2263,1],[2294,1],[2439,1],[2545,1],[2579,1],[2708,2],[2733,1],[2735,1],[2737,3],[2853,1],[2894,1],[2918,1],[3119,1],[3662,1],[3780,1],[4221,1],[4341,1],[4343,2],[4380,1],[4400,1],[4424,1],[4459,1],[4502,2],[4529,1],[4793,1],[5039,2],[5065,1],[5162,1],[5506,1],[5566,1],[5647,1],[6043,1],[6265,1],[6571,1],[6573,2],[6610,1],[6630,1],[6654,1],[6689,1],[6732,2],[6759,1],[7023,2],[7118,1],[7276,1],[7421,1],[7527,1],[7561,1],[7690,2],[7715,1],[7717,1],[7735,1],[7811,1],[7917,1],[7931,2],[7956,2],[7973,1],[8000,1],[8093,1],[8095,1],[8114,1],[8197,1],[8226,1],[8257,1],[8316,2],[8371,2],[8397,1],[8494,1],[8539,1]]},"68":{"position":[[339,1],[402,1],[465,3],[538,3],[553,1],[668,1],[753,3],[844,3],[855,1],[921,2]]},"72":{"position":[[30,1],[105,1]]},"82":{"position":[[154,1],[232,1]]},"86":{"position":[[448,1]]},"93":{"position":[[0,2],[62,2],[129,2],[218,3],[274,2],[321,2],[380,2]]},"103":{"position":[[110,1],[112,1],[184,2],[187,1],[259,2]]},"118":{"position":[[1041,1],[1135,1],[1181,2],[1275,1],[1415,2]]},"124":{"position":[[230,1]]},"144":{"position":[[104,2]]},"146":{"position":[[160,1],[178,1],[230,1],[275,1],[334,2],[355,1],[405,1],[432,1]]},"149":{"position":[[6,1],[74,1]]},"151":{"position":[[6,1],[56,1]]},"153":{"position":[[6,1],[77,1]]},"156":{"position":[[85,1],[181,1],[200,1],[252,1],[297,1]]},"158":{"position":[[159,1],[269,1],[308,1],[360,1],[405,1]]},"162":{"position":[[88,1]]},"164":{"position":[[134,1]]},"166":{"position":[[84,1]]},"174":{"position":[[299,1],[3
60,1],[2684,1],[2730,1],[2808,1],[2872,1],[2931,1],[2933,3],[3162,1],[3370,1]]},"176":{"position":[[155,1],[357,1],[528,1],[530,3],[697,1],[824,2],[906,1]]},"180":{"position":[[406,1],[424,1],[520,1],[608,2],[629,1],[689,1],[708,1],[710,1],[894,1],[903,1]]},"182":{"position":[[391,1],[409,1],[507,1],[567,2],[594,1],[619,1],[732,1]]},"184":{"position":[[644,1],[662,1],[763,1],[823,2],[844,1],[919,1],[929,1],[955,1]]},"188":{"position":[[383,1],[524,1],[570,1],[597,1],[719,1],[874,1],[892,1],[1039,1],[1098,2],[1119,1],[1197,1],[1231,1],[1255,1],[1327,1],[1495,1],[1532,1],[1541,2],[1554,1],[1624,1]]},"208":{"position":[[0,1]]},"210":{"position":[[0,1]]},"214":{"position":[[148,1],[425,1]]},"220":{"position":[[89,1],[329,1]]},"234":{"position":[[143,1],[239,1],[339,1],[442,1],[548,1],[667,1],[790,1],[881,1]]},"236":{"position":[[175,1],[319,1],[470,1],[610,1],[742,1]]}}}],["0",{"_index":944,"t":{"56":{"position":[[2048,1]]},"136":{"position":[[145,1]]}}}],["0.01",{"_index":114,"t":{"15":{"position":[[50,5]]}}}],["0.1",{"_index":1116,"t":{"88":{"position":[[530,6]]}}}],["0.55",{"_index":951,"t":{"56":{"position":[[2200,5]]}}}],["0.55temperature=0.55",{"_index":1529,"t":{"214":{"position":[[463,20]]},"220":{"position":[[366,20]]}}}],["01",{"_index":1521,"t":{"212":{"position":[[376,2]]}}}],["05",{"_index":1520,"t":{"212":{"position":[[373,2]]}}}],["1",{"_index":771,"t":{"54":{"position":[[2131,1]]},"56":{"position":[[2054,2]]},"62":{"position":[[437,2]]},"72":{"position":[[5,1]]},"82":{"position":[[5,1]]},"103":{"position":[[181,2]]},"124":{"position":[[5,1]]},"128":{"position":[[241,2]]},"184":{"position":[[223,2]]}}}],["1,200",{"_index":692,"t":{"48":{"position":[[768,5]]}}}],["1.0",{"_index":1230,"t":{"136":{"position":[[141,3]]}}}],["1.7.1",{"_index":1083,"t":{"78":{"position":[[81,7]]}}}],["100",{"_index":630,"t":{"42":{"position":[[967,5]]}}}],["1106",{"_index":777,"t":{"54":{"position":[[2383,4],[7365,4]]}}}],["128k",{"_index":772,"t":{"54":{"position":[[2192,5]]},"88":{"position":[[360,5]]}}}],["18.17.0nvm",{"_index":1024,"t":{"62":{"position":[[205,10]]},"126":{"position":[[23,10]]}}}],["1960s,but",{"_index":1328,"t":{"174":{"position":[[574,9]]}}}],["2",{"_index":1065,"t":{"72":{"position":[[74,1]]},"82":{"position":[[92,1]]},"103":{"position":[[256,2]]},"124":{"position":[[29,1],[370,1]]},"184":{"position":[[364,2]]}}}],["2+3",{"_index":787,"t":{"54":{"position":[[2751,3]]}}}],["2.0",{"_index":1179,"t":{"122":{"position":[[62,3]]}}}],["2.1smart_llm_model=claud",{"_index":1565,"t":{"222":{"position":[[293,25],[425,25]]}}}],["20",{"_index":87,"t":{"13":{"position":[[167,3]]},"46":{"position":[[194,2],[317,3]]},"93":{"position":[[148,2]]},"202":{"position":[[516,3]]}}}],["2000",{"_index":934,"t":{"56":{"position":[[1706,5]]}}}],["2004(thi",{"_index":1321,"t":{"174":{"position":[[308,9]]}}}],["2021",{"_index":635,"t":{"44":{"position":[[179,6]]}}}],["2023\"report_typ",{"_index":1279,"t":{"151":{"position":[[39,16]]}}}],["2024",{"_index":1550,"t":{"220":{"position":[[433,6]]}}}],["20240229",{"_index":1567,"t":{"222":{"position":[[326,8],[458,8]]}}}],["2k",{"_index":1113,"t":{"86":{"position":[[385,4]]},"93":{"position":[[119,2]]}}}],["3",{"_index":499,"t":{"36":{"position":[[2458,2]]},"56":{"position":[[2548,2],[2767,2]]},"72":{"position":[[143,1]]},"82":{"position":[[208,1]]},"88":{"position":[[497,1]]},"118":{"position":[[1113,2]]},"124":{"position":[[134,1]]},"174":{"position":[[2645,2],[2962,1]]},"188":{"position":[[1539,1]]},"222":{"position":[[319,1],[451,1]]},"226":{"position":[[222,
1],[267,1]]}}}],["3.10",{"_index":1262,"t":{"144":{"position":[[69,5]]}}}],["3.11",{"_index":1004,"t":{"60":{"position":[[82,5]]},"136":{"position":[[275,4],[513,4]]}}}],["32768",{"_index":1548,"t":{"220":{"position":[[244,6],[323,5],[529,5]]}}}],["36",{"_index":1399,"t":{"174":{"position":[[2374,2]]}}}],["4",{"_index":115,"t":{"15":{"position":[[82,1]]},"36":{"position":[[2571,1]]},"48":{"position":[[285,2],[330,1]]},"54":{"position":[[2178,1],[2381,1],[7363,1]]},"82":{"position":[[262,1]]},"124":{"position":[[268,1]]},"132":{"position":[[49,1]]},"174":{"position":[[2967,1]]}}}],["4000",{"_index":936,"t":{"56":{"position":[[1788,5]]}}}],["4o",{"_index":927,"t":{"56":{"position":[[1506,2],[1628,3]]},"88":{"position":[[341,2],[357,2]]},"118":{"position":[[1092,4]]},"208":{"position":[[250,2],[321,3]]},"212":{"position":[[407,2],[438,2],[467,3]]}}}],["5",{"_index":1158,"t":{"107":{"position":[[343,1]]}}}],["50",{"_index":1574,"t":{"226":{"position":[[35,3]]}}}],["6",{"_index":1159,"t":{"107":{"position":[[345,1]]}}}],["7",{"_index":1161,"t":{"109":{"position":[[32,1]]}}}],["700",{"_index":942,"t":{"56":{"position":[[1972,4]]}}}],["70b",{"_index":1553,"t":{"220":{"position":[[492,3]]},"226":{"position":[[269,3]]}}}],["7b",{"_index":1557,"t":{"220":{"position":[[541,2]]},"228":{"position":[[233,2],[277,2]]}}}],["7bsmart_llm_model=mistr",{"_index":1572,"t":{"224":{"position":[[160,25]]}}}],["80,000",{"_index":1379,"t":{"174":{"position":[[1742,7],[1815,7],[2488,7]]}}}],["800",{"_index":954,"t":{"56":{"position":[[2299,4]]}}}],["8192",{"_index":940,"t":{"56":{"position":[[1887,5]]},"220":{"position":[[496,4],[511,4]]}}}],["85",{"_index":678,"t":{"46":{"position":[[1011,3]]}}}],["8b",{"_index":1554,"t":{"220":{"position":[[508,2]]},"226":{"position":[[224,2]]}}}],["8x7b",{"_index":1547,"t":{"220":{"position":[[239,4],[318,4],[524,4]]}}}],["95",{"_index":710,"t":{"52":{"position":[[344,3]]}}}],["__call__",{"_index":1496,"t":{"192":{"position":[[103,9]]}}}],["__call__(cl",{"_index":1497,"t":{"192":{"position":[[117,13]]}}}],["__init__",{"_index":1591,"t":{"234":{"position":[[117,9],[131,10]]}}}],["__init__(self",{"_index":389,"t":{"34":{"position":[[1518,15]]}}}],["__main__",{"_index":51,"t":{"7":{"position":[[530,11]]},"68":{"position":[[924,11]]},"146":{"position":[[337,11]]},"180":{"position":[[611,11]]},"182":{"position":[[570,11]]},"184":{"position":[[826,11]]},"188":{"position":[[1101,11]]}}}],["__name__",{"_index":50,"t":{"7":{"position":[[518,8]]},"68":{"position":[[912,8]]},"146":{"position":[[325,8]]},"180":{"position":[[599,8]]},"182":{"position":[[558,8]]},"184":{"position":[[814,8]]},"188":{"position":[[1089,8]]}}}],["abil",{"_index":216,"t":{"26":{"position":[[101,7]]},"122":{"position":[[375,7]]}}}],["abov",{"_index":370,"t":{"34":{"position":[[769,6],[1247,6],[1887,6],[2990,6],[3410,6]]},"42":{"position":[[330,6]]},"46":{"position":[[821,6]]},"48":{"position":[[518,5]]},"54":{"position":[[4808,6]]},"56":{"position":[[3278,5]]},"136":{"position":[[186,5]]},"140":{"position":[[755,5]]},"158":{"position":[[645,5]]},"186":{"position":[[20,5]]},"228":{"position":[[115,6]]}}}],["abstract",{"_index":1502,"t":{"194":{"position":[[59,8]]}}}],["abstractsingleton(abc.abc",{"_index":1500,"t":{"194":{"position":[[6,26]]}}}],["academia",{"_index":707,"t":{"52":{"position":[[275,9]]}}}],["accept",{"_index":482,"t":{"36":{"position":[[1805,8],[1857,10]]}}}],["access",{"_index":1014,"t":{"60":{"position":[[225,6]]},"62":{"position":[[335,6]]},"202":{"position":[[8,9]]},"234":{"position":[[109,7]]}}}]
,["accord",{"_index":620,"t":{"42":{"position":[[500,9]]}}}],["accordingli",{"_index":1155,"t":{"103":{"position":[[341,12]]}}}],["account",{"_index":1539,"t":{"216":{"position":[[186,7]]}}}],["accur",{"_index":127,"t":{"17":{"position":[[72,9],[188,8],[426,8]]}}}],["achiev",{"_index":683,"t":{"48":{"position":[[267,8]]}}}],["act",{"_index":269,"t":{"30":{"position":[[204,4]]}}}],["action",{"_index":825,"t":{"54":{"position":[[4092,6]]}}}],["activ",{"_index":1079,"t":{"76":{"position":[[275,8],[327,10],[580,10]]},"78":{"position":[[635,8],[1006,10]]}}}],["activate/deactiv",{"_index":1073,"t":{"76":{"position":[[42,19]]}}}],["actual",{"_index":593,"t":{"40":{"position":[[648,8]]}}}],["ad",{"_index":149,"t":{"19":{"position":[[152,6],[270,6]]},"56":{"position":[[892,6],[3792,6]]},"122":{"position":[[185,5]]}}}],["add",{"_index":215,"t":{"26":{"position":[[89,4]]},"34":{"position":[[2380,3],[3169,3]]},"54":{"position":[[1131,3],[8773,3]]},"56":{"position":[[3233,3]]},"118":{"position":[[669,3]]},"124":{"position":[[64,3]]},"170":{"position":[[13,3]]},"184":{"position":[[226,3]]},"218":{"position":[[178,3]]}}}],["add_edg",{"_index":435,"t":{"34":{"position":[[3099,8]]}}}],["add_nod",{"_index":434,"t":{"34":{"position":[[3089,9]]}}}],["addit",{"_index":536,"t":{"38":{"position":[[499,9]]},"44":{"position":[[894,9]]},"54":{"position":[[8674,10]]},"56":{"position":[[215,9],[971,10],[3499,10],[3672,10]]},"99":{"position":[[246,8],[392,10]]},"128":{"position":[[36,10]]}}}],["adjust",{"_index":1040,"t":{"66":{"position":[[288,6]]}}}],["advanc",{"_index":1453,"t":{"182":{"position":[[642,12]]},"216":{"position":[[19,8]]}}}],["advantag",{"_index":467,"t":{"36":{"position":[[1226,9]]}}}],["afterward",{"_index":1224,"t":{"134":{"position":[[355,10]]}}}],["again",{"_index":727,"t":{"54":{"position":[[19,5]]},"138":{"position":[[111,6]]}}}],["agent",{"_index":71,"t":{"11":{"position":[[250,5]]},"13":{"position":[[60,5]]},"19":{"position":[[244,5]]},"21":{"position":[[37,5]]},"24":{"position":[[121,6],[304,5],[348,7],[435,5],[689,5],[790,5]]},"26":{"position":[[57,5],[73,5],[206,7],[319,6],[348,6],[504,7]]},"28":{"position":[[120,6],[165,5],[405,6],[562,5]]},"30":{"position":[[40,7],[136,5],[169,6],[198,5],[284,5]]},"32":{"position":[[829,5]]},"34":{"position":[[904,7],[917,5],[1058,7],[1290,5],[1325,5],[1410,5],[1936,6],[2003,6],[2036,7],[2129,6],[2399,5],[3369,6]]},"36":{"position":[[367,5],[385,6],[772,7],[2198,6],[2257,7]]},"38":{"position":[[814,5],[996,6]]},"40":{"position":[[1048,5]]},"42":{"position":[[658,5],[764,5],[852,5]]},"46":{"position":[[394,5],[456,5],[597,5]]},"50":{"position":[[277,5]]},"52":{"position":[[145,6]]},"56":{"position":[[2575,6],[2634,5],[3020,5]]},"66":{"position":[[523,5]]},"68":{"position":[[0,5],[75,6],[177,5]]},"72":{"position":[[86,5]]},"80":{"position":[[31,5]]},"88":{"position":[[50,7],[129,6],[316,6],[581,5],[762,5]]},"107":{"position":[[115,6],[223,6]]},"109":{"position":[[37,7],[130,7],[226,5],[259,6],[338,5]]},"118":{"position":[[218,7],[408,7],[430,6]]},"128":{"position":[[264,5]]},"182":{"position":[[20,5]]},"202":{"position":[[409,5]]}}}],["agent_rol",{"_index":964,"t":{"56":{"position":[[2551,11]]}}}],["ages.startup",{"_index":1334,"t":{"174":{"position":[[672,13]]}}}],["aggreg",{"_index":85,"t":{"13":{"position":[[150,11]]},"48":{"position":[[15,11],[356,10]]},"50":{"position":[[469,9]]},"52":{"position":[[680,10]]},"88":{"position":[[246,10],[961,9]]},"93":{"position":[[132,10]]},"202":{"position":[[499,11]]}}}],["ahead",{"_index":760,"t":{"54":{"position":[[
1484,5],[8763,5]]}}}],["ai",{"_index":59,"t":{"11":{"position":[[79,2]]},"17":{"position":[[145,2],[234,2],[446,2]]},"24":{"position":[[118,2],[212,2],[432,2],[470,2],[620,2]]},"28":{"position":[[402,2]]},"36":{"position":[[2420,2]]},"38":{"position":[[102,2],[344,2],[1186,2]]},"40":{"position":[[373,2],[853,2]]},"52":{"position":[[82,2],[142,2],[226,2],[478,2],[745,2]]},"54":{"position":[[260,2]]},"107":{"position":[[220,2]]},"109":{"position":[[34,2]]},"118":{"position":[[1056,2]]},"128":{"position":[[176,2],[274,2]]},"151":{"position":[[21,2]]},"153":{"position":[[49,2]]},"180":{"position":[[663,2]]},"182":{"position":[[658,2]]},"216":{"position":[[28,2],[98,2]]},"222":{"position":[[16,2]]},"226":{"position":[[9,2]]}}}],["aim",{"_index":56,"t":{"11":{"position":[[33,6]]},"26":{"position":[[39,5]]},"38":{"position":[[288,6]]},"42":{"position":[[270,4]]},"66":{"position":[[416,3]]}}}],["aka",{"_index":185,"t":{"24":{"position":[[366,4]]}}}],["allow",{"_index":248,"t":{"28":{"position":[[215,6]]},"34":{"position":[[268,8]]},"38":{"position":[[952,5]]},"54":{"position":[[3382,6]]},"78":{"position":[[412,6]]},"99":{"position":[[115,5]]},"182":{"position":[[88,6]]}}}],["alreadi",{"_index":168,"t":{"24":{"position":[[133,7]]},"38":{"position":[[445,7]]}}}],["although",{"_index":735,"t":{"54":{"position":[[430,8]]}}}],["alway",{"_index":567,"t":{"38":{"position":[[1422,7]]}}}],["amaz",{"_index":153,"t":{"19":{"position":[[315,7]]},"56":{"position":[[121,7]]}}}],["amazingli",{"_index":1537,"t":{"216":{"position":[[83,9]]}}}],["analysi",{"_index":294,"t":{"32":{"position":[[115,9]]},"52":{"position":[[788,9]]},"68":{"position":[[1253,9]]},"111":{"position":[[92,8]]}}}],["analyz",{"_index":716,"t":{"52":{"position":[[511,7],[638,7]]}}}],["andrew",{"_index":187,"t":{"24":{"position":[[392,6]]}}}],["anoth",{"_index":655,"t":{"46":{"position":[[0,7]]},"174":{"position":[[2042,7],[2287,7]]}}}],["answer",{"_index":689,"t":{"48":{"position":[[537,6],[644,6]]},"54":{"position":[[1835,7],[2016,6],[2088,11],[5282,6],[5702,7],[5883,6],[5955,12]]},"86":{"position":[[570,8]]},"118":{"position":[[1300,6]]},"202":{"position":[[211,7]]}}}],["anthrop",{"_index":1558,"t":{"222":{"position":[[0,9],[120,9]]}}}],["anyth",{"_index":1204,"t":{"124":{"position":[[311,8]]}}}],["apa",{"_index":506,"t":{"36":{"position":[[2628,3]]},"48":{"position":[[809,3],[871,3]]},"56":{"position":[[291,5],[2371,4]]},"118":{"position":[[1362,3]]},"182":{"position":[[694,3]]}}}],["api",{"_index":64,"t":{"11":{"position":[[131,4],[157,3]]},"19":{"position":[[113,3]]},"54":{"position":[[191,3],[334,3],[531,4],[686,3],[882,3],[938,3],[1683,4],[1923,3],[2258,4],[3291,5],[3312,3],[4216,4],[5491,3],[5790,3]]},"56":{"position":[[439,3],[3530,3]]},"82":{"position":[[184,3]]},"97":{"position":[[216,3],[619,3]]},"99":{"position":[[167,3]]},"107":{"position":[[462,3]]},"116":{"position":[[120,3],[168,3]]},"124":{"position":[[73,3]]},"144":{"position":[[259,3],[322,3],[376,3]]},"206":{"position":[[22,3]]},"208":{"position":[[22,3],[83,3],[156,3],[216,3],[286,3]]},"210":{"position":[[22,3],[95,3],[168,3],[228,3]]},"216":{"position":[[201,3]]},"218":{"position":[[91,3],[152,3]]},"224":{"position":[[22,3]]},"226":{"position":[[22,3]]},"230":{"position":[[43,3]]},"234":{"position":[[817,3]]},"236":{"position":[[59,3],[207,3],[358,3],[502,3],[640,3]]}}}],["apikeyerror(except",{"_index":1611,"t":{"236":{"position":[[6,22]]}}}],["app",{"_index":732,"t":{"54":{"position":[[263,4]]},"122":{"position":[[10,3]]},"124":{"position":[[437,3]]},"136":{"position":[[
497,3]]}}}],["appl",{"_index":1436,"t":{"176":{"position":[[562,7],[600,6]]}}}],["applic",{"_index":60,"t":{"11":{"position":[[82,11]]},"34":{"position":[[209,12],[435,11]]},"80":{"position":[[19,11]]},"82":{"position":[[220,11]]},"116":{"position":[[195,12]]},"118":{"position":[[974,11]]},"128":{"position":[[179,12]]}}}],["approach",{"_index":341,"t":{"34":{"position":[[121,8]]},"42":{"position":[[693,8]]}}}],["apt",{"_index":1227,"t":{"134":{"position":[[454,3]]},"136":{"position":[[120,3]]}}}],["arbitrari",{"_index":234,"t":{"26":{"position":[[483,9]]}}}],["architectur",{"_index":441,"t":{"34":{"position":[[3302,12]]},"50":{"position":[[90,13]]},"114":{"position":[[34,12]]}}}],["area",{"_index":373,"t":{"34":{"position":[[811,6]]},"44":{"position":[[661,5]]},"52":{"position":[[250,5]]}}}],["arg",{"_index":1498,"t":{"192":{"position":[[131,6]]}}}],["argument",{"_index":1145,"t":{"99":{"position":[[403,9]]},"180":{"position":[[215,8]]},"182":{"position":[[211,8]]},"184":{"position":[[446,8]]},"186":{"position":[[217,8]]}}}],["around",{"_index":113,"t":{"15":{"position":[[43,6]]},"46":{"position":[[224,6],[977,6]]},"88":{"position":[[490,6]]}}}],["articl",{"_index":204,"t":{"24":{"position":[[656,7]]},"38":{"position":[[1097,7]]},"40":{"position":[[1097,8]]},"68":{"position":[[1101,8]]},"153":{"position":[[24,7]]}}}],["arxiv",{"_index":1128,"t":{"97":{"position":[[450,5],[747,5]]}}}],["ask",{"_index":685,"t":{"48":{"position":[[384,3]]},"54":{"position":[[1170,3]]}}}],["assign",{"_index":967,"t":{"56":{"position":[[2653,8]]}}}],["assist",{"_index":163,"t":{"21":{"position":[[43,9]]},"24":{"position":[[796,9]]},"36":{"position":[[113,9],[2226,9],[2314,10],[2361,9],[3149,9]]},"38":{"position":[[142,9],[860,10],[958,10]]},"54":{"position":[[180,10],[250,9],[323,10],[675,10],[871,10],[921,9],[1193,9],[1309,9],[1402,9],[1452,9],[2147,9],[3040,9],[3301,10],[3424,11],[4177,9],[4822,9],[5252,9],[8619,9],[8809,9]]},"56":{"position":[[3640,11]]}}}],["assistant.idprint(f\"assist",{"_index":875,"t":{"54":{"position":[[7737,29]]}}}],["assistant_id",{"_index":874,"t":{"54":{"position":[[7719,15],[7771,17]]}}}],["assistant_id=assistant_id",{"_index":795,"t":{"54":{"position":[[3175,27],[8170,26]]}}}],["assistant_prompt_instruct",{"_index":767,"t":{"54":{"position":[[1751,28]]}}}],["assistantassist",{"_index":773,"t":{"54":{"position":[[2275,18],[7257,18]]}}}],["assistant’",{"_index":766,"t":{"54":{"position":[[1725,11]]}}}],["associ",{"_index":1094,"t":{"78":{"position":[[668,10],[791,10]]}}}],["assum",{"_index":398,"t":{"34":{"position":[[1953,6]]},"44":{"position":[[335,7],[904,8]]},"46":{"position":[[337,8]]},"48":{"position":[[1074,6]]},"56":{"position":[[546,6]]},"103":{"position":[[278,7]]},"176":{"position":[[219,8],[294,8]]}}}],["async",{"_index":391,"t":{"34":{"position":[[1539,5]]},"36":{"position":[[1305,5]]},"68":{"position":[[798,5]]},"120":{"position":[[110,5]]},"158":{"position":[[38,5]]}}}],["async_browse(url",{"_index":668,"t":{"46":{"position":[[616,18]]}}}],["async_mode=true,)queri",{"_index":1435,"t":{"176":{"position":[[505,22]]}}}],["asynchron",{"_index":805,"t":{"54":{"position":[[3393,12]]}}}],["asyncio",{"_index":664,"t":{"46":{"position":[[443,8]]},"68":{"position":[[261,8]]}}}],["asyncio.gather(*task",{"_index":674,"t":{"46":{"position":[[754,22]]}}}],["asyncio.run(generate_research_report",{"_index":1059,"t":{"68":{"position":[[936,39]]}}}],["asyncio.run(get_report(prompt=prompt",{"_index":1454,"t":{"182":{"position":[[734,37]]}}}],["asyncio.run(get_report(queri",{"_index":1273
,"t":{"146":{"position":[[434,29]]}}}],["asyncio.run(get_report(query=queri",{"_index":1448,"t":{"180":{"position":[[905,35]]},"184":{"position":[[957,35]]},"188":{"position":[[1626,35]]}}}],["asyncio.run(main",{"_index":52,"t":{"7":{"position":[[542,19]]}}}],["asyncioapp",{"_index":1285,"t":{"156":{"position":[[74,10]]}}}],["asyncioasync",{"_index":19,"t":{"7":{"position":[[47,12]]},"146":{"position":[[101,12]]},"180":{"position":[[330,12]]},"182":{"position":[[331,12]]},"184":{"position":[[583,12]]}}}],["asyncioconnection_str",{"_index":1471,"t":{"188":{"position":[[358,24]]}}}],["asynciofrom",{"_index":1267,"t":{"146":{"position":[[47,11]]}}}],["atleast",{"_index":1380,"t":{"174":{"position":[[1807,7]]}}}],["attent",{"_index":203,"t":{"24":{"position":[[630,9]]}}}],["attribut",{"_index":221,"t":{"26":{"position":[[182,10]]}}}],["audio",{"_index":721,"t":{"52":{"position":[[595,6]]}}}],["autogpt",{"_index":569,"t":{"40":{"position":[[6,7],[282,7],[484,7]]},"46":{"position":[[19,7],[1027,8]]}}}],["autom",{"_index":290,"t":{"32":{"position":[[19,10]]},"52":{"position":[[30,10]]}}}],["automat",{"_index":755,"t":{"54":{"position":[[1244,13]]}}}],["autonom",{"_index":70,"t":{"11":{"position":[[230,10]]},"13":{"position":[[40,10]]},"24":{"position":[[764,10]]},"28":{"position":[[542,10]]},"30":{"position":[[273,10]]},"36":{"position":[[102,10]]},"40":{"position":[[105,10],[1037,10]]},"109":{"position":[[327,10]]},"118":{"position":[[447,13]]},"128":{"position":[[244,10]]},"202":{"position":[[398,10]]}}}],["avail",{"_index":691,"t":{"48":{"position":[[744,10]]},"132":{"position":[[91,9]]},"140":{"position":[[269,9],[692,9]]},"220":{"position":[[444,9]]}}}],["availablerespons",{"_index":673,"t":{"46":{"position":[[727,18]]}}}],["averag",{"_index":677,"t":{"46":{"position":[[949,7]]},"88":{"position":[[462,7]]},"107":{"position":[[319,7]]},"174":{"position":[[1766,7]]}}}],["avoid",{"_index":1098,"t":{"78":{"position":[[960,8]]},"140":{"position":[[599,5]]}}}],["await",{"_index":44,"t":{"7":{"position":[[406,5],[470,5]]},"34":{"position":[[1757,5],[1821,5]]},"46":{"position":[[669,5],[748,5]]},"68":{"position":[[625,5],[670,5],[857,5]]},"146":{"position":[[232,5],[277,5]]},"156":{"position":[[254,5],[299,5]]},"158":{"position":[[362,5],[407,5]]},"174":{"position":[[3372,5]]},"176":{"position":[[908,5]]},"180":{"position":[[522,5],[565,5]]},"182":{"position":[[464,5],[509,5]]},"184":{"position":[[720,5],[765,5]]},"188":{"position":[[996,5],[1041,5]]}}}],["azur",{"_index":1515,"t":{"212":{"position":[[44,5],[66,5]]}}}],["azure_openai",{"_index":921,"t":{"56":{"position":[[1308,13]]}}}],["back",{"_index":488,"t":{"36":{"position":[[2070,4]]},"54":{"position":[[4937,4]]},"174":{"position":[[635,6]]}}}],["backend",{"_index":982,"t":{"56":{"position":[[3084,7]]},"62":{"position":[[382,7]]},"122":{"position":[[128,7],[424,7]]}}}],["base",{"_index":281,"t":{"30":{"position":[[535,5]]},"32":{"position":[[41,5],[304,5],[397,5],[558,5],[663,5],[1124,5]]},"34":{"position":[[299,5],[956,5]]},"36":{"position":[[207,5]]},"38":{"position":[[912,5],[1044,5]]},"46":{"position":[[925,5]]},"48":{"position":[[917,5]]},"54":{"position":[[1843,5],[5710,5]]},"56":{"position":[[2640,5]]},"68":{"position":[[493,5]]},"74":{"position":[[14,5]]},"88":{"position":[[170,5],[587,5],[880,5]]},"109":{"position":[[589,5]]},"111":{"position":[[26,5]]},"114":{"position":[[152,5],[234,5],[546,5]]},"128":{"position":[[387,5]]},"132":{"position":[[56,5]]},"184":{"position":[[881,5]]},"188":{"position":[[1161,5]]}}}],["basic",{"_index":45
9,"t":{"36":{"position":[[801,9]]}}}],["be",{"_index":1351,"t":{"174":{"position":[[1100,5]]}}}],["beautifulsoup",{"_index":971,"t":{"56":{"position":[[2840,16]]}}}],["becom",{"_index":672,"t":{"46":{"position":[[720,6]]}}}],["befor",{"_index":458,"t":{"36":{"position":[[723,7]]},"38":{"position":[[1213,6]]},"52":{"position":[[135,6]]},"140":{"position":[[498,6]]}}}],["behavior",{"_index":966,"t":{"56":{"position":[[2618,8]]}}}],["below",{"_index":289,"t":{"32":{"position":[[8,6]]},"38":{"position":[[1492,6]]},"50":{"position":[[113,6]]},"54":{"position":[[768,6],[1299,6],[5018,6],[5345,5],[8861,5]]},"56":{"position":[[495,6],[998,5]]},"68":{"position":[[141,6]]},"118":{"position":[[767,6]]}}}],["best",{"_index":92,"t":{"13":{"position":[[240,4]]},"17":{"position":[[10,4]]},"24":{"position":[[721,4]]},"48":{"position":[[240,4]]},"68":{"position":[[276,4]]},"174":{"position":[[433,4]]},"176":{"position":[[622,4]]}}}],["bet",{"_index":1325,"t":{"174":{"position":[[438,3]]}}}],["beta",{"_index":1194,"t":{"122":{"position":[[484,6]]},"140":{"position":[[794,4]]},"228":{"position":[[280,4]]}}}],["betasmart_llm_model=huggingfaceh4/zephyr",{"_index":1585,"t":{"228":{"position":[[236,40]]}}}],["better",{"_index":249,"t":{"28":{"position":[[226,6]]},"42":{"position":[[288,6]]},"44":{"position":[[560,6]]},"48":{"position":[[1135,7]]},"64":{"position":[[95,6]]},"128":{"position":[[169,6]]}}}],["between",{"_index":465,"t":{"36":{"position":[[1121,7]]},"54":{"position":[[1390,7]]},"56":{"position":[[2040,7]]},"222":{"position":[[112,7]]}}}],["bia",{"_index":638,"t":{"44":{"position":[[299,5]]},"86":{"position":[[626,4]]}}}],["bias",{"_index":643,"t":{"44":{"position":[[412,6],[1212,6]]},"86":{"position":[[563,6]]}}}],["big",{"_index":1392,"t":{"174":{"position":[[2164,3],[2508,3]]}}}],["biggest",{"_index":631,"t":{"44":{"position":[[4,7]]},"180":{"position":[[645,7]]}}}],["bing",{"_index":914,"t":{"56":{"position":[[1143,5]]},"97":{"position":[[556,4]]}}}],["bit",{"_index":799,"t":{"54":{"position":[[3252,3]]},"228":{"position":[[28,3]]}}}],["blog",{"_index":741,"t":{"54":{"position":[[626,4]]}}}],["blue",{"_index":1485,"t":{"188":{"position":[[1149,4]]}}}],["bool",{"_index":1589,"t":{"234":{"position":[[81,5],[874,5]]}}}],["boss",{"_index":1391,"t":{"174":{"position":[[2154,4]]}}}],["both",{"_index":220,"t":{"26":{"position":[[167,4]]},"38":{"position":[[543,4]]},"44":{"position":[[1412,4]]},"88":{"position":[[332,4]]},"186":{"position":[[126,4]]},"214":{"position":[[24,4],[80,5],[165,5]]}}}],["box",{"_index":535,"t":{"38":{"position":[[478,3]]}}}],["break",{"_index":625,"t":{"42":{"position":[[751,8]]},"50":{"position":[[69,5]]},"54":{"position":[[7967,5]]},"174":{"position":[[1870,5]]}}}],["brew",{"_index":1219,"t":{"134":{"position":[[297,4],[386,4]]},"136":{"position":[[48,4],[291,5],[297,4],[354,4],[525,6]]}}}],["brief",{"_index":1372,"t":{"174":{"position":[[1624,5]]}}}],["bring",{"_index":562,"t":{"38":{"position":[[1311,7]]}}}],["brows",{"_index":296,"t":{"32":{"position":[[262,7]]},"56":{"position":[[1852,6]]},"114":{"position":[[110,7]]}}}],["browse_chunk_max_length",{"_index":937,"t":{"56":{"position":[[1794,24]]}}}],["browse_websit",{"_index":1604,"t":{"234":{"position":[[687,14]]}}}],["browser",{"_index":295,"t":{"32":{"position":[[235,7]]},"72":{"position":[[182,7]]},"80":{"position":[[194,7]]},"82":{"position":[[301,7]]},"114":{"position":[[83,7]]},"124":{"position":[[495,7]]},"140":{"position":[[75,7],[165,7]]}}}],["bs",{"_index":970,"t":{"56":{"position":[[2837,2]]}}}],["build",{"_inde
x":66,"t":{"11":{"position":[[197,5]]},"24":{"position":[[85,9],[746,5]]},"26":{"position":[[615,9]]},"34":{"position":[[1311,8]]},"54":{"position":[[234,5],[1553,5],[3556,5]]},"124":{"position":[[252,5]]},"128":{"position":[[163,5],[231,5]]}}}],["built",{"_index":91,"t":{"13":{"position":[[229,5]]},"26":{"position":[[156,5]]},"36":{"position":[[2099,6]]},"38":{"position":[[744,5]]},"202":{"position":[[330,5]]}}}],["bullshit",{"_index":1402,"t":{"174":{"position":[[2581,8]]}}}],["burn",{"_index":32,"t":{"7":{"position":[[190,7]]},"68":{"position":[[370,7]]}}}],["busi",{"_index":538,"t":{"38":{"position":[[598,8]]},"68":{"position":[[1244,8]]}}}],["call",{"_index":119,"t":{"15":{"position":[[124,5]]},"24":{"position":[[319,8]]},"36":{"position":[[2176,6]]},"54":{"position":[[292,4],[421,8],[733,7],[1071,8],[1258,5],[2216,4],[3088,4],[4115,5],[4196,4],[4861,4],[4991,4]]},"56":{"position":[[422,5]]},"68":{"position":[[165,7]]},"192":{"position":[[153,4]]}}}],["came",{"_index":573,"t":{"40":{"position":[[88,4]]}}}],["capabl",{"_index":1046,"t":{"66":{"position":[[529,13]]}}}],["care",{"_index":80,"t":{"13":{"position":[[77,4]]},"36":{"position":[[1033,4]]},"54":{"position":[[4160,4]]},"202":{"position":[[426,4]]}}}],["carri",{"_index":619,"t":{"42":{"position":[[474,8]]}}}],["case",{"_index":229,"t":{"26":{"position":[[409,4]]},"34":{"position":[[463,4]]},"38":{"position":[[624,6]]},"40":{"position":[[78,4]]},"54":{"position":[[4050,5]]},"86":{"position":[[520,5]]},"134":{"position":[[252,4],[404,4]]},"136":{"position":[[3,4],[70,4]]},"138":{"position":[[80,6]]}}}],["catch",{"_index":846,"t":{"54":{"position":[[4969,5]]}}}],["caus",{"_index":450,"t":{"36":{"position":[[427,5],[2951,5]]},"44":{"position":[[81,6]]}}}],["cd",{"_index":1022,"t":{"62":{"position":[[162,2]]},"126":{"position":[[0,2]]}}}],["chain",{"_index":99,"t":{"13":{"position":[[301,7]]}}}],["challeng",{"_index":612,"t":{"42":{"position":[[312,10],[981,9]]},"44":{"position":[[12,9],[320,11]]},"48":{"position":[[92,9]]}}}],["chang",{"_index":985,"t":{"56":{"position":[[3183,6],[3361,6]]},"118":{"position":[[3,6]]}}}],["charactertextsplitter(chunk_size=200",{"_index":1407,"t":{"174":{"position":[[2732,37]]}}}],["charactertextsplitterfrom",{"_index":1312,"t":{"174":{"position":[[76,25]]}}}],["chat",{"_index":1580,"t":{"226":{"position":[[227,4],[273,4]]}}}],["chatgpt",{"_index":737,"t":{"54":{"position":[[542,7]]},"86":{"position":[[440,7]]}}}],["check",{"_index":4,"t":{"3":{"position":[[51,5],[103,5]]},"11":{"position":[[136,5],[326,5]]},"13":{"position":[[488,5]]},"19":{"position":[[409,5]]},"24":{"position":[[859,5]]},"34":{"position":[[3257,6]]},"36":{"position":[[3273,5]]},"54":{"position":[[788,5]]},"56":{"position":[[1172,5],[3703,5]]},"128":{"position":[[572,5]]},"140":{"position":[[634,5]]},"186":{"position":[[417,5]]},"202":{"position":[[596,5]]},"236":{"position":[[187,5],[331,5],[482,5],[622,5],[754,5]]}}}],["check_google_api_key",{"_index":1617,"t":{"236":{"position":[[417,21]]}}}],["check_google_api_key(cfg",{"_index":1618,"t":{"236":{"position":[[443,25]]}}}],["check_openai_api_key",{"_index":1613,"t":{"236":{"position":[[122,21]]}}}],["check_openai_api_key(cfg",{"_index":1614,"t":{"236":{"position":[[148,25]]}}}],["check_searx_url",{"_index":1621,"t":{"236":{"position":[[699,16]]}}}],["check_searx_url(cfg",{"_index":1622,"t":{"236":{"position":[[720,20]]}}}],["check_serp_api_key",{"_index":1619,"t":{"236":{"position":[[561,19]]}}}],["check_serp_api_key(cfg",{"_index":1620,"t":{"236":{"position":[[585,23]]}}}],["check
_tavily_api_key",{"_index":1615,"t":{"236":{"position":[[266,21]]}}}],["check_tavily_api_key(cfg",{"_index":1616,"t":{"236":{"position":[[292,25]]}}}],["chees",{"_index":1486,"t":{"188":{"position":[[1154,6],[1320,6],[1608,8]]}}}],["chief",{"_index":263,"t":{"30":{"position":[[48,5]]},"109":{"position":[[138,5]]}}}],["chiefeditor",{"_index":490,"t":{"36":{"position":[[2186,11]]}}}],["chip",{"_index":1233,"t":{"136":{"position":[[168,4]]}}}],["choic",{"_index":1140,"t":{"99":{"position":[[46,6]]}}}],["choos",{"_index":546,"t":{"38":{"position":[[972,6]]},"140":{"position":[[302,6]]},"214":{"position":[[65,6]]}}}],["chrome",{"_index":1243,"t":{"140":{"position":[[68,6],[117,6],[154,6],[285,6],[589,6],[718,6]]}}}],["chromedriv",{"_index":1242,"t":{"140":{"position":[[36,12],[679,12]]}}}],["chunk",{"_index":939,"t":{"56":{"position":[[1842,6]]},"234":{"position":[[710,5]]}}}],["chunk_overlap=30",{"_index":1408,"t":{"174":{"position":[[2770,17]]}}}],["circul",{"_index":374,"t":{"34":{"position":[[875,10]]}}}],["circular",{"_index":466,"t":{"36":{"position":[[1196,8]]}}}],["claim",{"_index":1398,"t":{"174":{"position":[[2352,8]]}}}],["class",{"_index":358,"t":{"34":{"position":[[506,5]]},"36":{"position":[[896,5]]},"180":{"position":[[245,5]]},"182":{"position":[[241,5]]},"184":{"position":[[416,6]]},"192":{"position":[[0,5],[96,6]]},"194":{"position":[[0,5],[78,5],[120,6]]},"234":{"position":[[0,5],[53,5],[177,5]]},"236":{"position":[[0,5]]}}}],["claud",{"_index":1561,"t":{"222":{"position":[[70,7]]}}}],["clean",{"_index":1089,"t":{"78":{"position":[[425,5]]}}}],["click",{"_index":1256,"t":{"140":{"position":[[439,5]]},"202":{"position":[[252,5]]}}}],["client",{"_index":857,"t":{"54":{"position":[[5478,7]]}}}],["client.beta.assistants.cr",{"_index":774,"t":{"54":{"position":[[2296,30],[7278,30]]}}}],["client.beta.threads.create()print(f\"thread",{"_index":878,"t":{"54":{"position":[[7813,43]]}}}],["client.beta.threads.create()user_input",{"_index":788,"t":{"54":{"position":[[2855,38]]}}}],["client.beta.threads.messages.cr",{"_index":790,"t":{"54":{"position":[[2920,36],[8002,36]]}}}],["client.beta.threads.messages.list(thread_id=thread_id",{"_index":870,"t":{"54":{"position":[[7120,54]]}}}],["client.beta.threads.runs.cr",{"_index":794,"t":{"54":{"position":[[3121,32],[8116,32]]}}}],["client.beta.threads.runs.retrieve(thread_id=thread_id",{"_index":819,"t":{"54":{"position":[[3782,54],[6267,54]]}}}],["client.beta.threads.runs.submit_tool_output",{"_index":840,"t":{"54":{"position":[[4680,45],[6910,45]]}}}],["clilanggraph",{"_index":1175,"t":{"120":{"position":[[22,12]]}}}],["clone",{"_index":1197,"t":{"124":{"position":[[33,5],[89,6]]}}}],["close",{"_index":1193,"t":{"122":{"position":[[477,6]]}}}],["cloud",{"_index":1191,"t":{"122":{"position":[[452,5]]}}}],["cm",{"_index":957,"t":{"56":{"position":[[2403,4]]}}}],["code",{"_index":328,"t":{"32":{"position":[[1417,4]]},"34":{"position":[[3293,4]]},"54":{"position":[[379,4],[1029,4],[5372,5],[8717,6]]},"68":{"position":[[1224,4]]},"158":{"position":[[651,4]]},"226":{"position":[[87,5]]}}}],["codebas",{"_index":1266,"t":{"144":{"position":[[434,8]]}}}],["collabor",{"_index":154,"t":{"19":{"position":[[396,12]]},"38":{"position":[[820,13]]},"128":{"position":[[541,13]]}}}],["collect",{"_index":293,"t":{"32":{"position":[[100,10]]},"111":{"position":[[77,10]]},"176":{"position":[[457,10]]}}}],["collection_name='som",{"_index":1433,"t":{"176":{"position":[[435,21]]}}}],["collection_name=collection_nam",{"_index":1480,"t":{"188":{"position":[[667,32]]}
}}],["combin",{"_index":1395,"t":{"174":{"position":[[2314,7]]},"186":{"position":[[8,7]]}}}],["come",{"_index":218,"t":{"26":{"position":[[138,5]]},"122":{"position":[[165,5],[516,6]]}}}],["comma",{"_index":1125,"t":{"97":{"position":[[353,7]]}}}],["command",{"_index":1076,"t":{"76":{"position":[[208,7],[684,8]]},"78":{"position":[[211,8],[718,8]]},"80":{"position":[[105,8]]},"234":{"position":[[702,7]]}}}],["comment",{"_index":568,"t":{"38":{"position":[[1484,7]]},"54":{"position":[[8853,7]]},"124":{"position":[[169,7]]}}}],["commun",{"_index":13,"t":{"3":{"position":[[166,9]]},"19":{"position":[[335,10]]},"36":{"position":[[1109,11]]},"38":{"position":[[1404,10]]},"56":{"position":[[129,9]]},"97":{"position":[[475,10]]}}}],["compani",{"_index":1341,"t":{"174":{"position":[[802,7],[1854,7],[2168,8],[2512,8]]},"222":{"position":[[39,8]]}}}],["compat",{"_index":1245,"t":{"140":{"position":[[106,10],[358,10]]}}}],["compil",{"_index":284,"t":{"30":{"position":[[601,9]]},"32":{"position":[[1165,8]]},"109":{"position":[[655,9]]},"114":{"position":[[587,8]]}}}],["complement",{"_index":1043,"t":{"66":{"position":[[483,13]]}}}],["complet",{"_index":594,"t":{"40":{"position":[[657,9]]},"42":{"position":[[113,10],[737,10],[837,10],[938,10]]},"54":{"position":[[3912,13],[4067,9],[6397,13],[8244,8]]},"68":{"position":[[1323,8]]},"88":{"position":[[378,8],[510,9]]},"118":{"position":[[797,9]]}}}],["completedef",{"_index":815,"t":{"54":{"position":[[3694,11],[6179,11]]}}}],["complex",{"_index":1064,"t":{"68":{"position":[[1342,7]]}}}],["compon",{"_index":615,"t":{"42":{"position":[[382,11]]}}}],["compos",{"_index":1106,"t":{"82":{"position":[[241,7]]},"124":{"position":[[156,7],[239,7],[335,7]]}}}],["comprehens",{"_index":584,"t":{"40":{"position":[[387,13],[1065,13]]},"48":{"position":[[116,13]]},"52":{"position":[[164,13]]}}}],["compress",{"_index":1362,"t":{"174":{"position":[[1380,8]]}}}],["comput",{"_index":1355,"t":{"174":{"position":[[1247,8]]}}}],["concern",{"_index":251,"t":{"28":{"position":[[247,9]]}}}],["conclud",{"_index":628,"t":{"42":{"position":[[858,9]]},"48":{"position":[[221,9]]}}}],["conclus",{"_index":319,"t":{"32":{"position":[[1229,10]]},"34":{"position":[[709,11]]},"40":{"position":[[151,11]]},"52":{"position":[[868,12]]},"86":{"position":[[18,11],[548,11],[656,11]]},"93":{"position":[[206,11]]},"114":{"position":[[651,10]]},"174":{"position":[[3100,15]]}}}],["condit",{"_index":452,"t":{"36":{"position":[[438,10],[1205,9],[1286,11],[1916,11]]}}}],["conduc",{"_index":1101,"t":{"78":{"position":[[1187,9]]}}}],["conduct",{"_index":42,"t":{"7":{"position":[[370,7]]},"28":{"position":[[433,7]]},"30":{"position":[[295,8]]},"34":{"position":[[1721,7]]},"40":{"position":[[379,7]]},"107":{"position":[[251,7]]},"109":{"position":[[349,8]]},"118":{"position":[[606,7]]},"174":{"position":[[3291,7]]},"176":{"position":[[827,7]]},"180":{"position":[[125,7]]},"182":{"position":[[72,10]]},"184":{"position":[[492,7]]},"186":{"position":[[37,7]]}}}],["confer",{"_index":1278,"t":{"151":{"position":[[24,11]]}}}],["config",{"_index":1592,"t":{"234":{"position":[[170,6]]}}}],["config(metaclass=singleton",{"_index":1588,"t":{"234":{"position":[[6,27]]}}}],["config.json",{"_index":910,"t":{"56":{"position":[[877,11],[3777,11]]}}}],["config.pi",{"_index":890,"t":{"56":{"position":[[17,9],[698,9],[952,9]]},"236":{"position":[[81,9],[225,9],[376,9],[520,9],[658,9],[787,9]]}}}],["config_fil",{"_index":912,"t":{"56":{"position":[[915,11],[3815,11]]}}}],["config_path=non",{"_index":41,"t":{"7":{"position":[[3
50,17]]},"34":{"position":[[1701,17]]},"68":{"position":[[607,17]]}}}],["configur",{"_index":986,"t":{"56":{"position":[[3202,15]]},"76":{"position":[[62,14]]},"220":{"position":[[30,9]]},"234":{"position":[[39,13]]}}}],["conflict",{"_index":1091,"t":{"78":{"position":[[487,9],[969,9]]},"140":{"position":[[605,10]]}}}],["connect",{"_index":58,"t":{"11":{"position":[[66,7]]},"34":{"position":[[3197,7]]}}}],["connection=connection_str",{"_index":1434,"t":{"176":{"position":[[475,29]]}}}],["connection=engin",{"_index":1481,"t":{"188":{"position":[[700,18]]}}}],["consid",{"_index":352,"t":{"34":{"position":[[392,11]]},"56":{"position":[[2376,8],[2745,9]]},"86":{"position":[[468,8]]},"176":{"position":[[611,10]]}}}],["consist",{"_index":261,"t":{"30":{"position":[[18,8]]},"34":{"position":[[3055,8]]},"42":{"position":[[366,8]]},"78":{"position":[[339,10]]}}}],["consol",{"_index":996,"t":{"56":{"position":[[3615,7]]},"118":{"position":[[1018,8]]}}}],["constant",{"_index":1050,"t":{"68":{"position":[[298,9]]}}}],["constantli",{"_index":118,"t":{"15":{"position":[[98,10]]},"19":{"position":[[6,10]]},"40":{"position":[[592,10]]},"128":{"position":[[14,10]]}}}],["consum",{"_index":1304,"t":{"166":{"position":[[31,8]]}}}],["contact",{"_index":157,"t":{"19":{"position":[[450,7]]}}}],["contain",{"_index":1169,"t":{"118":{"position":[[112,8]]},"176":{"position":[[256,8]]}}}],["content",{"_index":139,"t":{"17":{"position":[[449,7]]},"34":{"position":[[858,8]]},"44":{"position":[[386,7],[745,8],[795,7],[923,7],[1070,7]]},"52":{"position":[[538,7]]},"68":{"position":[[1080,7],[1120,8]]},"86":{"position":[[499,7]]},"103":{"position":[[164,8],[239,8]]},"164":{"position":[[108,8]]}}}],["content=user_input",{"_index":793,"t":{"54":{"position":[[2991,20],[8073,19]]}}}],["context",{"_index":136,"t":{"17":{"position":[[380,7]]},"34":{"position":[[321,7]]},"40":{"position":[[710,7]]},"48":{"position":[[335,7]]},"54":{"position":[[2198,9]]},"68":{"position":[[1048,7]]},"88":{"position":[[366,8]]},"93":{"position":[[340,7]]},"164":{"position":[[0,7]]}}}],["continu",{"_index":676,"t":{"46":{"position":[[901,8]]},"52":{"position":[[85,9]]},"54":{"position":[[8346,8]]},"56":{"position":[[616,8]]},"202":{"position":[[239,12]]}}}],["contribut",{"_index":0,"t":{"3":{"position":[[0,10],[29,14],[61,12]]},"56":{"position":[[143,14]]},"103":{"position":[[388,10]]},"128":{"position":[[558,13],[586,12]]}}}],["control",{"_index":225,"t":{"26":{"position":[[266,15]]},"38":{"position":[[382,7]]},"78":{"position":[[1164,10]]}}}],["convers",{"_index":753,"t":{"54":{"position":[[1117,13],[1368,12],[7877,12]]},"128":{"position":[[454,14]]}}}],["coordin",{"_index":268,"t":{"30":{"position":[[147,11]]},"109":{"position":[[237,11]]}}}],["copi",{"_index":53,"t":{"7":{"position":[[562,4]]},"34":{"position":[[756,4],[1194,4],[1867,4],[2977,4]]},"36":{"position":[[991,4],[1895,4],[2842,4]]},"46":{"position":[[801,4]]},"48":{"position":[[966,4]]},"54":{"position":[[2100,4],[2741,4],[3012,4],[3203,4],[3967,4],[4795,4],[5207,4],[8610,4]]},"56":{"position":[[3452,4]]},"60":{"position":[[170,4],[220,4]]},"62":{"position":[[172,4],[229,4],[287,4],[330,4]]},"68":{"position":[[976,4]]},"72":{"position":[[64,4],[133,4]]},"76":{"position":[[267,4],[396,4],[517,4],[735,4]]},"78":{"position":[[630,4],[1248,4]]},"80":{"position":[[150,4]]},"82":{"position":[[198,4],[252,4]]},"97":{"position":[[283,4],[456,4]]},"101":{"position":[[97,4]]},"103":{"position":[[262,4]]},"116":{"position":[[59,4],[182,4],[223,4]]},"118":{"position":[[1435,4]]},"120":{"position
":[[38,4]]},"124":{"position":[[258,4]]},"126":{"position":[[88,4]]},"144":{"position":[[195,4],[336,4],[390,4]]},"146":{"position":[[492,4]]},"149":{"position":[[94,4]]},"151":{"position":[[76,4]]},"153":{"position":[[96,4]]},"156":{"position":[[400,4]]},"158":{"position":[[78,4],[480,4],[510,4],[630,4]]},"162":{"position":[[119,4]]},"164":{"position":[[170,4]]},"166":{"position":[[109,4]]},"168":{"position":[[85,4]]},"170":{"position":[[126,4]]},"174":{"position":[[3404,4]]},"176":{"position":[[940,4]]},"180":{"position":[[1002,4]]},"182":{"position":[[812,4]]},"184":{"position":[[354,4],[1037,4]]},"188":{"position":[[1752,4]]},"192":{"position":[[35,4],[148,4]]},"194":{"position":[[54,4]]},"208":{"position":[[325,4]]},"210":{"position":[[286,4]]},"212":{"position":[[471,4]]},"214":{"position":[[484,4],[662,4]]},"220":{"position":[[387,4]]},"222":{"position":[[203,4],[335,4],[467,4]]},"224":{"position":[[199,4]]},"226":{"position":[[281,4]]},"228":{"position":[[285,4]]},"230":{"position":[[218,4]]},"234":{"position":[[34,4],[150,4],[246,4],[346,4],[449,4],[555,4],[674,4],[797,4],[888,4]]},"236":{"position":[[29,4],[182,4],[326,4],[477,4],[617,4],[749,4]]}}}],["corn",{"_index":1357,"t":{"174":{"position":[[1282,4]]}}}],["coroutin",{"_index":666,"t":{"46":{"position":[[587,9]]}}}],["corpor",{"_index":1385,"t":{"174":{"position":[[1930,9],[2438,9],[2571,9]]}}}],["correct",{"_index":276,"t":{"30":{"position":[[435,11]]},"32":{"position":[[973,11]]},"38":{"position":[[360,12]]},"78":{"position":[[1107,7]]},"109":{"position":[[489,11]]},"114":{"position":[[421,11]]}}}],["correctli",{"_index":1151,"t":{"103":{"position":[[33,10]]}}}],["correspond",{"_index":1257,"t":{"140":{"position":[[452,13]]},"164":{"position":[[94,13]]},"224":{"position":[[47,13]]},"226":{"position":[[105,13]]},"230":{"position":[[138,13]]}}}],["cost",{"_index":112,"t":{"15":{"position":[[37,5],[140,5]]},"88":{"position":[[420,5],[524,5]]},"166":{"position":[[0,5]]},"170":{"position":[[17,5],[72,5]]}}}],["count",{"_index":893,"t":{"56":{"position":[[302,6],[2230,5]]}}}],["coupl",{"_index":1575,"t":{"226":{"position":[[71,6]]}}}],["cover",{"_index":1562,"t":{"222":{"position":[[88,6]]}}}],["crawl",{"_index":979,"t":{"56":{"position":[[3041,8]]}}}],["crawler",{"_index":700,"t":{"50":{"position":[[269,7]]},"88":{"position":[[754,7]]}}}],["creat",{"_index":214,"t":{"26":{"position":[[48,8],[112,6],[197,8],[303,8],[476,6]]},"34":{"position":[[1900,7],[2016,8],[2997,8]]},"36":{"position":[[518,6],[1185,6]]},"40":{"position":[[290,7],[997,7]]},"42":{"position":[[556,6]]},"44":{"position":[[1405,6]]},"46":{"position":[[85,6],[561,6]]},"54":{"position":[[911,6],[1080,6],[2137,6],[2265,6],[7247,6],[7789,6],[7975,6],[8097,6]]},"76":{"position":[[77,6]]},"78":{"position":[[137,6]]},"82":{"position":[[96,6]]},"86":{"position":[[619,6]]},"88":{"position":[[285,7],[556,6]]},"144":{"position":[[223,6]]},"174":{"position":[[3116,6]]},"176":{"position":[[534,6],[651,6]]},"184":{"position":[[376,6]]},"206":{"position":[[0,6]]},"212":{"position":[[96,6]]}}}],["create_engine(connection_str",{"_index":1476,"t":{"188":{"position":[[526,32]]}}}],["create_engineimport",{"_index":1470,"t":{"188":{"position":[[338,19]]}}}],["creation",{"_index":1416,"t":{"174":{"position":[[3037,9]]}}}],["creativ",{"_index":948,"t":{"56":{"position":[[2103,11]]}}}],["creator",{"_index":1560,"t":{"222":{"position":[[59,7]]}}}],["criteria",{"_index":279,"t":{"30":{"position":[[486,9]]},"44":{"position":[[274,9]]},"109":{"position":[[540,9]]},"114":{"position":[[461,8]]
}}}],["crucial",{"_index":298,"t":{"32":{"position":[[351,7]]},"44":{"position":[[234,7]]},"140":{"position":[[544,7]]},"202":{"position":[[63,7]]}}}],["csv",{"_index":740,"t":{"54":{"position":[[610,3]]},"184":{"position":[[164,4]]}}}],["curl",{"_index":1296,"t":{"158":{"position":[[531,4]]}}}],["current",{"_index":144,"t":{"19":{"position":[[71,9]]},"24":{"position":[[204,7],[713,7]]},"44":{"position":[[144,9]]},"48":{"position":[[257,9]]},"54":{"position":[[338,9]]},"56":{"position":[[1017,7]]},"86":{"position":[[132,7],[277,7]]},"122":{"position":[[463,10]]},"140":{"position":[[570,7]]},"184":{"position":[[109,9]]}}}],["custom",{"_index":226,"t":{"26":{"position":[[312,6],[373,10],[493,10]]},"36":{"position":[[2346,10]]},"54":{"position":[[958,6],[8644,10]]},"56":{"position":[[42,9],[786,11],[1322,7],[2604,9],[3008,6]]},"99":{"position":[[21,6],[97,6],[374,6]]},"103":{"position":[[8,6]]},"118":{"position":[[33,9]]},"158":{"position":[[692,9]]},"208":{"position":[[8,6],[69,6],[142,6],[202,6],[272,6]]},"210":{"position":[[8,6],[81,6],[154,6],[214,6]]}}}],["custom_report",{"_index":1450,"t":{"182":{"position":[[255,15],[596,15]]}}}],["customiz",{"_index":106,"t":{"13":{"position":[[395,12]]},"28":{"position":[[257,16]]},"66":{"position":[[265,12]]}}}],["cycl",{"_index":486,"t":{"36":{"position":[[2022,5],[2433,8]]},"118":{"position":[[1069,8]]}}}],["cyclic",{"_index":217,"t":{"26":{"position":[[119,8]]}}}],["data",{"_index":150,"t":{"19":{"position":[[164,4]]},"32":{"position":[[95,4],[478,4]]},"34":{"position":[[408,4],[870,4],[951,4]]},"36":{"position":[[482,4]]},"38":{"position":[[562,4]]},"54":{"position":[[581,4]]},"56":{"position":[[3155,5]]},"111":{"position":[[72,4]]}}}],["dataset",{"_index":634,"t":{"44":{"position":[[165,8]]}}}],["date",{"_index":300,"t":{"32":{"position":[[412,4]]},"34":{"position":[[658,5]]},"44":{"position":[[117,4]]},"174":{"position":[[559,5]]}}}],["day",{"_index":704,"t":{"52":{"position":[[208,3],[215,3]]}}}],["deactiv",{"_index":1081,"t":{"76":{"position":[[404,10],[458,12],[506,10]]}}}],["deal",{"_index":810,"t":{"54":{"position":[[3489,7]]}}}],["debug",{"_index":1610,"t":{"234":{"position":[[901,5]]}}}],["decreas",{"_index":904,"t":{"56":{"position":[[628,9]]}}}],["dedic",{"_index":1097,"t":{"78":{"position":[[832,9]]}}}],["def",{"_index":20,"t":{"7":{"position":[[60,3]]},"34":{"position":[[1514,3],[1545,3],[2086,3]]},"36":{"position":[[1311,3]]},"68":{"position":[[427,3],[721,3]]},"146":{"position":[[114,3]]},"156":{"position":[[135,3]]},"158":{"position":[[234,3]]},"180":{"position":[[343,3]]},"182":{"position":[[344,3]]},"184":{"position":[[596,3]]},"188":{"position":[[781,3]]},"192":{"position":[[113,3]]},"234":{"position":[[127,3],[203,3],[302,3],[404,3],[509,3],[622,3],[754,3],[848,3]]},"236":{"position":[[144,3],[288,3],[439,3],[581,3],[716,3]]}}}],["default",{"_index":895,"t":{"56":{"position":[[356,8],[690,7],[1102,8],[1258,8],[1358,8],[1490,8],[1612,8],[1694,8],[1776,8],[1875,8],[1960,8],[2188,8],[2287,8],[2359,8],[2536,8],[2672,7],[2755,8],[2825,8],[2938,8],[3161,8],[3194,7]]},"97":{"position":[[15,8],[548,7]]},"124":{"position":[[275,8]]},"214":{"position":[[451,8]]},"220":{"position":[[354,8]]}}}],["defin",{"_index":342,"t":{"34":{"position":[[147,6],[474,6],[2288,6]]},"36":{"position":[[737,6],[855,6],[1903,8],[2999,7],[3048,7]]},"54":{"position":[[945,8],[1714,6]]},"68":{"position":[[284,6]]},"222":{"position":[[221,6],[353,6]]}}}],["degre",{"_index":224,"t":{"26":{"position":[[256,6]]}}}],["delet",{"_index":1185,"t":{"122":{"position":[[
257,8]]}}}],["deliv",{"_index":1536,"t":{"216":{"position":[[75,7]]}}}],["demo",{"_index":74,"t":{"11":{"position":[[336,5]]},"60":{"position":[[257,5]]},"62":{"position":[[440,5]]},"91":{"position":[[33,4]]}}}],["demonstr",{"_index":742,"t":{"54":{"position":[[636,11]]}}}],["dep",{"_index":1027,"t":{"62":{"position":[[282,4]]}}}],["depend",{"_index":55,"t":{"11":{"position":[[10,7]]},"62":{"position":[[242,13]]},"72":{"position":[[17,12]]},"76":{"position":[[534,12],[620,12]]},"78":{"position":[[24,12],[105,12],[295,12],[462,13],[557,10],[929,12],[1127,12]]}}}],["deploy",{"_index":67,"t":{"11":{"position":[[207,6]]},"24":{"position":[[108,9]]},"64":{"position":[[42,11]]},"212":{"position":[[103,11]]}}}],["depsnpm",{"_index":1208,"t":{"126":{"position":[[72,7]]}}}],["depth",{"_index":242,"t":{"28":{"position":[[79,5]]},"30":{"position":[[307,5]]},"32":{"position":[[772,5],[913,5]]},"48":{"position":[[711,6]]},"56":{"position":[[329,6]]},"107":{"position":[[74,5]]},"109":{"position":[[361,5]]},"114":{"position":[[344,5]]}}}],["descript",{"_index":780,"t":{"54":{"position":[[2466,14],[2609,14],[7448,14],[7591,14]]}}}],["design",{"_index":147,"t":{"19":{"position":[[131,6]]},"34":{"position":[[384,7]]},"66":{"position":[[350,7]]},"216":{"position":[[63,8]]}}}],["desir",{"_index":1255,"t":{"140":{"position":[[422,7]]}}}],["detail",{"_index":686,"t":{"48":{"position":[[402,8],[595,8]]},"62":{"position":[[418,8]]},"86":{"position":[[359,8]]},"93":{"position":[[87,8]]},"118":{"position":[[997,8]]},"160":{"position":[[37,7]]},"168":{"position":[[41,8]]},"182":{"position":[[675,8]]}}}],["detergent?th",{"_index":1360,"t":{"174":{"position":[[1302,13]]}}}],["determin",{"_index":1087,"t":{"78":{"position":[[272,9]]},"86":{"position":[[634,11]]}}}],["determinist",{"_index":533,"t":{"38":{"position":[[394,13]]},"42":{"position":[[58,13],[629,17],[783,13]]},"44":{"position":[[1417,14]]},"56":{"position":[[2163,13]]}}}],["dev",{"_index":1028,"t":{"62":{"position":[[326,3]]},"126":{"position":[[84,3]]},"134":{"position":[[477,3]]}}}],["devday",{"_index":729,"t":{"54":{"position":[[47,6]]}}}],["develop",{"_index":222,"t":{"26":{"position":[[233,10],[541,9]]},"28":{"position":[[286,11]]},"34":{"position":[[136,10],[1296,11]]},"54":{"position":[[220,10]]},"62":{"position":[[298,11]]},"78":{"position":[[363,11],[852,11],[1210,11]]},"128":{"position":[[277,10]]},"149":{"position":[[16,12]]},"174":{"position":[[1168,10]]}}}],["devic",{"_index":1041,"t":{"66":{"position":[[392,8]]}}}],["devis",{"_index":616,"t":{"42":{"position":[[401,8]]}}}],["diagram",{"_index":1164,"t":{"114":{"position":[[47,8]]}}}],["dict",{"_index":360,"t":{"34":{"position":[[544,4],[653,4]]},"36":{"position":[[931,4],[954,4],[1359,6]]},"156":{"position":[[183,5]]}}}],["dict[str",{"_index":1475,"t":{"188":{"position":[[500,9]]}}}],["dictfrom",{"_index":1468,"t":{"188":{"position":[[174,8]]}}}],["differ",{"_index":1590,"t":{"234":{"position":[[91,9]]}}}],["direct",{"_index":485,"t":{"36":{"position":[[1951,6]]},"182":{"position":[[135,9]]}}}],["directori",{"_index":991,"t":{"56":{"position":[[3325,10]]},"62":{"position":[[151,10]]},"118":{"position":[[91,10],[559,10]]},"122":{"position":[[35,10]]}}}],["discord",{"_index":12,"t":{"3":{"position":[[158,7]]},"38":{"position":[[1396,7]]}}}],["discuss",{"_index":463,"t":{"36":{"position":[[1054,10]]}}}],["display",{"_index":1038,"t":{"66":{"position":[[213,8]]},"122":{"position":[[98,7]]}}}],["disrupt",{"_index":558,"t":{"38":{"position":[[1226,10]]},"52":{"position":[[67,11],[242,7]]}}}],["dive",
{"_index":327,"t":{"32":{"position":[[1399,4]]},"34":{"position":[[3423,4]]}}}],["divid",{"_index":371,"t":{"34":{"position":[[789,7]]},"42":{"position":[[420,6]]}}}],["do",{"_index":237,"t":{"26":{"position":[[566,5]]}}}],["doc",{"_index":998,"t":{"56":{"position":[[3717,4]]},"184":{"position":[[348,5]]},"186":{"position":[[340,3]]},"220":{"position":[[424,3]]}}}],["doc_path",{"_index":973,"t":{"56":{"position":[[2885,9]]},"118":{"position":[[673,8]]},"184":{"position":[[247,8]]}}}],["doc_path=\"./mi",{"_index":1459,"t":{"184":{"position":[[333,14]]}}}],["docker",{"_index":1103,"t":{"82":{"position":[[17,6],[234,6]]},"124":{"position":[[17,6],[149,6],[222,7],[232,6],[328,6]]}}}],["document",{"_index":65,"t":{"11":{"position":[[161,14],[298,14]]},"13":{"position":[[502,13]]},"56":{"position":[[2246,8],[2927,10]]},"68":{"position":[[1229,14]]},"99":{"position":[[183,9]]},"120":{"position":[[59,13]]},"122":{"position":[[292,9]]},"174":{"position":[[229,9]]},"176":{"position":[[278,10]]},"184":{"position":[[57,9],[98,10],[207,10],[290,9],[525,10],[893,11]]},"186":{"position":[[153,10],[359,9],[442,14]]},"188":{"position":[[71,8],[846,10],[1173,11],[1300,9],[1544,9],[1592,9]]},"202":{"position":[[610,13]]},"212":{"position":[[13,13]]}}}],["document(page_content=essay)]text_splitt",{"_index":1406,"t":{"174":{"position":[[2686,43]]}}}],["documentfrom",{"_index":1467,"t":{"188":{"position":[[141,12]]}}}],["documents=docu",{"_index":1484,"t":{"188":{"position":[[975,20],[1716,21]]}}}],["docx",{"_index":325,"t":{"32":{"position":[[1366,5]]},"36":{"position":[[2513,7],[3235,5]]},"107":{"position":[[401,4]]},"114":{"position":[[788,5]]},"118":{"position":[[1168,7]]}}}],["doesn't",{"_index":1244,"t":{"140":{"position":[[91,7]]}}}],["doesn’t",{"_index":803,"t":{"54":{"position":[[3316,7]]}}}],["domain",{"_index":1117,"t":{"88":{"position":[[565,6]]}}}],["don't",{"_index":1200,"t":{"124":{"position":[[199,5]]},"136":{"position":[[202,5]]},"174":{"position":[[909,5]]}}}],["done",{"_index":399,"t":{"34":{"position":[[1966,4]]},"36":{"position":[[718,4]]},"38":{"position":[[659,4]]},"46":{"position":[[895,5]]},"54":{"position":[[11,4]]},"174":{"position":[[2008,4]]}}}],["down",{"_index":380,"t":{"34":{"position":[[1032,4]]},"50":{"position":[[75,4]]},"140":{"position":[[244,4]]},"174":{"position":[[2602,4]]}}}],["downgrad",{"_index":1247,"t":{"140":{"position":[[139,9],[659,9]]}}}],["download",{"_index":523,"t":{"36":{"position":[[3244,8]]},"140":{"position":[[474,8]]}}}],["draft",{"_index":309,"t":{"32":{"position":[[817,6],[992,5],[1093,5]]},"36":{"position":[[842,6],[877,5],[947,6],[1798,6],[2053,6]]},"114":{"position":[[389,6],[440,5],[515,5]]}}}],["draft['review",{"_index":483,"t":{"36":{"position":[[1817,15]]}}}],["draftstat",{"_index":462,"t":{"36":{"position":[[1011,11]]}}}],["draftstate(typeddict",{"_index":460,"t":{"36":{"position":[[902,22]]}}}],["drag",{"_index":1182,"t":{"122":{"position":[[212,4]]},"174":{"position":[[2096,4]]}}}],["drive",{"_index":192,"t":{"24":{"position":[[456,5]]}}}],["driver",{"_index":1246,"t":{"140":{"position":[[124,6],[725,6]]}}}],["drop",{"_index":889,"t":{"54":{"position":[[8846,4]]},"122":{"position":[[219,4]]}}}],["drug",{"_index":1354,"t":{"174":{"position":[[1238,5]]}}}],["duckduckgo",{"_index":913,"t":{"56":{"position":[[1131,11]]},"97":{"position":[[708,10]]}}}],["dure",{"_index":597,"t":{"40":{"position":[[727,6]]},"166":{"position":[[40,6]]},"176":{"position":[[627,6]]}}}],["dynam",{"_index":349,"t":{"34":{"position":[[281,7]]},"38":{"position":[[900,11]]}}
}],["e.g",{"_index":1148,"t":{"99":{"position":[[478,6]]}}}],["each",{"_index":307,"t":{"32":{"position":[[697,4]]},"34":{"position":[[222,4],[912,4],[1320,4],[1984,4],[2394,4]]},"36":{"position":[[152,4],[640,4],[1129,4],[2641,5]]},"46":{"position":[[208,4]]},"50":{"position":[[235,4],[361,4]]},"52":{"position":[[327,4]]},"54":{"position":[[3502,4]]},"74":{"position":[[45,5]]},"88":{"position":[[179,4],[432,4],[720,4],[846,4]]},"97":{"position":[[185,4],[381,4]]},"114":{"position":[[269,4]]},"118":{"position":[[287,4]]},"176":{"position":[[634,4]]},"212":{"position":[[119,4]]},"214":{"position":[[72,4]]}}}],["earn",{"_index":1369,"t":{"174":{"position":[[1582,4]]}}}],["easi",{"_index":1039,"t":{"66":{"position":[[222,4]]},"93":{"position":[[234,4]]},"144":{"position":[[13,4]]}}}],["easier",{"_index":494,"t":{"36":{"position":[[2336,6]]},"54":{"position":[[209,6]]}}}],["easili",{"_index":1048,"t":{"68":{"position":[[90,6]]}}}],["econom",{"_index":1374,"t":{"174":{"position":[[1644,8]]}}}],["edg",{"_index":438,"t":{"34":{"position":[[3209,5]]},"36":{"position":[[1298,6],[1574,5],[1928,6]]},"174":{"position":[[990,4]]}}}],["edit",{"_index":1168,"t":{"118":{"position":[[55,4]]}}}],["editor",{"_index":264,"t":{"30":{"position":[[54,6],[340,6]]},"32":{"position":[[510,6],[593,6]]},"109":{"position":[[144,6],[394,6]]},"114":{"position":[[186,6]]}}}],["editor_ag",{"_index":404,"t":{"34":{"position":[[2136,12]]}}}],["editor_agent.plan_research",{"_index":415,"t":{"34":{"position":[[2500,27]]}}}],["editor_agent.run_parallel_research",{"_index":417,"t":{"34":{"position":[[2560,35]]}}}],["editoragent(self.task",{"_index":405,"t":{"34":{"position":[[2151,22]]}}}],["education\"report_typ",{"_index":1282,"t":{"153":{"position":[[55,21]]}}}],["effici",{"_index":104,"t":{"13":{"position":[[358,9]]},"66":{"position":[[454,9]]},"78":{"position":[[1200,9]]}}}],["effort",{"_index":540,"t":{"38":{"position":[[644,7]]}}}],["elif",{"_index":885,"t":{"54":{"position":[[8355,4]]}}}],["elimin",{"_index":623,"t":{"42":{"position":[[702,10]]},"174":{"position":[[2080,11]]}}}],["email",{"_index":1061,"t":{"68":{"position":[[1129,5]]}}}],["emb",{"_index":1532,"t":{"214":{"position":[[651,5]]}}}],["embed",{"_index":100,"t":{"13":{"position":[[309,11]]},"56":{"position":[[1241,9]]},"176":{"position":[[303,10]]},"188":{"position":[[559,10]]},"210":{"position":[[26,9],[232,9]]},"214":{"position":[[45,11],[179,9],[528,10],[585,9]]}}}],["embedding=embed",{"_index":1479,"t":{"188":{"position":[[645,21]]}}}],["embedding=openaiembed",{"_index":1432,"t":{"176":{"position":[[405,29]]}}}],["embedding_provid",{"_index":918,"t":{"56":{"position":[[1208,19]]}}}],["embedding_provider=\"azure_openai\"azure_openai_api_key=\"your",{"_index":1517,"t":{"212":{"position":[[219,59]]}}}],["embedding_provider=ollama",{"_index":1530,"t":{"214":{"position":[[539,26]]}}}],["employe",{"_index":1386,"t":{"174":{"position":[[1940,9]]}}}],["empti",{"_index":975,"t":{"56":{"position":[[2953,5]]}}}],["enabl",{"_index":750,"t":{"54":{"position":[[1011,6]]},"56":{"position":[[27,7]]},"78":{"position":[[527,8]]},"86":{"position":[[413,6]]},"122":{"position":[[85,6]]}}}],["encapsul",{"_index":344,"t":{"34":{"position":[[172,12]]},"78":{"position":[[946,13]]}}}],["end",{"_index":427,"t":{"34":{"position":[[2879,3],[2956,4]]},"36":{"position":[[1868,4],[2034,3]]},"40":{"position":[[527,6]]},"48":{"position":[[850,3]]},"54":{"position":[[2076,3],[5943,3]]},"132":{"position":[[112,3]]}}}],["endpoint",{"_index":1144,"t":{"99":{"position":[[354,8]]},"103":{"posit
ion":[[66,8]]},"120":{"position":[[116,10]]},"214":{"position":[[225,8]]}}}],["endpoint>.openai.azure.com/\"openai_api_version=\"2024",{"_index":1519,"t":{"212":{"position":[[320,52]]}}}],["energi",{"_index":1276,"t":{"149":{"position":[[42,6]]}}}],["engin",{"_index":97,"t":{"13":{"position":[[283,12]]},"19":{"position":[[191,7]]},"24":{"position":[[377,14]]},"56":{"position":[[1066,6],[3379,6]]},"97":{"position":[[51,6],[123,7],[197,6],[530,8]]},"99":{"position":[[143,6]]},"188":{"position":[[517,6]]},"202":{"position":[[133,7]]}}}],["english",{"_index":1174,"t":{"118":{"position":[[1406,8]]}}}],["enhanc",{"_index":529,"t":{"38":{"position":[[228,7]]},"62":{"position":[[28,8]]},"66":{"position":[[13,8]]},"68":{"position":[[997,7]]}}}],["enjoy",{"_index":1067,"t":{"72":{"position":[[194,5]]},"82":{"position":[[313,5]]},"124":{"position":[[507,5]]}}}],["enough",{"_index":238,"t":{"26":{"position":[[576,6]]},"44":{"position":[[1136,6],[1284,6]]}}}],["ensur",{"_index":123,"t":{"17":{"position":[[18,6],[333,6]]},"38":{"position":[[352,7]]},"78":{"position":[[328,8],[904,7],[1090,7]]},"144":{"position":[[55,6]]},"192":{"position":[[64,8]]},"194":{"position":[[88,8]]}}}],["enter",{"_index":1095,"t":{"78":{"position":[[736,6]]}}}],["enterpris",{"_index":1142,"t":{"99":{"position":[[216,10]]}}}],["entir",{"_index":345,"t":{"34":{"position":[[189,6]]},"42":{"position":[[431,6]]}}}],["entrepeneur",{"_index":1417,"t":{"174":{"position":[[3080,12]]}}}],["env",{"_index":988,"t":{"56":{"position":[[3237,3],[3259,4],[3510,3]]},"76":{"position":[[181,4],[263,3],[595,3]]},"82":{"position":[[103,4]]},"97":{"position":[[159,3],[563,4],[592,4],[625,4],[657,4],[687,4],[721,4],[755,4],[782,4],[817,4]]},"99":{"position":[[88,3],[280,4],[320,3]]},"116":{"position":[[71,3]]},"118":{"position":[[682,3]]},"124":{"position":[[122,6]]},"144":{"position":[[232,4]]},"184":{"position":[[234,3]]},"212":{"position":[[208,4]]},"224":{"position":[[61,3]]},"226":{"position":[[119,3]]},"228":{"position":[[133,3]]},"230":{"position":[[76,3]]}}}],["env.exampl",{"_index":1198,"t":{"124":{"position":[[43,14]]}}}],["env\\scripts\\activ",{"_index":1080,"t":{"76":{"position":[[373,22]]}}}],["environ",{"_index":1072,"t":{"76":{"position":[[25,11],[94,11],[138,11],[296,12],[427,12],[561,12],[599,12]]},"78":{"position":[[49,11],[154,11],[375,12],[400,11],[656,11],[779,11],[892,11],[1175,11]]},"80":{"position":[[50,11]]},"122":{"position":[[348,11]]},"144":{"position":[[200,11]]},"214":{"position":[[126,11]]},"218":{"position":[[201,11]]},"236":{"position":[[100,11],[244,11],[395,11],[539,11],[677,11],[806,11]]}}}],["eras",{"_index":509,"t":{"36":{"position":[[2724,5]]}}}],["especi",{"_index":644,"t":{"44":{"position":[[428,10]]},"174":{"position":[[1541,10]]}}}],["essay",{"_index":1322,"t":{"174":{"position":[[318,5],[2951,5]]}}}],["essenti",{"_index":1099,"t":{"78":{"position":[[1037,9]]}}}],["establish",{"_index":1070,"t":{"76":{"position":[[0,12]]},"78":{"position":[[0,12]]}}}],["etc",{"_index":101,"t":{"13":{"position":[[321,5]]},"32":{"position":[[1382,4]]},"56":{"position":[[336,4],[2429,4]]},"68":{"position":[[1159,4]]},"114":{"position":[[804,4]]}}}],["even",{"_index":195,"t":{"24":{"position":[[502,4]]},"48":{"position":[[198,4]]},"52":{"position":[[727,4]]},"54":{"position":[[8819,4]]},"174":{"position":[[1876,5]]}}}],["event",{"_index":781,"t":{"54":{"position":[[2508,6],[7490,6]]}}}],["eventu",{"_index":714,"t":{"52":{"position":[[485,10],[752,10]]}}}],["everyon",{"_index":201,"t":{"24":{"position":[[598,8]]}}}],["evolv",{"_i
ndex":169,"t":{"24":{"position":[[141,7],[293,7]]},"34":{"position":[[312,8]]}}}],["exa",{"_index":1136,"t":{"97":{"position":[[776,3]]}}}],["exact",{"_index":637,"t":{"44":{"position":[[264,5]]}}}],["exampl",{"_index":75,"t":{"11":{"position":[[346,8]]},"28":{"position":[[370,7],[503,7]]},"34":{"position":[[1384,7]]},"36":{"position":[[3265,7]]},"46":{"position":[[813,7]]},"54":{"position":[[1536,8],[2654,8],[7636,8]]},"56":{"position":[[3340,8]]},"68":{"position":[[6,7],[154,7],[1010,7]]},"76":{"position":[[172,8]]},"97":{"position":[[259,8],[423,8]]},"107":{"position":[[188,7],[438,7]]},"118":{"position":[[1031,9]]},"158":{"position":[[515,7],[674,9]]},"184":{"position":[[317,8]]},"186":{"position":[[66,8]]},"188":{"position":[[95,8]]},"222":{"position":[[262,8],[394,8]]},"224":{"position":[[75,8]]},"226":{"position":[[133,8]]}}}],["excel",{"_index":646,"t":{"44":{"position":[[691,5]]},"184":{"position":[[169,6]]}}}],["except",{"_index":1612,"t":{"236":{"position":[[34,9]]}}}],["excit",{"_index":446,"t":{"36":{"position":[[18,8],[77,8]]},"38":{"position":[[31,8]]},"128":{"position":[[315,7]]}}}],["execut",{"_index":582,"t":{"40":{"position":[[308,8]]},"42":{"position":[[647,7]]},"46":{"position":[[117,7]]},"68":{"position":[[786,8]]},"76":{"position":[[186,7]]},"78":{"position":[[196,9],[868,10]]},"80":{"position":[[81,9]]},"88":{"position":[[38,11],[119,9]]}}}],["exerpt",{"_index":1318,"t":{"174":{"position":[[239,6]]}}}],["exist",{"_index":377,"t":{"34":{"position":[[969,8]]},"36":{"position":[[1978,6],[2717,6]]},"68":{"position":[[116,8]]},"176":{"position":[[245,6]]}}}],["exit",{"_index":882,"t":{"54":{"position":[[7959,7]]}}}],["expans",{"_index":962,"t":{"56":{"position":[[2504,9]]}}}],["expect",{"_index":736,"t":{"54":{"position":[[449,6]]},"174":{"position":[[2260,7],[2411,8]]}}}],["experi",{"_index":236,"t":{"26":{"position":[[551,10]]},"36":{"position":[[38,10]]},"38":{"position":[[105,12],[260,11]]},"44":{"position":[[596,13]]},"48":{"position":[[153,13]]},"66":{"position":[[366,10]]}}}],["expert",{"_index":768,"t":{"54":{"position":[[1803,7],[5670,7]]}}}],["explanatori",{"_index":516,"t":{"36":{"position":[[2878,12]]}}}],["explicit",{"_index":1506,"t":{"202":{"position":[[188,8]]}}}],["explor",{"_index":1102,"t":{"80":{"position":[[206,7]]},"202":{"position":[[262,7]]}}}],["export",{"_index":990,"t":{"56":{"position":[[3287,6],[3405,6],[3492,6]]},"82":{"position":[[144,6],[156,6]]},"93":{"position":[[383,6]]},"116":{"position":[[85,6]]},"144":{"position":[[277,6],[287,6],[341,6]]},"184":{"position":[[326,6]]}}}],["extens",{"_index":212,"t":{"26":{"position":[[16,9]]}}}],["extern",{"_index":908,"t":{"56":{"position":[[858,8],[3758,8]]},"170":{"position":[[83,8]]}}}],["extra",{"_index":1288,"t":{"158":{"position":[[44,6]]},"228":{"position":[[35,5]]}}}],["extract",{"_index":843,"t":{"54":{"position":[[4885,7]]}}}],["face",{"_index":1222,"t":{"134":{"position":[[328,4]]}}}],["facilit",{"_index":339,"t":{"34":{"position":[[88,11]]}}}],["fact",{"_index":690,"t":{"48":{"position":[[723,5]]}}}],["factual",{"_index":126,"t":{"17":{"position":[[60,7]]},"32":{"position":[[923,7]]},"44":{"position":[[47,10],[284,10],[502,7],[567,10],[1432,8]]},"52":{"position":[[369,7]]},"68":{"position":[[1369,7]]},"93":{"position":[[198,7]]},"202":{"position":[[179,8]]}}}],["fail",{"_index":823,"t":{"54":{"position":[[3926,9],[6411,9],[8319,9]]},"138":{"position":[[51,4]]}}}],["fair",{"_index":697,"t":{"48":{"position":[[1066,4]]}}}],["fairli",{"_index":1401,"t":{"174":{"position":[[2460,6]]}}}],["fai
ss.from_documents(docu",{"_index":1411,"t":{"174":{"position":[[2874,31]]}}}],["faissfrom",{"_index":1316,"t":{"174":{"position":[[187,9]]}}}],["fals",{"_index":503,"t":{"36":{"position":[[2550,6],[2939,5]]},"118":{"position":[[419,6],[810,6],[1210,6]]}}}],["familiar",{"_index":1069,"t":{"74":{"position":[[28,11]]}}}],["far",{"_index":796,"t":{"54":{"position":[[3211,3]]}}}],["fast",{"_index":925,"t":{"56":{"position":[[1454,4],[1674,4]]},"216":{"position":[[93,4]]},"222":{"position":[[232,4],[364,4]]},"230":{"position":[[113,4]]},"234":{"position":[[259,4],[462,4]]}}}],["fast.her",{"_index":1371,"t":{"174":{"position":[[1609,9]]}}}],["fast_llm_model",{"_index":924,"t":{"56":{"position":[[1423,15]]}}}],["fast_llm_model=\"gpt",{"_index":1511,"t":{"208":{"position":[[230,19]]}}}],["fast_llm_model=claud",{"_index":1564,"t":{"222":{"position":[[271,21],[403,21]]}}}],["fast_token_limit",{"_index":931,"t":{"56":{"position":[[1632,17]]}}}],["fastapi",{"_index":1000,"t":{"60":{"position":[[29,7]]},"72":{"position":[[97,7]]},"80":{"position":[[11,7]]},"156":{"position":[[5,7]]}}}],["fastapi()@app.get(\"/report/{report_type}\")async",{"_index":1286,"t":{"156":{"position":[[87,47]]}}}],["fastapifrom",{"_index":1284,"t":{"156":{"position":[[20,11]]}}}],["faster",{"_index":679,"t":{"46":{"position":[[1015,6]]},"118":{"position":[[846,6]]}}}],["favorit",{"_index":338,"t":{"34":{"position":[[10,8]]}}}],["featur",{"_index":151,"t":{"19":{"position":[[282,8]]},"34":{"position":[[19,8]]},"36":{"position":[[86,7]]},"62":{"position":[[37,8]]},"64":{"position":[[71,7]]},"66":{"position":[[407,8]]},"122":{"position":[[191,9]]},"128":{"position":[[47,8]]}}}],["feedback",{"_index":282,"t":{"30":{"position":[[548,8]]},"32":{"position":[[1037,8],[1146,9]]},"38":{"position":[[1446,8]]},"66":{"position":[[157,8]]},"109":{"position":[[114,8],[602,8]]},"114":{"position":[[483,9],[568,9]]},"118":{"position":[[392,8]]}}}],["feel",{"_index":155,"t":{"19":{"position":[[437,4]]},"24":{"position":[[846,4]]},"54":{"position":[[775,4],[8833,4]]},"103":{"position":[[375,4]]}}}],["fetch",{"_index":808,"t":{"54":{"position":[[3467,8]]},"68":{"position":[[469,5]]}}}],["fetch_report(queri",{"_index":1055,"t":{"68":{"position":[[431,19],[863,19]]}}}],["few",{"_index":179,"t":{"24":{"position":[[261,3]]},"48":{"position":[[1044,3]]},"174":{"position":[[1420,3]]}}}],["field",{"_index":518,"t":{"36":{"position":[[3042,5]]},"118":{"position":[[135,8]]}}}],["file",{"_index":496,"t":{"36":{"position":[[2399,5]]},"54":{"position":[[614,6]]},"56":{"position":[[708,4],[872,4],[962,4],[3264,4],[3772,4]]},"60":{"position":[[53,6]]},"76":{"position":[[660,4]]},"78":{"position":[[264,4]]},"82":{"position":[[108,4]]},"118":{"position":[[74,4]]},"122":{"position":[[266,5]]},"124":{"position":[[58,5],[96,4],[114,4],[164,4],[343,5]]},"144":{"position":[[237,4]]},"184":{"position":[[129,4]]},"212":{"position":[[213,5]]}}}],["filter",{"_index":84,"t":{"13":{"position":[[136,9]]},"17":{"position":[[240,6]]},"50":{"position":[[396,7]]},"88":{"position":[[234,7],[950,6]]},"202":{"position":[[485,9]]}}}],["final",{"_index":183,"t":{"24":{"position":[[340,7]]},"30":{"position":[[627,5],[684,5]]},"32":{"position":[[169,7],[1189,5],[1323,5]]},"34":{"position":[[3219,7]]},"36":{"position":[[476,5],[1143,8],[2047,5],[2242,10],[3176,5]]},"38":{"position":[[728,5]]},"48":{"position":[[0,8]]},"50":{"position":[[84,5],[460,8],[517,5]]},"54":{"position":[[2117,8],[3017,8],[4029,9]]},"88":{"position":[[213,8],[941,8],[1009,5]]},"109":{"position":[[681,5],[738,5]]},"1
14":{"position":[[611,5],[745,5]]},"146":{"position":[[384,8]]},"158":{"position":[[621,8]]},"220":{"position":[[4,8]]}}}],["financ",{"_index":705,"t":{"52":{"position":[[259,8]]},"54":{"position":[[1561,7],[1795,7],[5662,7]]}}}],["financi",{"_index":763,"t":{"54":{"position":[[1605,9],[5289,9]]},"68":{"position":[[1263,9]]}}}],["find",{"_index":322,"t":{"32":{"position":[[1287,9]]},"40":{"position":[[224,4]]},"54":{"position":[[1939,4],[5806,4]]},"66":{"position":[[255,9]]},"86":{"position":[[90,4]]},"114":{"position":[[709,9]]},"140":{"position":[[252,4]]}}}],["finit",{"_index":626,"t":{"42":{"position":[[797,6]]}}}],["first",{"_index":437,"t":{"34":{"position":[[3163,5]]},"40":{"position":[[68,5]]},"42":{"position":[[4,5],[394,6],[550,5]]},"54":{"position":[[1688,5],[1701,6]]},"140":{"position":[[208,6]]}}}],["fix",{"_index":607,"t":{"42":{"position":[[151,5]]},"122":{"position":[[501,5]]}}}],["flask",{"_index":1287,"t":{"158":{"position":[[23,5],[88,5],[101,6],[470,5],[500,5]]}}}],["flask(__name__)@app.route('/report/ Search the documentation | GPT Researcher - +

Search the documentation

- + \ No newline at end of file