app.py
import os
from typing import Dict, List, Union

import uvicorn
from dotenv import load_dotenv
from fastapi import FastAPI, Request, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from PyPDF2 import PdfReader

app = FastAPI()

# Allow cross-origin requests from any host (fine for a local demo; tighten in production)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")

# Module-level conversation chain; set once documents have been processed
conversation = None

@app.on_event("startup")
async def startup_event():
    # load_dotenv() reads OPENAI_API_KEY from a .env file into the environment,
    # where the LangChain OpenAI classes pick it up automatically.
    load_dotenv()
    if not os.environ.get("OPENAI_API_KEY"):
        raise RuntimeError("OPENAI_API_KEY is not set; add it to your environment or .env file")
    # Pre-process the bundled document so the bot can answer questions immediately
    await upload_files(["ScottBot.pdf"])

@app.get("/")
async def read_root(request: Request):
    # Serve the chat UI from templates/index.html
    return templates.TemplateResponse("index.html", {"request": request})

@app.post("/upload/")
async def upload_files(files: Union[List[UploadFile], List[str]]):
    """Build the retrieval pipeline from uploaded PDFs (or local file paths)."""
    global conversation  # Rebind the module-level chain
    # Extract the raw text from every PDF
    raw_text = get_pdf_text(files)
    # Split the text into overlapping chunks
    text_chunks = get_text_chunks(raw_text)
    # Embed the chunks and index them in a FAISS vector store
    vectorstore = get_vectorstore(text_chunks)
    # Wire the retriever, LLM, and memory into a conversational chain
    conversation = get_conversation_chain(vectorstore)
    return {"detail": "Files processed successfully"}

@app.post("/ask/")
async def ask_question(data: Dict[str, str]):
    global conversation
    if conversation is None:
        return {"error": "No documents uploaded yet"}
    question = data.get("question")
    if question is None:
        return {"error": "No question provided"}
    # The chain retrieves relevant chunks, answers, and appends the turn to its memory
    response = conversation({"question": question})
    chat_history = response["chat_history"]
    return {"chat_history": chat_history}

def get_pdf_text(files):
    """Concatenate the text of every page in every given PDF."""
    text = ""
    for file in files:
        if isinstance(file, str):  # A path on disk (e.g. the pre-loaded document)
            with open(file, "rb") as f:
                pdf_reader = PdfReader(f)
                for page in pdf_reader.pages:
                    text += page.extract_text() or ""  # extract_text() can return None
        else:  # An UploadFile instance from the /upload/ endpoint
            pdf_reader = PdfReader(file.file)
            for page in pdf_reader.pages:
                text += page.extract_text() or ""
    return text

def get_text_chunks(text):
    # Split on newlines into ~1000-character chunks that overlap by 200 characters,
    # so retrieval keeps some context across chunk boundaries.
    text_splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    chunks = text_splitter.split_text(text)
    return chunks
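
# For instance, a 1,200-character document would typically come back as two
# chunks: roughly characters 0-1000 and 800-1200, sharing ~200 characters of
# overlap (exact boundaries depend on where the newline separators fall).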

def get_vectorstore(text_chunks):
    # Embed each chunk with OpenAI and index the vectors in an in-memory FAISS store
    embeddings = OpenAIEmbeddings()
    vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
    return vectorstore
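
# The index above lives only in memory and is rebuilt on every startup. If that
# becomes slow, LangChain's FAISS wrapper can persist it to disk; a sketch (the
# "faiss_index" directory name is arbitrary):
#
#   vectorstore.save_local("faiss_index")
#   vectorstore = FAISS.load_local("faiss_index", OpenAIEmbeddings())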

def get_conversation_chain(vectorstore):
    llm = ChatOpenAI()
    # Buffer memory keeps the full chat history so follow-up questions have context
    memory = ConversationBufferMemory(
        memory_key="chat_history", return_messages=True
    )
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory,
    )
    return conversation_chain

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)