Skip to content

Commit

Permalink
Merge pull request #21 from Strong-AI-Lab/main
Browse files Browse the repository at this point in the history
Update the JVNAUTOSCI-111 from main branch
  • Loading branch information
ysu132 authored Sep 30, 2024
2 parents 8dcdd48 + d455bf6 commit c9b272c
Show file tree
Hide file tree
Showing 12 changed files with 351 additions and 8 deletions.
Binary file added dist/vonlib-0.1.0.tar.gz
Binary file not shown.
9 changes: 9 additions & 0 deletions src/setup.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# setup.py — minimal setuptools packaging script for the vonlib package.
from setuptools import find_packages, setup

# NOTE(review): include=['vonlib'] picks up only the top-level package;
# subpackages would additionally need 'vonlib.*' — confirm this is intended.
setup(
name='vonlib',
packages=find_packages(include=['vonlib']),
version='0.1.0',
description='Python Package of tell_von',
author='University of Auckland Strong AI Lab',
)
47 changes: 39 additions & 8 deletions src/tell_von/portal.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,21 +4,46 @@
from tkinter import ttk
from ttkthemes import ThemedTk
import customtkinter as ctk
from vonlib.googledrive import authenticate_google_drive, upload_file_to_drive, save_to_drive

from vonlib.googledrive import save_to_drive

from openai import OpenAI
import os
# Load your OpenAI API key
client = OpenAI(
api_key=os.environ.get("OPENAI_API_KEY")
)

text_input = None # Define the text input widget globally
text_input_classification = None
root = None # Define the root window globally

def start_portal(root):
# def on_submit_classification(event=None):
# user_input_classification = text_input_classification.get("1.0", tk.END).rstrip('\n') # Get the text from the text widget
# return
def on_submit(event=None): # Accept an optional event argument

user_input = text_input.get("1.0", tk.END).rstrip('\n') # Get the text from the text widget

user_input_classification = text_input_classification.get("1.0", tk.END).rstrip('\n')
# print("User input raw:", user_input)
if user_input.strip(): # Ensure the input is not just empty or spaces
save_to_drive(user_input) # Save the input to a file

print(user_input_classification)
###################### LLM ##########################
conclusion = client.chat.completions.create(
model="gpt-4o",
messages=[{
"role":"system","content":"Please generate a conclusion of the meeting record."
},{
"role":"user","content":user_input
}]
)
# Print the response
print(conclusion.choices[0].message.content)
res_conclusion = conclusion.choices[0].message.content

if res_conclusion.strip(): # Ensure the input is not just empty or spaces
save_to_drive(res_conclusion+"\n\n"+"---------------"+"\n\n"+user_input,user_input_classification) # Save the input to a file
elif user_input.strip():
save_to_drive(user_input,user_input_classification) # Save the input to a file
else: # If the input is empty, print a message
print("No non blank input provided.")

Expand All @@ -40,6 +65,7 @@ def exit_application():
}

text_input = ctk.CTkTextbox(root, corner_radius=10, border_color="blue", font=("Helvetica", 12), width=380, height=100, **gtk_options)
text_input.insert("1.0","INPUTING......")
text_input.pack(pady=20, padx=20)
text_input.bind('<Control-Return>', on_submit) # Bind the Enter key to submi
text_input.focus() # Set focus to the text input box
Expand All @@ -49,7 +75,7 @@ def exit_application():
# Use CustomTkinter to create the main window

root.title("tell von")
root.geometry('400x200')
root.geometry('400x300')

# Define custom styles that complement the Arc theme
custom_font = tkfont.Font(family="Helvetica", size=12)
Expand All @@ -61,7 +87,12 @@ def exit_application():
#text_input = ttk.Entry(root, font=custom_font, width=50)
#text_input.pack(pady=20)


#############classification###########
text_input_classification = ctk.CTkTextbox(root, corner_radius=10, border_color="blue", font=("Helvetica", 12), width=380, height=50, **gtk_options)
text_input_classification.insert("1.0","CLASSIFICATION TITLE")
text_input_classification.pack(pady=20, padx=20)
text_input_classification.bind('<Control-Return>', on_submit) # Bind the Enter key to submi
text_input_classification.focus() # Set focus to the text input box

submit_button = ctk.CTkButton(root, text='Submit', command=on_submit, border_color="blue", **gtk_options)
submit_button.pack(side=tk.LEFT, padx=(20, 10))
Expand Down
15 changes: 15 additions & 0 deletions src/vonlib/base.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
"""Defines a common interface for paper recommendation."""

from dataclasses import dataclass


@dataclass
class RecommendationOutput:
    """Result of a paper-recommendation query.

    Attributes:
        decision: True when the paper is judged worth reading, False otherwise.
        explanation: Human-readable justification for the decision.
    """

    decision: bool
    explanation: str
1 change: 1 addition & 0 deletions src/vonlib/googledrive.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import os
import io
from googleapiclient.http import MediaIoBaseDownload
from vonlib.localdrive import scan_drives

SCOPES = [
'https://www.googleapis.com/auth/drive.file',
Expand Down
150 changes: 150 additions & 0 deletions src/vonlib/gpt4connect.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,150 @@
import os
from openai import OpenAI

useollama=True

def get_client():
    """Return a lazily created, process-wide OpenAI-compatible client.

    The first call creates the client — a local Ollama endpoint when
    ``useollama`` is true, otherwise the real OpenAI API — and caches it as a
    function attribute; later calls return the cached instance.

    Side effect: sets the module-level ``the_model`` so callers such as
    ``ask_gpt4`` know which model name to request.  (The original code bound
    ``the_model`` only as a local inside the ollama branch and never in the
    OpenAI branch, so ``ask_gpt4`` raised ``NameError`` — fixed by declaring
    it global and setting it on both branches.)
    """
    global the_model
    if getattr(get_client, "api_client", None) is None:
        if useollama:
            the_model = 'llama3'
            get_client.api_client = OpenAI(
                base_url='http://localhost:11434/v1',
                api_key='ollama',  # required by the client library, but unused by Ollama
            )
        else:
            the_model = 'gpt-4'
            # NOTE(review): unusual env-var capitalisation "OpenAI_API_KEY" —
            # kept as-is for compatibility; confirm against deployment config.
            api_key = os.getenv("OpenAI_API_KEY")
            get_client.api_client = OpenAI(api_key=api_key)

    return get_client.api_client


# from flask import Flask, redirect, render_template, request, url_for

#https://flask.palletsprojects.com/en/2.3.x/quickstart/
#python -m flask run

# for models see https://platform.openai.com/docs/deprecations
# for cost see https://openai.com/api/pricing/

#api details https://platform.openai.com/docs/guides/text-generation

#openai.api_key = os.getenv("OPENAI_API_KEY")
# Configure the API key from your OpenAI account

def ask_gpt4(prompt_text, system_prompt=None):
    """Send a prompt to the configured chat model and return its reply text.

    Args:
        prompt_text: The user prompt to send.
        system_prompt: Optional system message to prepend.  (The original code
            always sent a system message, passing ``content: None`` to the API
            when no system prompt was given — an invalid request.  Fixed by
            including the system message only when one is provided.)

    Returns:
        The model's reply as a string, or None if the API call failed.
    """
    try:
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": prompt_text})

        # get_client() is evaluated before the keyword arguments, so it has
        # already set the module-global `the_model` by the time it is read.
        response = get_client().chat.completions.create(
            model=the_model,
            messages=messages,
            max_tokens=150,  # adjust based on the expected response length
        )

        # Extract and return the text of the first choice.
        return response.choices[0].message.content
    except Exception as e:
        print(f"An error occurred: {e}")
        return None








# app = Flask(__name__)

# @app.route("/", methods=("GET", "POST"))
# def index():
# if request.method == "POST":
# task = request.form["task"]
# response = openai.Completion.create(
# model="text-davinci-003",
# prompt=generate_prompt(task),
# temperature=0.6,
# max_tokens=1000
# )
# return redirect(url_for("index", result=response.choices[0].text))

# result = request.args.get("result")
# return render_template("index.html", result=result)

# def generate_prompt(task):
# return """Suggest a small number of initial steps to take to carry out a task.

# Task: Empty Rubbish Bin
# Steps: Open bin, take out bag, tie bag, put new bag in bin, close bin, take bag to collection point
# Task: Wash Hands
# Steps: Go to washbasin, turn on hot and cold tap part way, if steaming turn down hot, test temperature with finger, turn down hot until comfortable, wet hands, rub soap with hands under tap, replace soap, rinse hands, turn off taps
# Task: {}
# Steps:""".format(
# task.capitalize()
# )


# def generate_prompt_a(animal):
# return """Suggest three names for an animal that is a superhero.

# Animal: Cat
# Names: Captain Sharpclaw, Agent Fluffball, The Incredible Feline
# Animal: Dog
# Names: Ruff the Protector, Wonder Canine, Sir Barks-a-Lot
# Animal: {}
# Names:""".format(
# animal.capitalize()
# )

# print ("hello world")
# index()

# def send_prompt(prompt):
# # Replace 'YOUR_GPT4_API_ENDPOINT' with the actual API endpoint for GPT-4
# api_endpoint = 'YOUR_GPT4_API_ENDPOINT'

# # Set the headers and payload for the API request
# headers = {'Content-Type': 'application/json'}
# payload = {'prompt': prompt}

# try:
# # Send the API request
# response = requests.post(api_endpoint, headers=headers, json=payload)

# # Check if the request was successful
# if response.status_code == 200:
# # Extract the response from the API response
# response_data = response.json()
# gpt4_response = response_data['response']

# return gpt4_response
# else:
# print('Error: Failed to send prompt to GPT-4')
# except requests.exceptions.RequestException as e:
# print('Error: Failed to connect to GPT-4 API')
# print(e)

# # Example usage
# prompt = "Hello, GPT-4!"
# response = send_prompt(prompt)
# print(response)


27 changes: 27 additions & 0 deletions src/vonlib/localdrive.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
import os
import subprocess
import sys
def open_folder(path):
    """Open *path* in the platform's file browser (best-effort).

    Uses Explorer on Windows, Finder on macOS and the desktop default on
    Linux.  (The original used ``open`` for all POSIX systems, but ``open``
    exists only on macOS — Linux needs ``xdg-open``.)
    """
    if os.name == 'nt':  # Windows
        os.startfile(path)
    elif os.name == 'posix':  # macOS and Linux
        opener = 'open' if sys.platform == 'darwin' else 'xdg-open'
        subprocess.call([opener, path])
def scan_drives():
    """Search every drive root for a 'Von' folder and open the first match.

    On Windows each existing lettered drive root is checked; on macOS/Linux
    only the filesystem root '/'.

    Returns:
        The path of the first 'Von' folder found (after opening it in the
        file browser), or None when no drive root contains one.
    """
    drives = []
    if os.name == 'nt':  # Windows: probe every possible drive letter
        import string
        drives = [f"{d}:\\" for d in string.ascii_uppercase if os.path.exists(f"{d}:\\")]
    elif os.name == 'posix':  # macOS / Linux: only the root
        drives = ["/"]

    for drive in drives:
        von_path = os.path.join(drive, "Von")
        # isdir() implies existence, so the separate exists() check is redundant.
        if os.path.isdir(von_path):
            print(f"Found 'Von' folder in {drive}")
            open_folder(von_path)
            return von_path

    print("Folder 'Von' not found in any drive root.")
    return None


if __name__ == "__main__":
    scan_drives()
95 changes: 95 additions & 0 deletions src/vonlib/open_ai.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
"""Implement paper recommendation and explanation using OpenAI Chat Completions API."""

import logging
import string
from typing import List, Union
from openai import OpenAI
from .base import RecommendationOutput

logger = logging.getLogger(__name__)

client = OpenAI()


def prompt_if_worth_reading(project_description: str, paper_abstract: str) -> List:
    """Build the chat messages asking whether a paper is worth reading.

    Args:
        project_description (str): Description of the current project.
        paper_abstract (str): Abstract of the candidate paper.

    Returns:
        A list of role/content message dicts for the Chat Completions API,
        ending with an instruction to answer 'Yes' or 'No' only.
    """
    # Typos in the original prompt text ("descide", "recommand") are fixed —
    # the prompt is the instruction the model actually follows.
    prompt = [
        {
            "role": "system",
            "content": (
                "You help decide whether a research paper is worth reading based on my project description "
                "and the paper's abstract. You only recommend a paper if you think it is closely relevant "
                "to the project."
            ),
        },
        {"role": "user", "content": f"Here is a project description of my current project: {project_description}"},
        {"role": "user", "content": f"And here is an abstract of a research paper: {paper_abstract}"},
        {
            "role": "user",
            "content": (
                "Do you think the research paper is relevant and useful for my project and therefore is worth reading? "
                "Please answer with 'Yes' or 'No' only, without any punctuation."
            ),
        },
    ]
    return prompt


def prompt_explain_decision(project_description: str, paper_abstract: str, decision: str) -> List:
    """Extend the worth-reading prompt so the model must justify its answer.

    Args:
        project_description: Project description.
        paper_abstract: Paper abstract.
        decision: The assistant's earlier decision, as a string.

    Returns:
        A list of message dicts: the original recommendation conversation,
        the assistant's recorded decision, and a request for an explanation.
    """
    messages = list(prompt_if_worth_reading(project_description, paper_abstract))
    messages.append({"role": "assistant", "content": decision})
    messages.append({"role": "user", "content": "Please explain your decision."})
    return messages


def paper_recommendation(
    project_description: str,
    paper_abstract: str,
    configs: dict,
) -> Union[RecommendationOutput, None]:
    """Recommend a paper based on the project description and its abstract.

    Makes two Chat Completions calls: first for a Yes/No worth-reading
    decision, then a follow-up asking the model to explain that decision.

    Args:
        project_description (str): Project description.
        paper_abstract (str): Paper abstract.
        configs (dict): Configuration mapping; the model name is read from
            ``configs["Engine"]["model"]``.

    Returns:
        A RecommendationOutput holding the boolean decision and the
        explanation, or None when either API call fails.  (The original
        docstring wrongly described the return value as a tuple and mentioned
        arxiv-id extraction that this function does not perform.)
    """
    try:
        response = client.chat.completions.create(
            model=configs["Engine"]["model"],
            messages=prompt_if_worth_reading(project_description, paper_abstract),
        )
        decision_text = response.choices[0].message.content
        assert isinstance(decision_text, str)

        response = client.chat.completions.create(
            model=configs["Engine"]["model"],
            messages=prompt_explain_decision(project_description, paper_abstract, decision_text),
        )
        explanation = response.choices[0].message.content
        assert isinstance(explanation, str)

        # Normalise answers such as "Yes." / "yes" to a plain boolean.
        decision = decision_text.lower().translate(str.maketrans("", "", string.punctuation)) == "yes"
        return RecommendationOutput(decision=decision, explanation=explanation)
    except Exception:
        logger.exception("Something went wrong while using OpenAI Chat Completions API.")
        return None
Loading

0 comments on commit c9b272c

Please sign in to comment.