Merge pull request #21 from Strong-AI-Lab/main
Update the JVNAUTOSCI-111 from main branch
Showing 12 changed files with 351 additions and 8 deletions.
Binary file not shown.
@@ -0,0 +1,9 @@
from setuptools import find_packages, setup

setup(
    name='vonlib',
    packages=find_packages(include=['vonlib']),
    version='0.1.0',
    description='Python Package of tell_von',
    author='University of Auckland Strong AI Lab',
)
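As a quick check of the metadata above, a minimal sketch (assuming the package has been installed into the current environment, for example with an editable install from the repository root):

# Sketch only: assumes 'vonlib' is installed; the distribution metadata comes from the setup() call above.
from importlib.metadata import version

print(version("vonlib"))  # expected to print the declared version, 0.1.0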
@@ -0,0 +1,15 @@
"""Defines a common interface for paper recommendation."""

from dataclasses import dataclass


@dataclass
class RecommendationOutput:
    """Data class for the recommendation output.

    Attributes:
        decision: A boolean indicating whether the recommendation is positive.
        explanation: A string explaining the decision."""

    decision: bool
    explanation: str
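A minimal usage sketch of the interface above, assuming RecommendationOutput is imported from wherever this module lives in the package (the exact path is not shown in the diff):

# Sketch only: construct the dataclass and branch on the decision flag.
result = RecommendationOutput(decision=True, explanation="The abstract closely matches the project.")
if result.decision:
    print(f"Recommended: {result.explanation}")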
@@ -0,0 +1,150 @@
import os
from openai import OpenAI

useollama = True
# Model identifier used by ask_gpt4; 'gpt-4' is assumed here as the non-Ollama default.
the_model = 'llama3' if useollama else 'gpt-4'


def get_client():
    """Create, and cache on the function object, either a local Ollama client or an OpenAI client."""
    if not hasattr(get_client, "api_client") or get_client.api_client is None:
        if useollama:
            get_client.api_client = OpenAI(
                base_url='http://localhost:11434/v1',
                api_key='ollama',  # required by the client, but unused by Ollama
            )
        else:
            # Your OpenAI API key
            api_key = os.getenv("OpenAI_API_KEY")
            get_client.api_client = OpenAI(api_key=api_key)

    return get_client.api_client


# from flask import Flask, redirect, render_template, request, url_for

# https://flask.palletsprojects.com/en/2.3.x/quickstart/
# python -m flask run

# for models see https://platform.openai.com/docs/deprecations
# for cost see https://openai.com/api/pricing/

# api details https://platform.openai.com/docs/guides/text-generation

# openai.api_key = os.getenv("OPENAI_API_KEY")
# Configure the API key from your OpenAI account


def ask_gpt4(prompt_text, system_prompt=None):
    """Send a prompt, with an optional system message, to the configured chat model."""
    try:
        # The combined prompt is printed for debugging only; the API call uses separate messages.
        full_prompt = f"{system_prompt}\n\n{prompt_text}" if system_prompt else prompt_text
        print(full_prompt)

        # Build the message list, including the system message only when one is given.
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": prompt_text})

        # Send the prompt to the configured model (Ollama's llama3 or OpenAI's GPT-4).
        response = get_client().chat.completions.create(
            model=the_model,
            messages=messages,
            max_tokens=150,  # adjust based on how long you expect the response to be
        )

        # Extract and return the text response
        return response.choices[0].message.content
    except Exception as e:
        print(f"An error occurred: {e}")
        return None
# app = Flask(__name__)

# @app.route("/", methods=("GET", "POST"))
# def index():
#     if request.method == "POST":
#         task = request.form["task"]
#         response = openai.Completion.create(
#             model="text-davinci-003",
#             prompt=generate_prompt(task),
#             temperature=0.6,
#             max_tokens=1000
#         )
#         return redirect(url_for("index", result=response.choices[0].text))
#
#     result = request.args.get("result")
#     return render_template("index.html", result=result)

# def generate_prompt(task):
#     return """Suggest a small number of initial steps to take to carry out a task.
#
# Task: Empty Rubbish Bin
# Steps: Open bin, take out bag, tie bag, put new bag in bin, close bin, take bag to collection point
# Task: Wash Hands
# Steps: Go to washbasin, turn on hot and cold tap part way, if steaming turn down hot, test temperature with finger, turn down hot until comfortable, wet hands, rub soap with hands under tap, replace soap, rinse hands, turn off taps
# Task: {}
# Steps:""".format(
#         task.capitalize()
#     )

# def generate_prompt_a(animal):
#     return """Suggest three names for an animal that is a superhero.
#
# Animal: Cat
# Names: Captain Sharpclaw, Agent Fluffball, The Incredible Feline
# Animal: Dog
# Names: Ruff the Protector, Wonder Canine, Sir Barks-a-Lot
# Animal: {}
# Names:""".format(
#         animal.capitalize()
#     )

# print("hello world")
# index()

# def send_prompt(prompt):
#     # Replace 'YOUR_GPT4_API_ENDPOINT' with the actual API endpoint for GPT-4
#     api_endpoint = 'YOUR_GPT4_API_ENDPOINT'
#
#     # Set the headers and payload for the API request
#     headers = {'Content-Type': 'application/json'}
#     payload = {'prompt': prompt}
#
#     try:
#         # Send the API request
#         response = requests.post(api_endpoint, headers=headers, json=payload)
#
#         # Check if the request was successful
#         if response.status_code == 200:
#             # Extract the response from the API response
#             response_data = response.json()
#             gpt4_response = response_data['response']
#
#             return gpt4_response
#         else:
#             print('Error: Failed to send prompt to GPT-4')
#     except requests.exceptions.RequestException as e:
#         print('Error: Failed to connect to GPT-4 API')
#         print(e)

# # Example usage
# prompt = "Hello, GPT-4!"
# response = send_prompt(prompt)
# print(response)
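A hedged usage sketch of the helpers above, assuming this module is importable and a local Ollama server is serving the llama3 model on the default port (the useollama=True path):

# Sketch only: assumes an Ollama server at http://localhost:11434 with 'llama3' pulled.
answer = ask_gpt4(
    "List three things to check before merging a pull request.",
    system_prompt="You are a concise technical assistant.",
)
if answer is not None:
    print(answer)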
@@ -0,0 +1,27 @@
import os
import subprocess
import sys


def open_folder(path):
    """Open a folder in the platform's file manager."""
    if os.name == 'nt':  # For Windows
        os.startfile(path)
    elif sys.platform == 'darwin':  # For macOS
        subprocess.call(['open', path])
    else:  # For Linux and other POSIX systems
        subprocess.call(['xdg-open', path])


def scan_drives():
    """Look for a 'Von' folder in each drive root, open it if found, and return its path."""
    drives = []
    if os.name == 'nt':  # For Windows
        import string
        drives = [f"{d}:\\" for d in string.ascii_uppercase if os.path.exists(f"{d}:\\")]
    elif os.name == 'posix':  # For macOS and Linux
        drives = ["/"]
    print(f"Scanning drive roots: {drives}")
    for drive in drives:
        von_path = os.path.join(drive, "Von")
        if os.path.exists(von_path) and os.path.isdir(von_path):
            print(f"Found 'Von' folder in {drive}")
            open_folder(von_path)
            return von_path

    print("Folder 'Von' not found in any drive root.")
    return None


if __name__ == "__main__":
    scan_drives()
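A small usage sketch of scan_drives from another script, handling the case where no 'Von' folder exists (the module name to import from is not shown in the diff):

# Sketch only: assumes scan_drives is importable from this module.
von_path = scan_drives()
if von_path is None:
    print("Create a 'Von' folder in a drive root before continuing.")
else:
    print(f"Using Von folder at {von_path}")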
@@ -0,0 +1,95 @@
"""Implement paper recommendation and explanation using the OpenAI Chat Completions API."""

import logging
import string
from typing import List, Union

from openai import OpenAI

from .base import RecommendationOutput

logger = logging.getLogger(__name__)

client = OpenAI()


def prompt_if_worth_reading(project_description: str, paper_abstract: str) -> List:
    """Create a prompt asking the assistant whether a research paper is worth reading,
    based on the project description and the paper's abstract.

    Args:
        project_description (str): Project description.
        paper_abstract (str): Paper abstract.

    Returns:
        A list of dictionaries, each containing the role and the content of a message."""
    prompt = [
        {
            "role": "system",
            "content": (
                "You help decide whether a research paper is worth reading based on my project description "
                "and the paper's abstract. You only recommend a paper if you think it is closely relevant "
                "to the project."
            ),
        },
        {"role": "user", "content": f"Here is a project description of my current project: {project_description}"},
        {"role": "user", "content": f"And here is an abstract of a research paper: {paper_abstract}"},
        {
            "role": "user",
            "content": (
                "Do you think the research paper is relevant and useful for my project and therefore is worth reading? "
                "Please answer with 'Yes' or 'No' only, without any punctuation."
            ),
        },
    ]
    return prompt


def prompt_explain_decision(project_description: str, paper_abstract: str, decision: str) -> List:
    """Create a prompt asking the model to explain the recommendation decision.

    Args:
        project_description: Project description.
        paper_abstract: Paper abstract.
        decision: Decision made by the assistant, as a string.

    Returns:
        A list of dictionaries, each containing the role and the content of a message."""
    prompt = prompt_if_worth_reading(project_description, paper_abstract)
    follow_up = [
        {"role": "assistant", "content": decision},
        {"role": "user", "content": "Please explain your decision."},
    ]
    return prompt + follow_up


def paper_recommendation(
    project_description: str,
    paper_abstract: str,
    configs: dict,
) -> Union[RecommendationOutput, None]:
    """Use the OpenAI Chat Completions API to recommend a paper based on the project description and the paper abstract.

    Args:
        project_description (str): Project description.
        paper_abstract (str): Paper abstract.
        configs (dict): Configuration with the model name under configs["Engine"]["model"].

    Returns:
        A RecommendationOutput containing the decision and the explanation, or None if the API calls fail.
    """
    try:
        response = client.chat.completions.create(
            model=configs["Engine"]["model"], messages=prompt_if_worth_reading(project_description, paper_abstract)
        )
        decision = response.choices[0].message.content  # type: Union[str, None, bool]
        assert isinstance(decision, str)

        response = client.chat.completions.create(
            model=configs["Engine"]["model"],
            messages=prompt_explain_decision(project_description, paper_abstract, decision),
        )
        explanation = response.choices[0].message.content
        assert isinstance(explanation, str)

        decision = decision.lower().translate(str.maketrans("", "", string.punctuation)) == "yes"
        return RecommendationOutput(decision=decision, explanation=explanation)
    except Exception:
        logger.exception("Something went wrong while using the OpenAI Chat Completions API.")
        return None
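A hedged usage sketch of paper_recommendation: the configs shape is inferred from the lookup in the function above, the model name is only an example, and OPENAI_API_KEY must be set for the module-level OpenAI() client:

# Sketch only: configs shape and model name are assumptions; requires OPENAI_API_KEY in the environment.
configs = {"Engine": {"model": "gpt-4"}}
output = paper_recommendation(
    project_description="We build an assistant that recommends relevant research papers for a project.",
    paper_abstract="This paper proposes a dense retriever for scholarly document search.",
    configs=configs,
)
if output is not None:
    print(output.decision, output.explanation)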