Commit c34b43d

better response
melihunsal committed Jul 1, 2023
1 parent 5ab235a commit c34b43d
Showing 2 changed files with 25 additions and 26 deletions.
29 changes: 22 additions & 7 deletions src/prompt_based/examples.txt
@@ -1,5 +1,20 @@
Examples:
================================================================
Chat models are a variation on language models. While chat models use language models under the hood, the interface they expose is a bit different: rather than exposing a "text in, text out" API, they expose an interface where "chat messages" are the inputs and outputs.

You can get chat completions by passing one or more messages to the chat model. The response will be a message. The types of messages currently supported in LangChain are AIMessage, HumanMessage, SystemMessage, and ChatMessage -- ChatMessage takes in an arbitrary role parameter. Most of the time, you'll just be dealing with HumanMessage, AIMessage, and SystemMessage.
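
For example, here is a minimal sketch of passing messages directly to a chat model (assuming the same 2023-era LangChain API used in these examples; the message contents are illustrative):

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

chat = ChatOpenAI(temperature=0)
messages = [
    SystemMessage(content="You are a helpful assistant that translates English to French."),
    HumanMessage(content="I love programming."),
]
result = chat(messages)  # the response is an AIMessage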

Most of the time, to use the LangChain library, you need the imports below:

from langchain import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)


Here are the examples:
================================================================================================================================
Goal: Generate a blog post from a title. Generate at least 500 words
---------
Python Code:
@@ -12,11 +27,11 @@ from langchain.prompts.chat import (
)

def generate_blog_post(title):
    chat = ChatOpenAI(temperature=0)
    chat = ChatOpenAI(temperature=0.1) # Here temperature is set slightly higher to add some variation

    template = "You are a helpful assistant that generates a blog post from the title: {title}. Please provide some content."
    system_message_prompt = SystemMessagePromptTemplate.from_template(template)
    human_template = "{text}"
    human_template = "{title}"
    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
    chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

@@ -36,7 +51,7 @@ from langchain.prompts.chat import (
)

def generate_translation(input_language, output_language, text):
    chat = ChatOpenAI(temperature=0)
    chat = ChatOpenAI(temperature=0) # Here temperature is set to 0 because translation should be deterministic

    template = "You are a helpful assistant that translates {input_language} to {output_language}."
    system_message_prompt = SystemMessagePromptTemplate.from_template(template)
@@ -48,7 +63,7 @@ def generate_translation(input_language, output_language, text):
    result = chain.run(input_language=input_language, output_language=output_language, text=text)
    return result # returns a string
================================================================
Goal: Animal name from animal
Goal: Generate animal name from animal
---------
Python Code:
from langchain import LLMChain
@@ -60,7 +75,7 @@ from langchain.prompts.chat import (
)

def generate_animal_name(animal):
    chat = ChatOpenAI(temperature=0)
    chat = ChatOpenAI(temperature=0.1) # Here temperature is set slightly higher to add some variation

    template = "You are a helpful assistant that generates a name for an animal. You generate a short answer."
    system_message_prompt = SystemMessagePromptTemplate.from_template(template)
22 changes: 3 additions & 19 deletions src/prompt_based/model.py
@@ -2,12 +2,10 @@
from streamlit_prompts import *
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from subprocess import PIPE, run
from subprocess import PIPE
import tempfile
from termcolor import colored
import subprocess
from subprocess import DEVNULL, STDOUT
import sys
import shutil

from dotenv import load_dotenv
@@ -33,18 +31,11 @@ def run_python(self,code):
        tmp.write(code)
        tmp.flush()
        environmental_variables = {'OPENAI_API_KEY':self.openai_api_key}
        #python_path = subprocess.check_output("which python", shell=True).strip().decode('utf-8')
        python_path = shutil.which("python")
        process = subprocess.Popen([python_path,tmp.name], env=environmental_variables,stdout=PIPE, stderr=PIPE)
        output, err = process.communicate()
        return output.strip().decode('utf-8'), err.strip().decode('utf-8')


"""command = f"OPENAI_API_KEY={self.openai_api_key} python "+tmp.name
print(colored(command,"blue"))
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
return result.stdout, result.stderr"""

def refine_code(self,code):
    if "```" in code:
        code = code.split("```")[1]  # keep the part between the first pair of ``` fences
@@ -85,6 +76,8 @@ def __call__(self,topic,num_iterations=10):
        if success:
            break

        response = error

        feedback = self.fix_chain.run(code=total_code,error=error)

        percentage += 100 // num_iterations
@@ -124,11 +117,7 @@ def run_code(self,code):
    with tempfile.NamedTemporaryFile("w",suffix=".py") as tmp:
        tmp.write(code)
        tmp.flush()
        command = f"OPENAI_API_KEY={self.openai_api_key} streamlit run "+tmp.name
        environmental_variables = {'OPENAI_API_KEY':self.openai_api_key,"STREAMLIT_SERVER_PORT":"8502"}
        #python_path = subprocess.check_output("which python", shell=True).strip().decode('utf-8')
        #process = subprocess.Popen([python_path,"-m","streamlit",tmp.name], env=environmental_variables)
        #streamlit_path = subprocess.check_output("which streamlit", shell=True).strip().decode('utf-8')
        streamlit_path = shutil.which("streamlit")  # must resolve the streamlit binary ("python" here would make the run command below invalid)
        process = subprocess.Popen([streamlit_path,"run",tmp.name], env=environmental_variables)
        pid = process.pid
@@ -140,14 +129,9 @@ def run_code_v2(self,code):
    with open(filepath,"w") as tmp:
        tmp.write(code)
        tmp.flush()
        command = f"OPENAI_API_KEY={self.openai_api_key} streamlit run "+filepath
        environmental_variables = {'OPENAI_API_KEY':self.openai_api_key,"STREAMLIT_SERVER_PORT":"8502"}
        #python_path = subprocess.check_output("which python", shell=True).strip().decode('utf-8')
        #process = subprocess.Popen([python_path,"-m","streamlit",filepath], env=environmental_variables)
        #streamlit_path = subprocess.check_output("which streamlit", shell=True).strip().decode('utf-8')
        streamlit_path = shutil.which("streamlit")
        process = subprocess.Popen([streamlit_path,"run",filepath], env=environmental_variables)

    pid = process.pid
    return pid
