fix llm_runner for tinyllama #407

Merged 1 commit on Feb 8, 2024
24 changes: 16 additions & 8 deletions models/turbine_models/custom_models/llm_runner.py
@@ -50,9 +50,7 @@
 parser.add_argument(
     "--prompt",
     type=str,
-    default="""<s>[INST] <<SYS>>
-Be concise. You are a helpful, respectful and honest assistant. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. <</SYS>> hi what are you? [/INST]
-""",
+    default="hi what are you?",
     help="prompt for llm model",
 )
 parser.add_argument(
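The old default hard-coded the Llama-2 style [INST]/<<SYS>> wrapper into the flag, which TinyLlama's chat checkpoints generally do not expect; after this change the bare prompt is wrapped via append_user_prompt at run time (see the run_llm hunk below). For comparison, a hypothetical sketch, not part of this PR, that lets the tokenizer's own chat template do the wrapping:

# Hypothetical sketch (not from this PR): build a TinyLlama chat prompt with the
# tokenizer's chat template instead of hard-coding Llama-2 [INST]/<<SYS>> tags.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
messages = [
    {"role": "system", "content": "Be concise."},
    {"role": "user", "content": "hi what are you?"},
]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)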
@@ -183,14 +181,14 @@ def run_llm(
         streaming_llm=streaming_llm,
     )
     if not chat_mode:
+        prompt = append_user_prompt(chat_sys_prompt, prompt)
         initial_input = tokenizer(prompt, return_tensors="pt")
         example_input_id = initial_input.input_ids
         turbine_results = llm.generate(example_input_id)
         return tokenizer.decode(turbine_results)
-    prompt = chat_sys_prompt
     while True:
         user_prompt = input("User prompt: ")
-        prompt = append_user_prompt(prompt, user_prompt)
+        prompt = append_user_prompt(chat_sys_prompt, user_prompt)
Review comment (Member): This is actually no bueno for chat, because doing this you lose track of your chat history/context.

         initial_input = tokenizer(prompt, return_tensors="pt")
         example_input_id = initial_input.input_ids
         result = llm.generate(example_input_id)
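The review comment above flags that rebuilding the prompt from chat_sys_prompt on every turn discards the earlier turns. A minimal sketch of a loop that keeps the accumulated history instead, assuming append_user_prompt and append_bot_prompt simply extend the running prompt string as the pre-change code did:

# Sketch only: carry the whole conversation forward in `prompt` on every turn.
prompt = chat_sys_prompt
while True:
    user_prompt = input("User prompt: ")
    # Append to the running history rather than to the bare system prompt.
    prompt = append_user_prompt(prompt, user_prompt)
    initial_input = tokenizer(prompt, return_tensors="pt")
    result = llm.generate(initial_input.input_ids)
    bot_response = tokenizer.decode(result)
    prompt = append_bot_prompt(prompt, bot_response)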
@@ -199,7 +197,13 @@ def run_llm(
         prompt = append_bot_prompt(prompt, bot_response)
 
 
-def run_torch_llm(hf_model_name, hf_auth_token, prompt, streaming_llm=False):
+def run_torch_llm(
+    hf_model_name,
+    hf_auth_token,
+    prompt,
+    streaming_llm=False,
+    chat_sys_prompt=DEFAULT_CHAT_SYS_PROMPT,
+):
     from turbine_models.model_builder import HFTransformerBuilder
     from transformers import AutoModelForCausalLM
 
@@ -210,13 +214,13 @@ def run_torch_llm(hf_model_name, hf_auth_token, prompt, streaming_llm=False):
         hf_auth_token=hf_auth_token,
         auto_tokenizer=AutoTokenizer,
     )
-    model_builder.build_model()
Review comment (Contributor): nice catch

     if streaming_llm is True:
         enable_llama_pos_shift_attention(model_builder.model)
 
     def get_token_from_logits(logits):
         return torch.argmax(logits[:, -1, :], dim=1)
 
+    prompt = append_user_prompt(chat_sys_prompt, prompt)
     initial_input = model_builder.tokenizer(prompt, return_tensors="pt")
     example_input_id = initial_input.input_ids
 
@@ -256,6 +260,10 @@ def get_token_from_logits(logits):
     if args.compare_vs_torch:
         print("generating torch output: ")
         torch_output = run_torch_llm(
-            args.hf_model_name, args.hf_auth_token, args.prompt
+            args.hf_model_name,
+            args.hf_auth_token,
+            args.prompt,
+            args.streaming_llm,
+            args.chat_sys_prompt,
         )
         print(torch_output)
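For reference, a hypothetical direct call to the updated run_torch_llm, mirroring the call site above; the model name, token, and prompt values below are placeholders, not taken from this PR:

# Hypothetical usage of the new signature; all values below are placeholders.
torch_output = run_torch_llm(
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0",  # hf_model_name
    None,                                  # hf_auth_token (public models need none)
    "hi what are you?",                    # prompt
    streaming_llm=False,
    chat_sys_prompt=DEFAULT_CHAT_SYS_PROMPT,
)
print(torch_output)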