# gen_response.py
import torch
import config
from data_utils import *
from model_init import *
from transformers import GenerationConfig

# Run generation on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def get_response(model, tokenizer, prompt, use_vllm=False, lora_request=None):
    if use_vllm:
        from vllm import SamplingParams
        # vLLM tokenizes prompts internally, so the raw prompt string goes straight to generate().
        if lora_request is not None:
            # LoRA-adapted models also generate exemplars, so allow a longer output.
            sampling_params = SamplingParams(temperature=0,
                                             max_tokens=1024)
            response = model.generate(prompt,
                                      lora_request=lora_request,
                                      sampling_params=sampling_params)
        else:
            # Base models don't need to generate exemplars; we can lower max_tokens for speed.
            sampling_params = SamplingParams(temperature=0,
                                             max_tokens=250)
            response = model.generate(prompt,
                                      sampling_params=sampling_params)
        # generate() returns a list of RequestOutput objects; keep only the generated
        # text so both branches of this function return a plain string.
        response = response[0].outputs[0].text
    else:
        # Hugging Face path: tokenize explicitly, padding/truncating to a fixed length.
        inputs = tokenizer(text=prompt,
                           max_length=4096,
                           padding="max_length",
                           truncation=True,
                           return_tensors="pt")
        # GenerationConfig has no seed argument; seed torch directly so sampling is reproducible.
        torch.manual_seed(config.seed)
        generate_ids = model.generate(inputs.input_ids.to(device),
                                      attention_mask=inputs.attention_mask.to(device),  # mask out the max_length padding
                                      max_new_tokens=config.max_new_tokens,
                                      generation_config=GenerationConfig(
                                          temperature=config.temperature,
                                          do_sample=True,  # sample with temperature/top_k; set to False for deterministic greedy decoding
                                          top_k=config.top_k,
                                      ))
        response = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
    return response