test_distill.py
"""
Decode GSM8K training data using the T5 model.
TODO: adaptive batch size, such that max_len * batch_size = const
"""
import time
import torch
import re
import argparse
import os
import pytz
import hydra
import json
import numpy as np
import torch.nn.functional as F
from datetime import datetime
from tqdm import tqdm
from datasets import load_dataset
from transformers import T5Tokenizer, T5ForConditionalGeneration
from omegaconf import DictConfig, OmegaConf
from src.utils import tprint, parse_pred_ans
from test_distill_multiple import load_test_data
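
# A minimal sketch for the adaptive-batch-size TODO in the module docstring
# (hypothetical helper, not wired into main below): keep max_input_len * batch_size
# roughly constant so that batches of long prompts shrink and the per-step GPU
# memory footprint stays roughly flat.
def adaptive_batch_size(max_input_len, token_budget=8192, max_batch_size=32):
    """Largest batch size such that max_input_len * batch_size <= token_budget."""
    return max(1, min(max_batch_size, token_budget // max(1, max_input_len)))
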
# GSM8K_VALIDATION_INDEX_PATH = 'lib_prompt/validation_index.npy'
# MULTIARITH_PATH = 'data/multiarith/MultiArith.json'
# MULTIARITH_VALIDATION_INDEX_PATH = 'data/multiarith/validation_index.npy'

# def load_test_data(test_data):
#     # TODO: add multiarith/ other math datasets
#     if(test_data == 'gsm8k_dev'):
#         gsm8k = load_dataset('gsm8k', 'main')
#         validation_index = np.load(GSM8K_VALIDATION_INDEX_PATH)
#         data = gsm8k['train'].select(validation_index)
#         data_ = []
#         for q, a in zip(data['question'], data['answer']):
#             data_.append({'question': q, 'answer': a})
#     elif(test_data == 'gsm8k_test'):
#         gsm8k = load_dataset('gsm8k', 'main')
#         data = gsm8k['test']
#         data_ = []
#         for q, a in zip(data['question'], data['answer']):
#             data_.append({'question': q, 'answer': a})
#     elif(test_data == 'multiarith_test'):
#         dataset = json.load(open(MULTIARITH_PATH))
#         dev_ind = np.load(MULTIARITH_VALIDATION_INDEX_PATH)
#         # dev_data = [dataset[i] for i in dev_ind]
#         test_data = [d for i, d in enumerate(dataset) if i not in dev_ind]
#         data_ = []
#         for d in test_data:
#             data_.append({'question': d['sQuestion'][1:-1], 'answer': d['lSolutions']})
#     else:
#         raise ValueError('Invalid test data: %s' % test_data)
#     return data_

@hydra.main(version_base=None, config_path="src/conf", config_name="config_inference")
def main(args: DictConfig):
    print(OmegaConf.to_yaml(args))
    if(args.batch_size_fixed != -1): args.batch_size = args.batch_size_fixed

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)  # cast in case gpu_id is an int in the config
    # load the dataset
    dataset = load_test_data(args.test_data)

    # load the model
    tprint('Loading the model from %s' % args.base_model)
    start_time = time.time()
    tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-xxl")
    model = T5ForConditionalGeneration.from_pretrained(args.base_model)
    if(args.model_size == '11b'):
        model.parallelize(args.device_map)
    else:
        model.to('cuda:' + str(args.gpu_id))
    tprint('Model loaded in %.1f seconds.' % (time.time() - start_time))

    # load the prompt
    prompt = open(args.prompt_path).read()

    # decode the dataset
    tprint('Start decoding ... ')
    i = 0
    output_path = args.output_path + args.test_data + '_' + args.base_model.split('/')[-1] + '.txt'
    tprint('Model output to: %s' % output_path)
    # TODO: change this to batch version
    with open(output_path, 'w') as fd:
        tqdm_total = len(dataset) // args.batch_size
        if(len(dataset) % args.batch_size != 0): tqdm_total += 1
        for i in tqdm(range(0, len(dataset), args.batch_size), total=tqdm_total):
            questions = []
            q_batch = []
            a_batch = []
            for k in range(args.batch_size):
                if(i + k >= len(dataset)): break
                q = dataset[i + k]['question']
                q_batch.append(q)
                a = dataset[i + k]['answer']
                a_batch.append(a)
                prompt_q = prompt + '\nQ: ' + q + '\n'
                prompt_q += "Let's think step by step\n"
                questions.append(prompt_q)
            inputs = tokenizer(questions, padding=True, return_tensors="pt")
            with torch.no_grad():
                outputs = model.generate(inputs['input_ids'].to(model.device),
                                         attention_mask=inputs['attention_mask'].to(model.device),
                                         max_length=256
                                         )
            for q, a, ans_ in zip(q_batch, a_batch, outputs):
                ans_ = tokenizer.decode(ans_).replace('<pad>', '').strip()
                fd.write('Q: %s\nA_model:\n%s\nA:\n%s\n\n' % (q, ans_, a))
    _, _, _, _ = parse_pred_ans(output_path)
    return
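
# Each record written by main() has the form below (as produced by the fd.write
# call above), which is the file that parse_pred_ans then scores:
#   Q: <question>
#   A_model:
#   <model-generated reasoning and answer>
#   A:
#   <reference answer>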
if __name__ == '__main__':
main()
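
# Example invocation via hydra overrides (illustrative values; the actual
# defaults live in src/conf/config_inference.yaml):
#   python test_distill.py base_model=google/flan-t5-base test_data=gsm8k_test \
#       batch_size=4 gpu_id=0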