config.py
from dataclasses import dataclass
from typing import Optional

import torch


@dataclass
class ModelArgs:
    dim: int = 4096
    n_layers: int = 32
    n_heads: int = 32                           # number of heads for the queries
    n_kv_heads: Optional[int] = None            # number of heads for the keys and values
    vocab_size: int = -1                        # set when the tokenizer is loaded
    multiple_of: int = 256
    ffn_dim_multiplier: Optional[float] = None
    norm_eps: float = 1e-5
    mode: str = 'train'
    # Needed for the KV cache
    batch_size: int = 32
    max_seq_length: int = 32
    device: str = 'cuda' if torch.cuda.is_available() else 'cpu'


@dataclass
class TrainArgs(ModelArgs):
    n_epochs: int = 10
    log_interval: int = 12
    eval_iters: int = 200
    lr: float = 3e-4
    warmup_steps: int = 4000
    load_model: bool = False
    save_dir: str = '/tmp/'
    load_dir: str = '/tmp/'


@dataclass
class DataArgs(ModelArgs):
    filepath: str = 'input.txt'
    tokenizer_model_path: str = 'tokenizer.model'
    train_size: float = 0.9


@dataclass
class InferenceArgs(ModelArgs):
    checkpoint_dir: str = 'llama-2-7b'
    tokenizer_path: str = 'tokenizer.model'
    load_model: bool = True
    max_seq_len: int = 10
    temperature: float = 0.6
    top_p: float = 0.9


@dataclass
class DeepspeedArgs(ModelArgs):
    deepspeed: bool = True                      # whether to use DeepSpeed or not
    save_interval: int = 1000
    ckpt_id: float = 0.0
    deepspeed_config: str = 'deepspeed_config.json'
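

# Usage sketch (illustrative, not part of the original file): shows how the
# dataclasses above are instantiated and how fields inherited from ModelArgs
# can be overridden per configuration. The chosen override values are
# arbitrary examples.
if __name__ == '__main__':
    train_args = TrainArgs(n_epochs=5, lr=1e-4)      # override training defaults
    infer_args = InferenceArgs(temperature=0.8)      # inherits dim, n_heads, device, ...
    print(train_args.device, train_args.dim)         # fields inherited from ModelArgs
    print(infer_args.checkpoint_dir, infer_args.top_p)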