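"""Multi-layer Hebbian training entry point.

Builds a model from a hyper-parameter preset and runs the training phases
described by the training configuration: unsupervised (Hebbian), supervised,
or hybrid, over the selected blocks. (Docstring added for orientation; the
behaviour it describes is taken directly from the code below.)
"""
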
import argparse
import warnings

from utils import load_presets, get_device, load_config_dataset, seed_init_fn, str2bool
from model import load_layers
from train import run_sup, run_unsup, check_dimension, training_config, run_hybrid
from log import Log, save_logs

warnings.filterwarnings("ignore")

parser = argparse.ArgumentParser(description='Multi-layer Hebbian Training')
parser.add_argument('--preset', choices=load_presets(), default=None,
                    type=str, help='Preset of hyper-parameters ' +
                                   ' | '.join(load_presets()) +
                                   ' (default: None)')
parser.add_argument('--dataset-unsup', choices=load_config_dataset(), default='MNIST',
                    type=str, help='Dataset for the unsupervised phase ' +
                                   ' | '.join(load_config_dataset()) +
                                   ' (default: MNIST)')
parser.add_argument('--dataset-sup', choices=load_config_dataset(), default='MNIST',
                    type=str, help='Dataset for the supervised phase ' +
                                   ' | '.join(load_config_dataset()) +
                                   ' (default: MNIST)')
parser.add_argument('--training-mode', choices=['successive', 'consecutive', 'simultaneous'], default='successive',
                    type=str, help='Training mode ' +
                                   ' | '.join(['successive', 'consecutive', 'simultaneous']) +
                                   ' (default: successive)')
parser.add_argument('--resume', choices=[None, 'all', 'without_classifier'], default=None,
                    type=str, help='Resume model ' +
                                   ' | '.join(['all', 'without_classifier']) +
                                   ' (default: None)')
parser.add_argument('--model-name', default=None, type=str, help='Model name')
parser.add_argument('--training-blocks', default=None, nargs='+', type=int,
                    help='Selection of the blocks that will be trained')
parser.add_argument('--seed', default=None, type=int,
                    help='Random seed for reproducibility (default: None)')
parser.add_argument('--gpu-id', default=0, type=int, metavar='N',
                    help='Id of the GPU used for training (default: 0)')
parser.add_argument('--save', default=True, type=str2bool, metavar='BOOL',
                    help='Save model checkpoints and logs (default: True)')
parser.add_argument('--validation', default=False, type=str2bool, metavar='BOOL',
                    help='Evaluate on a validation split instead of the test set (default: False)')
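
# Example invocation (illustrative only; the available preset and dataset names
# depend on the configuration files shipped with the repository, so '<preset-name>'
# is a placeholder, not a real preset):
#   python multi_layer.py --preset <preset-name> --dataset-unsup MNIST \
#       --dataset-sup MNIST --training-mode successive --seed 0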


def main(blocks, name_model, resume, save, dataset_sup_config, dataset_unsup_config, train_config, gpu_id):
    device = get_device(gpu_id)
    # Build the model from the preset blocks, optionally resuming saved weights.
    model = load_layers(blocks, name_model, resume)
    model = model.to(device)
    log = Log(train_config)
    # Run each training phase described by the training configuration.
    for phase_id, config in train_config.items():
        if config['mode'] == 'unsupervised':
            run_unsup(
                config['nb_epoch'],
                config['print_freq'],
                config['batch_size'],
                name_model,
                dataset_unsup_config,
                model,
                device,
                log.unsup[phase_id],
                blocks=config['blocks'],
                save=save
            )
        elif config['mode'] == 'supervised':
            run_sup(
                config['nb_epoch'],
                config['print_freq'],
                config['batch_size'],
                config['lr'],
                name_model,
                dataset_sup_config,
                model,
                device,
                log.sup[phase_id],
                blocks=config['blocks'],
                save=save
            )
        else:
            # Hybrid phase: supervised and unsupervised learning run together.
            run_hybrid(
                config['nb_epoch'],
                config['print_freq'],
                config['batch_size'],
                config['lr'],
                name_model,
                dataset_sup_config,
                model,
                device,
                log.sup[phase_id],
                blocks=config['blocks'],
                save=save
            )
    save_logs(log, name_model)


if __name__ == '__main__':
    params = parser.parse_args()
    name_model = params.preset if params.model_name is None else params.model_name
    # Load the block definitions of the preset and the dataset configurations.
    blocks = load_presets(params.preset)
    dataset_sup_config = load_config_dataset(params.dataset_sup, params.validation)
    dataset_unsup_config = load_config_dataset(params.dataset_unsup, params.validation)
    if params.seed is not None:
        dataset_sup_config['seed'] = params.seed
        dataset_unsup_config['seed'] = params.seed
    if dataset_sup_config['seed'] is not None:
        seed_init_fn(dataset_sup_config['seed'])
    # Check the block dimensions against the input shape of the supervised dataset.
    blocks = check_dimension(blocks, dataset_sup_config)
    train_config = training_config(blocks, dataset_sup_config, dataset_unsup_config, params.training_mode,
                                   params.training_blocks)
    main(blocks, name_model, params.resume, params.save, dataset_sup_config, dataset_unsup_config, train_config,
         params.gpu_id)