linUcb.py · 84 lines (64 loc) · 4.25 KB
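"""
Experiment script: trains a small CIFAR-10 classifier with Poutyne while a
LinUCB contextual-bandit dropout layer (linucb_bandit_dropout) selects the
dropout rate per context bucket, then plots each bucket's estimated reward
curve over the discretized dropout rates.
"""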
import matplotlib.pyplot as plt
import numpy as np
import poutyne as pt
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, random_split

# Project-local modules: the bandit-driven dropout layer, the CIFAR-10
# architecture, the per-batch callback and the experiment utilities.
from bandit_dropout import linucb_bandit_dropout
from architecture import architectureCIFAR10
from callback import activateGradientlinUCB
from utils import set_random_seed, save_experience
train_size = 4000
valid_size = 2000
batch_size = 32

# Normalization uses single-channel statistics (the usual MNIST values),
# broadcast over the three CIFAR-10 channels.
transformer = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])
def run_experience(nombre_entrainement=20, nombre_epoch=20, exp_name='linUcb', nb_buckets=16,
                   per_batch=True, reward=None, reward_type='accuracy', seed=None, sigma=5):
    set_random_seed(seed)

    # CIFAR-10 split: a small train/validation subset; the remainder serves as test data.
    dataset_CIFAR10 = datasets.CIFAR10(root='./data', train=True, download=True, transform=transformer)
    train_dataset_CIFAR10, valid_dataset_CIFAR10, test_dataset_CIFAR10 = random_split(
        dataset_CIFAR10, [train_size, valid_size, len(dataset_CIFAR10) - valid_size - train_size])
    train_dataloader_CIFAR10 = DataLoader(train_dataset_CIFAR10, batch_size=batch_size, shuffle=True)
    valid_dataloader_CIFAR10 = DataLoader(valid_dataset_CIFAR10, batch_size=batch_size, shuffle=True)

    taille_subplot = int(np.ceil(nb_buckets ** 0.5))
    taille_espace_discret = 100
    result_test = np.zeros((nb_buckets, nombre_entrainement, taille_espace_discret))
    history_list = list()

    for test_indice in range(nombre_entrainement):
        # LinUCB-driven dropout: one linear bandit per context bucket chooses the dropout rate.
        dropout = linucb_bandit_dropout(nb_buckets=nb_buckets, batch_update=per_batch, dropout_max=0.8, p=0.5)
        dropout.triggered = True
        modele = architectureCIFAR10(dropout)
        pt_modele = pt.Model(modele, "sgd", "cross_entropy", batch_metrics=["accuracy"])
        history = pt_modele.fit_generator(
            train_dataloader_CIFAR10, valid_dataloader_CIFAR10, epochs=nombre_epoch,
            callbacks=[activateGradientlinUCB(test_dataset_CIFAR10, 100, reward_type=reward_type)])
        history_list.append(history)

        # LinUCB mean estimate of each bucket's reward: f_hat(x) = phi(x)^T V_t^{-1} b_t,
        # evaluated on the discretized dropout rates.
        for bucket in range(dropout.nb_buckets):
            f_hat_x = dropout.phi_X.dot(np.linalg.inv(dropout.V_t[bucket]).dot(dropout.B[bucket]))
            result_test[bucket, test_indice, :] = f_hat_x

    save_experience(history_list, exp_name)
    # One subplot per bucket: estimated reward (mean +/- std over the runs) as a
    # function of the dropout rate.
    fig, ax = plt.subplots(taille_subplot, taille_subplot)
    fig.tight_layout(pad=1.2)
    for bucket in range(dropout.nb_buckets):
        axe = ax[bucket // taille_subplot, bucket % taille_subplot]
        if bucket == 0:
            axe.title.set_text(f'Plus petit que {np.round(float(dropout.bucket_boundaries[bucket]), 1)}')
        elif bucket + 1 == dropout.nb_buckets:
            axe.title.set_text(f'Plus grand que {np.round(float(dropout.bucket_boundaries[bucket - 1]), 1)}')
        else:
            axe.title.set_text(f'Entre {np.round(float(dropout.bucket_boundaries[bucket - 1]), 1)} et '
                               f'{np.round(float(dropout.bucket_boundaries[bucket]), 1)}')
        axe.set_ylabel("Précision")
        axe.set_xlabel("Taux de dropout")
        moyenne = np.mean(result_test[bucket, :, :], axis=0)
        ecart_type = np.std(result_test[bucket, :, :], axis=0)
        axe.fill_between(dropout.discretize_structured_input, moyenne - ecart_type, moyenne + ecart_type,
                         alpha=0.5, color='b')
        axe.plot(dropout.discretize_structured_input, moyenne, label=str(bucket))
        # axe.legend()
        # axe.set_ylim(42, 54)
    plt.savefig(f"Results/{exp_name}_contexte.png")
    plt.clf()
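

# ---------------------------------------------------------------------------
# Illustrative sketch (not called by the experiment above): how a LinUCB arm
# score is formed from per-bucket statistics like the V_t and B that
# linucb_bandit_dropout maintains. The argument names and the alpha parameter
# are assumptions for illustration; only the mean term mirrors the f_hat_x
# computation in run_experience.
# ---------------------------------------------------------------------------
def _linucb_score_sketch(phi_x, V, b, alpha=1.0):
    """Optimistic LinUCB score for one feature vector phi_x.

    V is the d x d (ridge-regularized) design matrix and b the d-dimensional
    response vector of one bucket.
    """
    V_inv = np.linalg.inv(V)
    theta_hat = V_inv.dot(b)              # ridge estimate of the reward model
    mean = phi_x.dot(theta_hat)           # predicted reward f_hat(x)
    bonus = alpha * np.sqrt(phi_x.dot(V_inv).dot(phi_x))  # exploration bonus
    return mean + bonus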
if __name__ == '__main__':
    run_experience(seed=42, nombre_entrainement=2, nombre_epoch=2)