-
Notifications
You must be signed in to change notification settings - Fork 86
/
mnist.py
executable file
·91 lines (78 loc) · 3.49 KB
/
mnist.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
#!/usr/bin/env python3
import numpy as np
from teenygrad import Tensor
from tqdm import trange
import gzip, os
from teenygrad.nn import optim
from teenygrad.helpers import getenv
def train(model, X_train, Y_train, optim, steps, BS=128, lossfn=lambda out,y: out.sparse_categorical_crossentropy(y),
          transform=lambda x: x, target_transform=lambda x: x, noloss=False):
  """Run `steps` optimizer iterations over random minibatches of (X_train, Y_train).

  Works with any model exposing either .forward(x) or __call__(x).
  Returns [losses, accuracies]; both lists stay empty when noloss=True,
  since loss/accuracy tracking is skipped entirely in that mode.
  """
  Tensor.training = True
  losses, accuracies = [], []
  for _ in (bar := trange(steps, disable=getenv('CI', False))):
    # draw a random minibatch of BS indices (sampled with replacement)
    idx = np.random.randint(0, X_train.shape[0], size=(BS))
    xb = Tensor(transform(X_train[idx]), requires_grad=False)
    yb = Tensor(target_transform(Y_train[idx]))
    # forward pass
    out = model.forward(xb) if hasattr(model, 'forward') else model(xb)
    loss = lossfn(out, yb)
    # backward pass and parameter update
    optim.zero_grad()
    loss.backward()
    if noloss: del loss  # free the graph early when stats aren't needed
    optim.step()
    # metrics + progress-bar reporting
    if not noloss:
      preds = np.argmax(out.numpy(), axis=-1)
      accuracy = (preds == yb.numpy()).mean()
      loss = loss.detach().numpy()
      losses.append(loss)
      accuracies.append(accuracy)
      bar.set_description("loss %.2f accuracy %.2f" % (loss, accuracy))
  return [losses, accuracies]
def evaluate(model, X_test, Y_test, num_classes=None, BS=128, return_predict=False, transform=lambda x: x,
             target_transform=lambda y: y):
  """Compute test-set accuracy by running the model over X_test in batches of BS.

  Returns the accuracy, or (accuracy, predictions) when return_predict=True.
  """
  Tensor.training = False
  def numpy_eval(Y_test, num_classes):
    # accumulate raw per-class model outputs for the whole test set
    n_batches = (len(Y_test)-1)//BS+1
    raw_out = np.zeros(list(Y_test.shape)+[num_classes])
    for b in trange(n_batches, disable=getenv('CI', False)):
      batch = Tensor(transform(X_test[b*BS:(b+1)*BS]))
      out = model.forward(batch) if hasattr(model, 'forward') else model(batch)
      raw_out[b*BS:(b+1)*BS] = out.numpy()
    preds = np.argmax(raw_out, axis=-1)
    labels = target_transform(Y_test)
    return (labels == preds).mean(), preds
  # infer the class count from the labels when not supplied
  if num_classes is None: num_classes = Y_test.max().astype(int)+1
  acc, Y_test_pred = numpy_eval(Y_test, num_classes)
  print("test set accuracy is %f" % acc)
  return (acc, Y_test_pred) if return_predict else acc
def fetch_mnist():
  """Load the four MNIST IDX files from ./extra/datasets/mnist (relative to this file).

  Returns (X_train, Y_train, X_test, Y_test): images flattened to float32
  rows of 28*28 pixels, labels as 1-D uint8 arrays.
  """
  def parse(file):
    # Use a context manager so the gzip handle is closed deterministically
    # (gzip.open(file).read() leaked the descriptor until GC).
    # .copy() detaches the array from the read-only frombuffer view.
    with gzip.open(file) as f:
      return np.frombuffer(f.read(), dtype=np.uint8).copy()
  BASE = os.path.dirname(__file__)+"/extra/datasets"
  # IDX format: 16-byte (0x10) header on image files, 8-byte header on label files
  X_train = parse(BASE+"/mnist/train-images-idx3-ubyte.gz")[0x10:].reshape((-1, 28*28)).astype(np.float32)
  Y_train = parse(BASE+"/mnist/train-labels-idx1-ubyte.gz")[8:]
  X_test = parse(BASE+"/mnist/t10k-images-idx3-ubyte.gz")[0x10:].reshape((-1, 28*28)).astype(np.float32)
  Y_test = parse(BASE+"/mnist/t10k-labels-idx1-ubyte.gz")[8:]
  return X_train, Y_train, X_test, Y_test
# load the full dataset once at import time (module-level side effect)
X_train, Y_train, X_test, Y_test = fetch_mnist()
# create a model with a conv layer
class TinyConvNet:
  """Two 3x3 conv layers plus a final linear classifier for MNIST.

  Architecture follows https://keras.io/examples/vision/mnist_convnet/
  with channel widths shrunk from (32, 64) to (8, 16) for speed.
  """
  def __init__(self):
    kernel = 3
    chan1, chan2 = 8, 16
    self.c1 = Tensor.scaled_uniform(chan1, 1, kernel, kernel)
    self.c2 = Tensor.scaled_uniform(chan2, chan1, kernel, kernel)
    # after two conv+pool stages a 28x28 input is reduced to 5x5 spatially
    self.l1 = Tensor.scaled_uniform(chan2*5*5, 10)
  def forward(self, x:Tensor):
    # flat 784-pixel rows -> NCHW image batch
    x = x.reshape(shape=(-1, 1, 28, 28))
    x = x.conv2d(self.c1).relu().max_pool2d()
    x = x.conv2d(self.c2).relu().max_pool2d()
    # flatten spatial dims, then classify
    flat = x.reshape(shape=[x.shape[0], -1])
    return flat.dot(self.l1).log_softmax()
if __name__ == "__main__":
  # fixed seed so minibatch sampling (and thus the whole run) is reproducible
  np.random.seed(1337)
  model = TinyConvNet()
  # optimize the raw weight tensors directly; no nn.Module registry here
  optimizer = optim.Adam([model.c1, model.c2, model.l1], lr=0.001)
  train(model, X_train, Y_train, optimizer, steps=100)
  # explicit gate instead of `assert`: asserts are stripped under `python -O`,
  # which would silently skip this accuracy check
  acc = evaluate(model, X_test, Y_test)
  if acc <= 0.93:
    raise RuntimeError("accuracy %f is below the 0.93 threshold" % acc)