# simple_perceptron.py (52 lines, 35 loc, 1013 bytes)
# Single Layer Perceptron Learning Rule : OR Gate Example
# https://blog.dbrgn.ch/2013/3/26/perceptrons-in-python/
from random import Random
from numpy import array, dot, random

# Plotting is optional: training needs only numpy, so keep the script
# usable on headless machines without matplotlib installed.
try:
    from pylab import plot, ylim
except ImportError:
    plot = ylim = None


def unit_step(x):
    """Heaviside step activation: 0 for negative input, 1 otherwise."""
    return 0 if x < 0 else 1


# Training rows: (array([x1, x2, bias]), expected OR output).
training_data = [
    (array([0, 0, 1]), 0),
    (array([0, 1, 1]), 1),
    (array([1, 0, 1]), 1),
    (array([1, 1, 1]), 1),
]


def train_perceptron(data, eta=0.2, n=100, seed=None):
    """Train a single-layer perceptron with the perceptron learning rule.

    Parameters
    ----------
    data : list of (ndarray, int)
        Samples as (input vector with trailing bias term, expected output).
    eta : float
        Learning rate (step size of each weight update).
    n : int
        Number of training iterations; each iteration uses one randomly
        chosen sample.
    seed : int or None
        Optional seed for reproducible sampling and initial weights.

    Returns
    -------
    (w, errors)
        Final weight vector and the per-iteration error list (-1, 0 or 1).
    """
    rng = Random(seed)
    if seed is not None:
        random.seed(seed)  # also pin numpy's initial weights
    # Random initial weights, one per input component (incl. bias).
    w = random.rand(len(data[0][0]))
    errors = []
    for _ in range(n):  # range, not Python-2 xrange
        x, expected = rng.choice(data)
        error = expected - unit_step(dot(w, x))
        errors.append(error)
        # Perceptron learning rule: nudge weights toward the expected output.
        w += eta * error * x
    return w, errors


if __name__ == "__main__":
    w, errors = train_perceptron(training_data, eta=0.2, n=100)
    # Report the learned response for every input pair.
    for x, _ in training_data:
        result = dot(x, w)
        print("{}: {} -> {}".format(x[:2], result, unit_step(result)))
    if plot is not None:
        ylim([-1, 1])
        plot(errors)