# BackPropTest.py
import random
from math import exp, log

def initialize_network(n_inputs, n_hidden, n_outputs):
    network = list()
    hidden_layer = [{'weights': [random.random() for i in range(n_inputs + 1)]} for i in range(n_hidden)]
    # 1 additional weight for the bias
    network.append(hidden_layer)
    print("This is the hidden layer")
    print(hidden_layer)
    output_layer = [{'weights': [random.random() for i in range(n_hidden + 1)]} for i in range(n_outputs)]
    network.append(output_layer)
    print("This is the output layer")
    print(output_layer)
    print()
    return network
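
# For example, initialize_network(2, 1, 2) builds a hidden layer of one neuron holding 3 weights
# (2 inputs + 1 bias) and an output layer of two neurons holding 2 weights each (1 hidden output + 1 bias).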

def printNetwork(network):
    print("This is the hidden layer\n")
    print(network[0])
    print("\nThis is the output layer\n")
    print(network[1])

def activate(weights, inputs):  # Calculate the activation of one neuron for a given input
    # The activation is sum(weight_i * input_i) + the fixed bias value; basically a weighted sum
    activation = weights[-1]  # The bias is assumed to be the last weight in the list
    for i in range(len(weights) - 1):
        activation += weights[i] * inputs[i]
    return activation
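
# For example, activate([0.5, -0.2, 0.1], [1.0, 2.0]) returns
# 0.1 + 0.5 * 1.0 + (-0.2) * 2.0 = 0.2, where the last weight, 0.1, is the bias.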

def smoothReLU_transfer(activation):
    # Transfer the activation to the actual output using the SmoothReLU/Softplus function
    return log(1 + exp(activation))

def smoothReLU_derivative(output):
    return sigmoid_transfer(output)  # The derivative of SmoothReLU/Softplus is the sigmoid

def sigmoid_transfer(activation):
    return 1.0 / (1.0 + exp(-activation))

def sigmoid_derivative(output):
    return output * (1.0 - output)
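
# Both derivatives above are written in terms of the neuron's output rather than its raw
# activation: for the sigmoid, s'(x) = s(x) * (1 - s(x)), so output * (1.0 - output) is exact
# whenever output = sigmoid_transfer(activation).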

def forward_propagate(network, row):
    inputs = row
    for layer in network:
        new_inputs = []  # Collect the outputs of this layer and use them as inputs to the next layer
        for neuron in layer:
            activation = activate(neuron['weights'], inputs)
            neuron['output'] = sigmoid_transfer(activation)
            new_inputs.append(neuron['output'])
        inputs = new_inputs
    return inputs  # Returns the output of the last layer, which is the output layer
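
# Note that the class label in the row's last column is simply ignored here: activate() only
# reads len(weights) - 1 input values, so passing the full row to forward_propagate is safe.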

def backward_propagate_error(network, expected):
    for i in reversed(range(len(network))):
        layer = network[i]
        errors = list()
        if i != len(network) - 1:
            # Hidden layer: error is the delta-weighted sum of the connections to the next layer
            for j in range(len(layer)):
                error = 0.0
                for neuron in network[i + 1]:
                    error += (neuron['weights'][j] * neuron['delta'])
                errors.append(error)
        else:
            # Output layer: error is the difference between the expected and the actual output
            for j in range(len(layer)):
                neuron = layer[j]
                errors.append(expected[j] - neuron['output'])
        for j in range(len(layer)):
            neuron = layer[j]
            neuron['delta'] = errors[j] * sigmoid_derivative(neuron['output'])
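
# In equation form, with f' = sigmoid_derivative:
#   output layer:  delta_j = (expected_j - output_j) * f'(output_j)
#   hidden layer:  delta_j = (sum_k weight_jk * delta_k) * f'(output_j)
# where k runs over the neurons of the following layer.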

def update_weights(network, row, l_rate):
    for i in range(len(network)):
        inputs = row[:-1]
        if i != 0:
            inputs = [neuron['output'] for neuron in network[i - 1]]
        for neuron in network[i]:
            for j in range(len(inputs)):
                neuron['weights'][j] += l_rate * neuron['delta'] * inputs[j]
            neuron['weights'][-1] += l_rate * neuron['delta']
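
# Each weight moves by l_rate * delta * input; the bias (the last weight) uses an implicit input of 1.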

def train_network(network, train, l_rate, n_epoch, n_outputs):
    for epoch in range(n_epoch):
        sum_error = 0
        for row in train:
            outputs = forward_propagate(network, row)
            expected = [0 for i in range(n_outputs)]
            expected[row[-1]] = 1
            sum_error += sum([(expected[i] - outputs[i]) ** 2 for i in range(len(expected))])
            backward_propagate_error(network, expected)
            update_weights(network, row, l_rate)
        print('>epoch=%d, lrate=%.3f, error=%.3f' % (epoch, l_rate, sum_error))
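
# expected is a one-hot target: for a row whose class label is 1 and n_outputs = 2, it becomes
# [0, 1], so sum_error accumulates the squared error over both output neurons for the whole epoch.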

def predict(network, row):
    outputs = forward_propagate(network, row)
    return outputs.index(max(outputs))

random.seed(1)
dataset = [[2.7810836, 2.550537003, 0],
           [1.465489372, 2.362125076, 0],
           [3.396561688, 4.400293529, 0],
           [1.38807019, 1.850220317, 0],
           [3.06407232, 3.005305973, 0],
           [7.627531214, 2.759262235, 1],
           [5.332441248, 2.088626775, 1],
           [6.922596716, 1.77106367, 1],
           [8.675418651, -0.242068655, 1],
           [7.673756466, 3.508563011, 1]]
n_inputs = len(dataset[0]) - 1
n_outputs = len(set([row[-1] for row in dataset]))
# The commented-out network below is a fixed example with one hidden neuron, two input values
# and two neurons in the output layer:
# network = [[{'output': 0.7105668883115941, 'weights': [0.13436424411240122, 0.8474337369372327, 0.763774618976614]}],
#            [{'output': 0.6213859615555266, 'weights': [0.2550690257394217, 0.49543508709194095]}, {'output': 0.6573693455986976, 'weights': [0.4494910647887381, 0.651592972722763]}]]
network = initialize_network(n_inputs, 2, n_outputs)
train_network(network, dataset, 0.5, 30, n_outputs)
# printNetwork(network)
# backward_propagate_error(network, expected)
# output: [0.6629970129852887, 0.7253160725279748] Sigmoid activation and Sigmoid derivative
#
# output: [1.1791724661782688, 1.4701185556410246] SmoothReLU activation and derivative
#
# output: [1.1791724661782688, 1.4701185556410246] SmoothReLU activation and Sigmoid derivative
#
# output: [0.6629970129852887, 0.7253160725279748] Sigmoid activation and SmoothReLU derivative
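
# Quick usage check for predict() (a minimal sketch using the network trained above): compare
# the predicted class against the label stored in the last column of each training row.
for row in dataset:
    prediction = predict(network, row)
    print('expected=%d, predicted=%d' % (row[-1], prediction))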