-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbrain.py
More file actions
122 lines (94 loc) · 3.85 KB
/
brain.py
File metadata and controls
122 lines (94 loc) · 3.85 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
from abc import ABC, abstractmethod
from random import randrange
from typing import Any, List, Optional, Tuple

from numpy.random import permutation
class InvalidPropagationInputException(Exception):
    """Raised when input fed to the network is invalid for propagation.

    NOTE(review): defined but never raised anywhere in this view —
    presumably reserved for input validation; confirm against callers.
    """
    pass
class Neuron(ABC):
    """Interface for a single neuron: activation, weight update, reset."""

    @abstractmethod
    def activate(self, inputs: List[float]) -> float:
        """Return the neuron's output for the given input vector."""
        ...

    @abstractmethod
    def update_weights(self, error: float, alpha: float = 0.01):
        """Adjust the weights to reduce *error*; *alpha* is the learning rate.

        The default is 0.01 to match the concrete implementation
        (HeavesideNeuron.update_weights) and Brain.train; the previous
        0.0001 default on this abstract signature was inconsistent.
        """
        ...

    @abstractmethod
    def reset(self):
        """Restore the neuron's weights to their initial (zeroed) state."""
        ...
class HeavesideNeuron(Neuron):
    """A neuron with a Heaviside-step activation (output is 0 or 1).

    The weight vector grows lazily: on each activation it is padded with
    *default_weight* to match the length of the input vector.
    """

    def __init__(self, weights: Optional[List[float]] = None, default_weight: float = 0.0):
        # A None sentinel fixes the classic mutable-default-argument bug:
        # previously every instance built with the default shared (and
        # mutated, via activate's padding) one and the same list.
        self.weights = [] if weights is None else weights
        self.default_weight = float(default_weight)

    def activate(self, inputs: List[float]) -> float:
        """Return 1 if the weighted sum of *inputs* is >= 0, else 0."""
        values = [float(i) for i in inputs]
        # Extend the weights list to match the length of the inputs
        if len(values) > len(self.weights):
            self.weights = self.weights + [self.default_weight] * (len(values) - len(self.weights))
        weighted_sum = sum(v * w for v, w in zip(values, self.weights))
        return self._heaveside(weighted_sum)

    def _heaveside(self, value: float):
        # Heaviside step function with the H(0) == 1 convention.
        if value >= 0:
            return 1
        else:
            return 0

    def update_weights(self, error: float, alpha=0.01):
        """Nudge one randomly chosen weight against the error direction."""
        w_num = randrange(0, len(self.weights))
        self.weights[w_num] = self.weights[w_num] - error * alpha

    def reset(self):
        """Zero out all weights, keeping the vector length (floats, for
        consistency with the rest of the class)."""
        self.weights = [0.0] * len(self.weights)
class NeuronLayer:
    """An ordered collection of neurons that all activate on the same input."""

    def __init__(self):
        # Annotation kept as a string (forward reference) so it needs no
        # evaluation of Neuron at runtime.
        self.neurons: "List[Neuron]" = []

    def add_neuron(self, neuron: "Neuron"):
        """Append *neuron* to the layer (correctly spelled entry point)."""
        self.neurons.append(neuron)

    # Backward-compatible alias: Brain.add_neuron calls the misspelled
    # name, so it must keep working.
    add_neruon = add_neuron

    def size(self) -> int:
        """Return the number of neurons in the layer."""
        return len(self.neurons)

    def activate(self, input: List[Any]) -> List[Any]:
        """Feed *input* to every neuron and return their outputs in order."""
        return [n.activate(input) for n in self.neurons]

    def reset(self):
        """Reset every neuron in the layer."""
        for nrn in self.neurons:
            nrn.reset()
# To keep things simple, this brain will fully connect all neurons
# from one layer to the next in a feed forward method.
class Brain:
    """A feed-forward network of fully connected NeuronLayers.

    Training is stochastic: for each sample, one randomly chosen neuron's
    weights are nudged (see train).
    """

    def __init__(self):
        # Start with a single layer; callers grow the network with
        # next_layer() and populate it with add_neuron().
        self.layers: List[NeuronLayer] = [NeuronLayer()]

    def add_neuron(self, neuron: Neuron):
        """Append *neuron* to the most recently created layer; chainable."""
        self.layers[-1].add_neruon(neuron)
        return self

    def next_layer(self):
        """Start a new empty layer after the current one; chainable."""
        self.layers.append(NeuronLayer())
        return self

    def propagate(self, input: List[Any]) -> List[Any]:
        """Feed *input* through every layer in order and return the last
        layer's outputs."""
        output = input
        for layer in self.layers:
            output = layer.activate(output)
        return output

    def derivative(self, layer: int, neuron: int) -> float:
        """Recursively accumulate the products of downstream weights for
        the given neuron — the sensitivity of the network output to that
        neuron's output.

        (Return annotation corrected from ``str``: this returns a number,
        never a string.)
        """
        # NOTE(review): relies on downstream neurons exposing .weights —
        # true for HeavesideNeuron but not guaranteed by the Neuron ABC.
        if layer + 1 >= len(self.layers):
            # Last layer: the output depends on itself with factor 1.
            return 1
        retval = 0
        for nrn_num, nrn in enumerate(self.layers[layer+1].neurons):
            if layer + 2 >= len(self.layers):
                # The next layer is the last one: just the connecting weight.
                retval += nrn.weights[neuron]
            else:
                retval += nrn.weights[neuron] * self.derivative(layer+1, nrn_num)
        return retval

    def train(self, data: List[Tuple[List, bool]], alpha: float = 0.01):
        """Run one epoch of stochastic training over *data* in random order.

        For each sample: propagate, compute the output error, scale it by
        the derivative of one randomly chosen neuron, and update only that
        neuron's weights with learning rate *alpha*.
        """
        # snapshots = []
        for i in permutation(len(data)):
            input, expected = data[i]
            actual = self.propagate(input)
            # Only the first output is compared — assumes a single output
            # neuron; TODO confirm for multi-output networks.
            error = actual[0] - expected
            # pick a random nrn to update
            lyr_num = randrange(0, len(self.layers))
            nrn_num = randrange(0, len(self.layers[lyr_num].neurons))
            # print("actual", actual, "expected", expected, "error", error, "derivative", self.derivative(lyr_num, nrn_num))
            error = error*self.derivative(lyr_num, nrn_num)
            nrn = self.layers[lyr_num].neurons[nrn_num]
            nrn.update_weights(error, alpha)
            # snapshots.append(deepcopy(self))
        # return snapshots

    def reset(self):
        """Reset every layer (zeroing all neuron weights)."""
        for layer in self.layers:
            layer.reset()

    def error(self, data: List[Tuple[List, bool]]) -> float:
        """Return the mean squared error of the first output over *data*."""
        return sum((expected - self.propagate(input)[0])**2 for input, expected in data)/len(data)