EDIT: squashing the inputs to between 0 and 1 gives me about 0.5 output per neuron for every data set.
It seems the output is always 1 for every set of inputs I feed forward after training. However, if I change the learning rate from positive to negative, or vice versa, the output is always 0.
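For context, a quick check of the saturation at these input scales (the 0.5 weights here are just an example of the values random.random() can produce):

import math

def sigmoid(x):
    return 1 / (1 + math.exp(-x))

# inputs [10, 20], example weights of 0.5 each, bias 1
net = 0.5 * 10 + 0.5 * 20 + 1
print(sigmoid(net))  # ~0.99999989 -- effectively 1, and out * (1 - out) is ~1e-7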
import math
import random

LN = -0.05

def Matrix(numI, numO):
    # numO rows of numI random starting weights
    matrix = []
    for i in range(numO):
        matrix.append([])
        for c in range(numI):
            matrix[i].append(random.random())
    return matrix
class Neuralnetwork:
    def __init__(self, numI, numO):
        self.Output_layer = Output_layer(numI, numO)
        self.Feed_forward = self.Output_layer.Feed_forward

    def train(self, t_inputs, t_targets):
        for n in range(len(self.Output_layer.Neurons)):
            self.Output_layer.new_weight(t_inputs, t_targets, n)
class Output_layer:
    def __init__(self, numI, numO):
        self.Bias = 1
        self.Matrix = Matrix(numI, numO)
        self.Neurons = []
        for o in range(numO):
            self.Neurons.append(Neuron(self.Matrix, o))

    def Feed_forward(self, inputs):
        outputs = []
        for i in self.Neurons:
            outputs.append(i.Output(inputs, self.Bias))
        print(outputs)

    def new_weight(self, t_inputs, t_targets, a):
        for aw in range(len(self.Neurons[a].Weights)):
            totalsw = []
            totalsb = []
            for i in range(len(t_inputs)):
                out = self.Neurons[a].Output(t_inputs[i], self.Bias)
                pd_c_wrt_output = 2 * (out - t_targets[i][a])  # d(cost)/d(output) for squared error
                pd_output_wrt_net = out * (1 - out)            # sigmoid derivative
                pd_net_wrt_weight = t_inputs[aw][aw]
                pd_c_wrt_weight = pd_c_wrt_output * pd_output_wrt_net * pd_net_wrt_weight
                totalsw.append(pd_c_wrt_weight)
                pd_net_wrt_bias = 1
                pd_c_wrt_bias = pd_c_wrt_output * pd_output_wrt_net * pd_net_wrt_bias
                totalsb.append(pd_c_wrt_bias)
            pd_weight = sum(totalsw)
            pd_bias = sum(totalsb)
            self.Neurons[a].Weights[aw] -= LN * pd_weight
            self.Bias -= LN * pd_bias
class Neuron:
    def __init__(self, matrix, index_of_M):
        self.Weights = matrix[index_of_M]

    def Weighted_sum(self, weights, inputs, bias):
        weightedI = []
        for ind, w in enumerate(weights):
            weightedI.append(w * inputs[ind])
        return sum(weightedI) + bias

    def Sigmoid(self, prediction):
        e = math.exp(-prediction)
        return round(1 / (1 + e), 8)

    def Output(self, inputs, bias):
        return self.Sigmoid(self.Weighted_sum(self.Weights, inputs, bias))
nn = Neuralnetwork(2, 2)
nn.Feed_forward([10, 20])
for i in range(100000):
    nn.train([[10, 20], [15, 30], [8, 16], [3, 9], [6, 18], [2, 6]],
             [[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]])
My first neural network worked fine, but I really can't find the bug in this one. I've tried different things, like moving new_weight into the Neuron class, different numbers of inputs and outputs, etc.
Try setting the weight values to random; this will help break the symmetry. Also set the biases to 1. You have two output classes, so I suggest you use a loss function like mean squared error with a gradient descent optimizer, and set the learning rate to something like 0.001 or 0.01.
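Here is a minimal sketch of those suggestions, assuming the same 2-input / 2-output layout as your network (the names sigmoid, forward, and train_step are mine, not from your code):

import math
import random

LR = 0.01  # small positive learning rate, as suggested

def sigmoid(x):
    return 1 / (1 + math.exp(-x))

# random starting weights break the symmetry between the two output neurons
weights = [[random.random() for _ in range(2)] for _ in range(2)]
biases = [1.0, 1.0]  # biases start at 1

def forward(inputs):
    return [sigmoid(sum(w * x for w, x in zip(weights[o], inputs)) + biases[o])
            for o in range(2)]

def train_step(inputs, targets):
    # one gradient-descent step on the squared error for a single example
    outs = forward(inputs)
    for o in range(2):
        delta = 2 * (outs[o] - targets[o]) * outs[o] * (1 - outs[o])
        for w in range(2):
            # d(net)/d(weight) is this example's input value
            weights[o][w] -= LR * delta * inputs[w]
        biases[o] -= LR * delta

# two of your training pairs, scaled down so the sigmoid is not saturated
data = [([1.0, 2.0], [1, 0]), ([0.3, 0.9], [0, 1])]
for _ in range(10000):
    for x, t in data:
        train_step(x, t)
print(forward([1.0, 2.0]), forward([0.3, 0.9]))

Two things in this sketch are worth comparing against your new_weight: d(net)/d(weight) should be the current example's input, t_inputs[i][aw] rather than t_inputs[aw][aw], and with a -= update the learning rate must be positive, otherwise every step moves the weights uphill and the outputs saturate at 0 or 1.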
You can learn more here.