From dd47f73356b06cc6d2893739d3074e987024b5ed Mon Sep 17 00:00:00 2001
From: eynard
Date: Sat, 18 Dec 2021 20:57:44 +0100
Subject: [PATCH] Basic working neural network

---
 sobek/network.py    | 83 ++++++++++++++++++++++++---------------------
 testLearning.py     | 40 ++++++++++------------
 testLearningNAND.py | 63 ++++++++++++++++++++++++++++++++++
 testNAND.py         | 25 ++++++++++++++
 4 files changed, 149 insertions(+), 62 deletions(-)
 create mode 100644 testLearningNAND.py
 create mode 100644 testNAND.py
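Usage note: after this patch, weights and biases are plain Python lists of NumPy
arrays and can be read or set directly (see testNAND.py below). A minimal sketch
of the resulting API; the sizes and learning rate here are illustrative only:

    import numpy as np
    from sobek.network import network

    net = network(2, 1)                          # 2 inputs feeding one sigmoid output
    print(net.process(np.zeros(2)))              # forward pass; expects a float64 vector
    net.train([np.zeros(2)], [np.ones(1)], 3.0)  # one gradient-descent pass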
diff --git a/sobek/network.py b/sobek/network.py
index 5a7dd88..605c0fb 100755
--- a/sobek/network.py
+++ b/sobek/network.py
@@ -1,5 +1,4 @@
 import numpy as np
-import math
 
 class network:
 
@@ -7,71 +6,71 @@ class network:
         if type(inputLayerSize) != int: raise TypeError("The input layer size must be an int!")
 
-        self.__weights = []
+        self.weights = []
         self.__inputLayerSize = inputLayerSize
         oldLayerSize = inputLayerSize
         for layerSize in layerSizes:
-            self.__weights.append( np.random.random((layerSize, oldLayerSize)) )
+            self.weights.append( np.random.randn(layerSize, oldLayerSize) )
             oldLayerSize = layerSize
-        self.__biases = [[0]*layerSize for layerSize in layerSizes]
-        self.__weights = np.array(self.__weights, dtype=object)
-        self.__biases = np.array(self.__biases, dtype=object)
+        self.biases = [np.random.randn(layerSize) for layerSize in layerSizes]
 
     def __reLu(value, derivative=False):
         if (derivative):
-            return 0 if (value == 0) else 1
+            return 0 if (value < 0) else 1
         return max(0, value)
 
     def __sigmoid(value, derivative=False):
         if (derivative):
             return network.__sigmoid(value) * (1 - network.__sigmoid(value))
-        return 1/(1+math.exp(-value))
+        return 1.0/(1.0+np.exp(-value))
 
     def process(self, _input, __storeValues=False):
         if type(_input) != np.ndarray: raise TypeError("The input must be a vector!")
         if _input.size != self.__inputLayerSize: raise ValueError("The input vector has the wrong size!")
-        #if _input.dtype != np.float64:
-        #    raise TypeError("The input vector must contain floats!")
+        if _input.dtype != np.float64:
+            print(_input.dtype)
+            raise TypeError("The input vector must contain floats!")
 
         if (__storeValues):
             self.activations = []
             self.outputs = []
+            self.outputs.append(_input)
 
-        for layerWeights, bias in zip(self.__weights, self.__biases):
+        for layerWeights, layerBias in zip(self.weights, self.biases):
-            _input = np.matmul(layerWeights, _input)
-            _input = np.add(_input, bias)
+            _input = np.dot(layerWeights, _input)
+            _input = np.add(_input, layerBias)
 
-            if (__storeValues):
-                self.activations.append(_input.copy())
+            if (__storeValues):
+                self.activations.append(_input)
 
             #activation function application
-            for neuron in range(len(_input)):
-                _input[neuron] = network.__sigmoid(_input[neuron])
+            #for i in range(len(_input)):
+            #    _input[i] = network.__sigmoid(_input)
+            _input = network.__sigmoid(_input)
 
             #Could compare performance against recomputing these later
-            if (__storeValues):
-                self.outputs.append(_input.copy())
+            if (__storeValues):
+                self.outputs.append(_input)
 
-        self.activations = np.array(self.activations, dtype=object)
-        self.outputs = np.array(self.outputs, dtype=object)
-
         return _input
 
     def train(self, inputs, desiredOutputs, learningRate):
-        errorSumsWeights = [[[0]*len(neuron) for neuron in layer] for layer in self.__weights]
-        errorSumsBiases = [[0]*len(layer) for layer in self.__biases]
-        self.__errors = [[0]*len(layer) for layer in self.__weights]
+        if (len(inputs) != len(desiredOutputs)):
+            raise ValueError("The inputs and desired outputs vectors must have the same amount of data!")
 
         for _input, desiredOutput in zip(inputs, desiredOutputs):
-            #fills self.activations and self.outputs
-            self.__output = self.process(_input, True)
+            errorSumsWeights = [np.zeros(layer.shape) for layer in self.weights]
+            errorSumsBiases = [np.zeros(layer.shape) for layer in self.biases]
+            self.__errors = [np.zeros(len(layer)) for layer in self.weights]
+            #fills self.activations and self.outputs
+            self.process(_input, True)
             self.__desiredOutput = desiredOutput
 
             #Matrix sum?
@@ -83,18 +82,24 @@ class network:
                     errorSumsWeights[layerNumber][neuronNumber][weightNumber] += self.__PartialDerivative(layerNumber, neuronNumber, weightNumber)
 
         total = 0
-
+
+        errorSumsWeights = np.multiply(errorSumsWeights, -(learningRate/len(inputs)))
-        self.__weights = np.add(self.__weights, errorSumsWeights)
+        self.weights = np.add(self.weights, errorSumsWeights)
 
         errorSumsBiases = np.multiply(errorSumsBiases, -(learningRate/len(inputs)))
-        self.__biases = np.add(self.__biases, errorSumsBiases)
+        self.biases = np.add(self.biases, errorSumsBiases)
 
-        print(self.__biases)
+        #print(self.__biases)
 
         """
         for layerNumber in range(len(errorSumsWeights)):
             for neuronNumber in range(len(errorSumsWeights[layerNumber])):
+
+                errorSumsBiases[layerNumber][neuronNumber] = errorSumsBiases[layerNumber][neuronNumber] / len(inputs)
+                total += errorSumsBiases[layerNumber][neuronNumber]
+                self.biases[layerNumber][neuronNumber] -= learningRate * errorSumsBiases[layerNumber][neuronNumber]
+
                 for weightNumber in range(len(errorSumsWeights[layerNumber][neuronNumber])):
                     #Probably doable with a matrix multiplication
@@ -103,24 +108,24 @@ class network:
                     total += errorSumsWeights[layerNumber][neuronNumber][weightNumber]
                     #Probably doable with a matrix sum
-                    self.__weights[layerNumber][neuronNumber][weightNumber] -= learningRate * errorSumsWeights[layerNumber][neuronNumber][weightNumber]
+                    self.weights[layerNumber][neuronNumber][weightNumber] -= learningRate * errorSumsWeights[layerNumber][neuronNumber][weightNumber]
 
-        print("Error : " + str(total))"""
+        #print("Error : " + str(total))"""
 
     def __Error(self, layer, neuron):
         if (self.__errors[layer][neuron] == 0 ):
-            self.__errors[layer][neuron] = self.__ErrorFinalLayer(neuron) if (layer == len(self.__weights)-1) else self.__ErrorHiddenLayer(layer, neuron)
+            self.__errors[layer][neuron] = self.__ErrorFinalLayer(neuron) if (layer == len(self.weights)-1) else self.__ErrorHiddenLayer(layer, neuron)
         return self.__errors[layer][neuron]
 
     def __ErrorFinalLayer(self, neuron):
-        return network.__sigmoid(self.activations[len(self.activations)-1][neuron], True) * (self.__output[neuron] - self.__desiredOutput[neuron])
+        return network.__sigmoid(self.activations[-1][neuron], derivative=True) * (self.outputs[-1][neuron] - self.__desiredOutput[neuron])
 
     def __ErrorHiddenLayer(self, layer, neuron):
         upperLayerLinksSum = 0
         #Probably doable with a matrix multiplication
-        for upperLayerNeuron in range(len(self.__weights[layer+1])):
-            upperLayerLinksSum += self.__weights[layer+1][upperLayerNeuron][neuron] * self.__errors[layer+1][upperLayerNeuron]
-        return network.__sigmoid(self.activations[layer][neuron], True) * upperLayerLinksSum
+        for upperLayerNeuron in range(len(self.weights[layer+1])):
+            upperLayerLinksSum += self.weights[layer+1][upperLayerNeuron][neuron] * self.__errors[layer+1][upperLayerNeuron]
+        return network.__sigmoid(self.activations[layer][neuron], derivative=True) * upperLayerLinksSum
 
     def __PartialDerivative(self, layer, neuron, weight):
-        return self.__Error(layer, neuron) * self.outputs[layer-1][weight]
\ No newline at end of file
+        return self.__Error(layer, neuron) * self.outputs[layer][weight]
\ No newline at end of file
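Note on the loops above: the comments in train() say the per-weight updates are
"probably doable with a matrix multiplication". A hedged sketch of that vectorized
form, assuming as in this patch that activations[l] holds the pre-sigmoid values of
layer l and outputs[l] holds its input vector (outputs[0] being the network input);
the function names here are illustrative, not part of the patch:

    import numpy as np

    def sigmoid(z, derivative=False):
        s = 1.0 / (1.0 + np.exp(-z))
        return s * (1 - s) if derivative else s

    def gradients(weights, activations, outputs, desired):
        # output-layer delta, as in __ErrorFinalLayer
        deltas = [sigmoid(activations[-1], True) * (outputs[-1] - desired)]
        # hidden-layer deltas, back to front, as in __ErrorHiddenLayer
        for l in range(len(weights) - 2, -1, -1):
            deltas.insert(0, sigmoid(activations[l], True) * (weights[l + 1].T @ deltas[0]))
        # one outer product per layer replaces the per-weight loop of __PartialDerivative
        gradsW = [np.outer(d, o) for d, o in zip(deltas, outputs[:-1])]
        return gradsW, deltas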
diff --git a/testLearning.py b/testLearning.py
index 9a827d9..db19caa 100644
--- a/testLearning.py
+++ b/testLearning.py
@@ -4,42 +4,36 @@ from sobek.network import network
 
 random.seed()
 
-myNetwork = network(10, 10, 10)
+myNetwork = network(10, 10)
 
-learningRate = 1
+learningRate = 3
 
-for j in range(100):
+for j in range(10000):
+    rand = []
     inputs = []
-    inputs2 = []
     desiredOutputs = []
 
     if (j%50 == 0):
         print(j)
 
-    for i in range(1000):
-        inputs.append([(random.randrange(10)/10)])
-    inputs = np.array(inputs, dtype=object)
+    for i in range(10):
+        rand.append( random.randrange(10)/10)
 
-    for i in range(1000):
-        desiredOutputs.append([0]*10)
-        desiredOutputs[i][9 - int(inputs[i][0]*10)] = 1.0
-    desiredOutputs = np.array(desiredOutputs, dtype=object)
+    for i in range(10):
+        desiredOutputs.append(np.zeros(10))
+        desiredOutputs[i][9 - int(rand[i]*10)] = 1.0
 
-    #for i in range(1000):
-    #    inputs2.append([0]*10)
-    #    inputs2[i][int(inputs[i][0]*10)] = 1.0
-    inputs2 = np.array(inputs2, dtype=object)
+    for i in range(10):
+        inputs.append(np.zeros(10))
+        inputs[i][int(rand[i]*10)] = 1.0
 
-    if (j%10000 == 0):
-        learningRate*= 0.1
-
-    myNetwork.train(desiredOutputs, desiredOutputs, learningRate)
+    myNetwork.train(inputs, desiredOutputs, learningRate)
 
 test = []
-test.append([0]*10)
-test.append([0]*10)
+test.append(np.zeros(10))
+test.append(np.zeros(10))
 test[0][1] = 1.0
-test[1][8] = 1.0
-test = np.array(test, dtype=object)
+test[1][5] = 1.0
+print(test[0])
 print(myNetwork.process(test[0]))
 print(myNetwork.process(test[1]))
\ No newline at end of file
diff --git a/testLearningNAND.py b/testLearningNAND.py
new file mode 100644
index 0000000..14ba696
--- /dev/null
+++ b/testLearningNAND.py
@@ -0,0 +1,63 @@
+import numpy as np
+import random
+from sobek.network import network
+
+random.seed()
+
+myNetwork = network(2, 1)
+
+learningRate = 3
+
+test = []
+result = []
+test.append(np.zeros(2))
+test.append(np.zeros(2))
+test.append(np.zeros(2))
+test.append(np.zeros(2))
+test[1][1] = 1.0
+test[2][0] = 1.0
+test[3][0] = 1.0
+test[3][1] = 1.0
+result.append(np.ones(1))
+result.append(np.ones(1))
+result.append(np.ones(1))
+result.append(np.zeros(1))
+
+for j in range(10000):
+    inputs = []
+    desiredOutputs = []
+
+    if (j%1000 == 0):
+        print(j)
+
+    random.shuffle(test)
+
+    for i in range(4):
+        if (test[i][0] == 1.0) and (test[i][1] == 1.0):
+            result[i][0] = 0.0
+        else:
+            result[i][0] = 1.0
+
+    myNetwork.train(test, result, learningRate)
+
+test = []
+result = []
+test.append(np.zeros(2))
+test.append(np.zeros(2))
+test.append(np.zeros(2))
+test.append(np.zeros(2))
+test[1][1] = 1.0
+test[2][0] = 1.0
+test[3][0] = 1.0
+test[3][1] = 1.0
+result.append(np.ones(1))
+result.append(np.ones(1))
+result.append(np.ones(1))
+result.append(np.zeros(1))
+
+print(myNetwork.weights)
+print(myNetwork.biases)
+print("0 0 : " + str(myNetwork.process(test[0])) + " == 1 ?")
+print("0 1 : " + str(myNetwork.process(test[1])) + " == 1 ?")
+print("1 0 : " + str(myNetwork.process(test[2])) + " == 1 ?")
+print("1 1 : " + str(myNetwork.process(test[3])) + " == 0 ?")
\ No newline at end of file
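The four NAND input patterns above are built by hand; for reference, an equivalent
and more compact construction (a sketch, not part of the patch):

    import numpy as np
    from itertools import product

    test = [np.array(bits, dtype=np.float64) for bits in product((0.0, 1.0), repeat=2)]
    result = [np.array([1.0 - a * b]) for a, b in test]  # NAND is 0 only for input (1, 1)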
diff --git a/testNAND.py b/testNAND.py
new file mode 100644
index 0000000..6465536
--- /dev/null
+++ b/testNAND.py
@@ -0,0 +1,25 @@
+import numpy as np
+import random
+from sobek.network import network
+
+myNetwork = network(2, 1)
+
+test = []
+test.append(np.zeros(2))
+test.append(np.zeros(2))
+test.append(np.zeros(2))
+test.append(np.zeros(2))
+test[1][1] = 1.0
+test[2][0] = 1.0
+test[3][0] = 1.0
+test[3][1] = 1.0
+
+myNetwork.weights = [np.array([[-10.0, -10.0]])]
+myNetwork.biases = [np.array([15.0])]
+print(myNetwork.weights)
+print(myNetwork.biases)
+
+print("0 0 : " + str(myNetwork.process(test[0])) + " == 1 ?")
+print("0 1 : " + str(myNetwork.process(test[1])) + " == 1 ?")
+print("1 0 : " + str(myNetwork.process(test[2])) + " == 1 ?")
+print("1 1 : " + str(myNetwork.process(test[3])) + " == 0 ?")
\ No newline at end of file
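Why the hand-set parameters in testNAND.py act as a NAND gate: the single neuron
computes sigmoid(-10a - 10b + 15), which saturates near 1 unless both inputs are 1,
where it drops to sigmoid(-5). A quick check of the four cases:

    import numpy as np

    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
    for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        print(a, b, round(sigmoid(-10.0*a - 10.0*b + 15.0), 4))  # 1.0, 0.9933, 0.9933, 0.0067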