diff --git a/sobek/network.py b/sobek/network.py
index 186a91e..04aa73b 100755
--- a/sobek/network.py
+++ b/sobek/network.py
@@ -47,7 +47,7 @@ class network:
         if (__storeValues):
             self.activations.append(_input.copy())
 
-        #reLu application
+        #activation function application
         for neuron in range(len(_input)):
             _input[neuron] = network.__sigmoid(_input[neuron])
 
@@ -67,8 +67,12 @@ class network:
 
         self.__errors = [[0]*(len(layer)) for layer in self.__weights]
 
         for _input, desiredOutput in zip(inputs, desiredOutputs):
+
+            #fills self.activations and self.outputs
             self.__output = self.process(_input, True)
+            self.__desiredOutput = desiredOutput
+
             for layerNumber in range(len(errorSums)-1, -1, -1):
                 for neuronNumber in range(len(errorSums[layerNumber])):
                     for weightNumber in range(len(errorSums[layerNumber][neuronNumber])):
@@ -77,12 +81,17 @@ class network:
 
         total = 0
 
-        for i in range(len(errorSums)):
-            for j in range(len(errorSums[i])):
-                for k in range(len(errorSums[i][j])):
-                    errorSums[i][j][k] = errorSums[i][j][k] / len(inputs)
-                    total += errorSums[i][j][k]
-                    self.__weights[i][j][k] -= learningRate * errorSums[i][j][k]
+        for layerNumber in range(len(errorSums)):
+            for neuronNumber in range(len(errorSums[layerNumber])):
+                for weightNumber in range(len(errorSums[layerNumber][neuronNumber])):
+
+                    #probably doable with a matrix multiplication
+                    errorSums[layerNumber][neuronNumber][weightNumber] = errorSums[layerNumber][neuronNumber][weightNumber] / len(inputs)
+
+                    total += errorSums[layerNumber][neuronNumber][weightNumber]
+
+                    #probably doable with a matrix addition
+                    self.__weights[layerNumber][neuronNumber][weightNumber] -= learningRate * errorSums[layerNumber][neuronNumber][weightNumber]
 
         print("Error : " + str(total))
 
@@ -96,9 +105,10 @@ class network:
     def __ErrorHiddenLayer(self, layer, neuron):
         upperLayerLinksSum = 0
 
-        for upperLayerNeuron in range(len(self.__weights[layer+1]-1)):
+        #probably doable with a matrix multiplication
+        for upperLayerNeuron in range(len(self.__weights[layer+1])):
             #to compare against direct access to the precomputed errors
-            upperLayerLinksSum += self.__weights[layer+1][upperLayerNeuron][neuron] * self.__Error(layer+1, neuron)
+            upperLayerLinksSum += self.__weights[layer+1][upperLayerNeuron][neuron] * self.__Error(layer+1, upperLayerNeuron)
         return network.__sigmoid(self.activations[layer][neuron], True) * upperLayerLinksSum
 
     def __partialDerivative(self, layer, neuron, weight):
diff --git a/testLearning.py b/testLearning.py
index aa63cab..8f2234c 100644
--- a/testLearning.py
+++ b/testLearning.py
@@ -4,25 +4,37 @@
 from sobek.network import network
 
 random.seed()
 
-myNetwork = network(1, 8, 8, 10)
+myNetwork = network(1, 10)
 
-for j in range(3000):
+learningRate = 1
+
+for j in range(100000):
     inputs = []
     desiredOutputs = []
 
     if (j%50 == 0):
         print(j)
 
-    for i in range(200):
+    for i in range(1000):
         inputs.append([random.randrange(10)])
     inputs = np.array(inputs, dtype=object)
 
-    for i in range(200):
+    for i in range(1000):
         desiredOutputs.append([0]*10)
-        desiredOutputs[i][9 - inputs[i][0]] = 1
+        desiredOutputs[i][9 - inputs[i][0]] = 1.0
     desiredOutputs = np.array(desiredOutputs, dtype=object)
+
+    if (j%10000 == 0):
+        learningRate *= 0.1
+    myNetwork.train(inputs, desiredOutputs, learningRate)
-    myNetwork.train(inputs, desiredOutputs, 0.01)
-
+print(myNetwork.process(np.array([0.0], dtype=object)))
+print(myNetwork.process(np.array([1.0], dtype=object)))
+print(myNetwork.process(np.array([2.0], dtype=object)))
+print(myNetwork.process(np.array([3.0], dtype=object)))
+print(myNetwork.process(np.array([4.0], dtype=object)))
+print(myNetwork.process(np.array([5.0], dtype=object)))
+print(myNetwork.process(np.array([6.0], dtype=object)))
+print(myNetwork.process(np.array([7.0], dtype=object)))
 print(myNetwork.process(np.array([8.0], dtype=object)))
-print(myNetwork.process(np.array([1.0], dtype=object)))
\ No newline at end of file
+print(myNetwork.process(np.array([9.0], dtype=object)))
\ No newline at end of file
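
Review note: the new "#probably doable with a matrix multiplication" comments look right. If each layer's weights and accumulated error sums were stored as 2-D NumPy arrays of shape (neurons, weights) instead of nested Python lists, the triple loop that averages the batch gradient and applies the update would collapse to a few array expressions per layer. A minimal standalone sketch of that idea; the array names and shapes here are illustrative stand-ins, not the module's actual attributes:

    import numpy as np

    # Hypothetical stand-ins for errorSums and self.__weights: one 2-D array per layer.
    weights = [np.random.rand(8, 1), np.random.rand(10, 8)]
    errorSums = [np.random.rand(8, 1), np.random.rand(10, 8)]

    learningRate = 0.1
    batchSize = 1000
    total = 0.0

    for layer in range(len(errorSums)):
        errorSums[layer] /= batchSize                      # average gradients over the batch
        total += errorSums[layer].sum()                    # same running total as the loop version
        weights[layer] -= learningRate * errorSums[layer]  # the "matrix addition" weight update

    print("Error : " + str(total))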
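Review note: the corrected loop in __ErrorHiddenLayer (which fixes both the reuse of neuron where upperLayerNeuron was meant and the stray -1 applied to the list itself rather than to its length) computes a dot product between the neuron's outgoing weight column and the upper layer's error vector. Under the same hypothetical 2-D layout it could read:

    import numpy as np

    # Hypothetical layout: upperWeights[u][n] is the weight from neuron n to upper neuron u.
    upperWeights = np.random.rand(10, 8)
    upperErrors = np.random.rand(10)   # stand-in for __Error(layer+1, u) for each upper neuron u
    neuron = 3

    # Equivalent of the corrected accumulation loop: one dot product per neuron.
    upperLayerLinksSum = upperWeights[:, neuron] @ upperErrors

This also addresses the "#to compare against direct access to the precomputed errors" context comment: calling self.__Error(layer+1, upperLayerNeuron) inside the loop recomputes the upper layer's errors for every neuron, whereas caching them once in a vector makes the dot-product form natural.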
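Review note on testLearning.py: since 0 % 10000 == 0, the decay branch fires on the very first iteration, so training effectively starts at a rate of 0.1 rather than 1, and after the final decay at j = 90000 it runs at about 1e-10. If starting at 1 was the intent, a guard on the two added lines avoids the early decay, e.g.:

    if j > 0 and j % 10000 == 0:
        learningRate *= 0.1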