changements de boucles en opérations de matrices

This commit is contained in:
eynard 2021-12-17 15:08:53 +01:00
parent bfd06b4f29
commit c0a705ffb9
2 changed files with 33 additions and 22 deletions

View File

@ -63,8 +63,9 @@ class network:
def train(self, inputs, desiredOutputs, learningRate):
errorSums = [[[0]*(len(neuron)) for neuron in layer] for layer in self.__weights] errorSumsWeights = [[[0]*len(neuron) for neuron in layer] for layer in self.__weights]
self.__errors = [[0]*(len(layer)) for layer in self.__weights] errorSumsBiases = [[0]*len(layer) for layer in self.__biases]
self.__errors = [[0]*len(layer) for layer in self.__weights]
for _input, desiredOutput in zip(inputs, desiredOutputs):
@ -73,27 +74,38 @@ class network:
self.__desiredOutput = desiredOutput
for layerNumber in range(len(errorSums)-1, -1, -1): #Somme de matrice ?
for neuronNumber in range(len(errorSums[layerNumber])): for layerNumber in range(len(errorSumsWeights)-1, -1, -1):
for weightNumber in range(len(errorSums[layerNumber][neuronNumber])): for neuronNumber in range(len(errorSumsWeights[layerNumber])):
errorSumsBiases[layerNumber][neuronNumber] += self.__Error(layerNumber, neuronNumber)
for weightNumber in range(len(errorSumsWeights[layerNumber][neuronNumber])):
#print("layer : " + str(layerNumber) + " neuron : " + str(neuronNumber) + " weight : " + str(weightNumber))
errorSums[layerNumber][neuronNumber][weightNumber] += self.__partialDerivative(layerNumber, neuronNumber, weightNumber) errorSumsWeights[layerNumber][neuronNumber][weightNumber] += self.__PartialDerivative(layerNumber, neuronNumber, weightNumber)
total = 0
for layerNumber in range(len(errorSums)): errorSumsWeights = np.multiply(errorSumsWeights, -(learningRate/len(inputs)))
for neuronNumber in range(len(errorSums[layerNumber])): self.__weights = np.add(self.__weights, errorSumsWeights)
for weightNumber in range(len(errorSums[layerNumber][neuronNumber])):
errorSumsBiases = np.multiply(errorSumsBiases, -(learningRate/len(inputs)))
self.__biases = np.add(self.__biases, errorSumsBiases)
print(self.__biases)
"""
for layerNumber in range(len(errorSumsWeights)):
for neuronNumber in range(len(errorSumsWeights[layerNumber])):
for weightNumber in range(len(errorSumsWeights[layerNumber][neuronNumber])):
#Probablement faisable avec une multiplication de matrices
errorSums[layerNumber][neuronNumber][weightNumber] = errorSums[layerNumber][neuronNumber][weightNumber] / len(inputs) errorSumsWeights[layerNumber][neuronNumber][weightNumber] = errorSumsWeights[layerNumber][neuronNumber][weightNumber] / len(inputs)
total += errorSums[layerNumber][neuronNumber][weightNumber] total += errorSumsWeights[layerNumber][neuronNumber][weightNumber]
#Probablement faisable avec une somme de matrices
self.__weights[layerNumber][neuronNumber][weightNumber] -= learningRate * errorSums[layerNumber][neuronNumber][weightNumber] self.__weights[layerNumber][neuronNumber][weightNumber] -= learningRate * errorSumsWeights[layerNumber][neuronNumber][weightNumber]
print("Error : " + str(total)) print("Error : " + str(total))"""
def __Error(self, layer, neuron): def __Error(self, layer, neuron):
if (self.__errors[layer][neuron] == 0 ):
@ -107,9 +119,8 @@ class network:
upperLayerLinksSum = 0
#Probablement faisable avec une multiplication de matrices
for upperLayerNeuron in range(len(self.__weights[layer+1])):
#A comparer avec un acces direct au erreurs precalcules upperLayerLinksSum += self.__weights[layer+1][upperLayerNeuron][neuron] * self.__errors[layer+1][upperLayerNeuron]
upperLayerLinksSum += self.__weights[layer+1][upperLayerNeuron][neuron] * self.__Error(layer+1, upperLayerNeuron)
return network.__sigmoid(self.activations[layer][neuron], True) * upperLayerLinksSum
def __partialDerivative(self, layer, neuron, weight): def __PartialDerivative(self, layer, neuron, weight):
return self.__Error(layer, neuron) * self.outputs[layer-1][weight]

View File

@ -4,11 +4,11 @@ from sobek.network import network
random.seed()
myNetwork = network(10, 10) myNetwork = network(10, 10, 10)
learningRate = 1 learningRate = 1
for j in range(10000): for j in range(100):
inputs = []
inputs2 = []
desiredOutputs = []
@ -25,15 +25,15 @@ for j in range(10000):
desiredOutputs[i][9 - int(inputs[i][0]*10)] = 1.0
desiredOutputs = np.array(desiredOutputs, dtype=object)
for i in range(1000): #for i in range(1000):
inputs2.append([0]*10) # inputs2.append([0]*10)
inputs2[i][int(inputs[i][0]*10)] = 1.0 # inputs2[i][int(inputs[i][0]*10)] = 1.0
inputs2 = np.array(inputs2, dtype=object)
if (j%10000 == 0):
learningRate*= 0.1
myNetwork.train(inputs2, desiredOutputs, learningRate) myNetwork.train(desiredOutputs, desiredOutputs, learningRate)
test = []
test.append([0]*10)