From 7e9a93354893088f8529da8b9bd32742daf97bc8 Mon Sep 17 00:00:00 2001
From: eynard
Date: Tue, 14 Dec 2021 22:26:11 +0100
Subject: [PATCH] A piece of the training step (error + partial derivative 1)

---
 sobek/network.py | 37 ++++++++++++++++++++++++++++++-------
 1 file changed, 30 insertions(+), 7 deletions(-)

diff --git a/sobek/network.py b/sobek/network.py
index 2318dc1..be407af 100755
--- a/sobek/network.py
+++ b/sobek/network.py
@@ -23,7 +23,7 @@ class network:
 
     def __sigmoid(value, derivative=False):
         if (derivative):
-            return __sigmoid(value) * (1 - __sigmoid(value))
+            return network.__sigmoid(value) * (1 - network.__sigmoid(value))
         return 1/(1+np.exp(-value))
 
     def process(self, input, storeValues=False):
@@ -33,24 +33,47 @@ class network:
             raise ValueError("The input vector has the wrong size!")
         if input.dtype != np.float64:
             raise TypeError("The input vector must contain floats!")
+
+        if (storeValues):
+            self.activations = []
+            self.outputs = []
 
         for layerWeights, bias in zip(self.__weights, self.__biases):
             input = np.matmul(input, layerWeights)
             input = np.add(input, bias)
+
+            if (storeValues):
+                self.activations.append(input)
+
             #reLu application
             with np.nditer(input, op_flags=['readwrite']) as layer:
                 for neuron in layer:
                     neuron = network.__reLu(neuron)
+
+            #could compare performance against recomputing these later
+            if (storeValues):
+                self.outputs.append(input)
 
         return input
 
-    def train(self, inputs, results):
-        self.__outputs = 1
-        #for j in range(1,):
+    def train(self, inputs, desiredOutputs):
+        for input, desiredOutput in zip(inputs, desiredOutputs):
+            self.__output = self.process(input, True)
+            self.__desiredOutput = desiredOutput
 
     #partialDerivatives
-    def __Error(layer, output, desiredOutput):
-        return __ErrorFinalLayerFromValue() if (layer == 1)
+    def __Error(self, layer, neuron):
+        return self.__ErrorFinalLayer(neuron) if (layer == 1) else self.__ErrorHiddenLayer(layer, neuron)
 
     def __ErrorFinalLayer(self, neuron):
-        return __reLu(value, true) * (output - desiredOutput)
\ No newline at end of file
+        return network.__reLu(self.activations[-1][neuron], True) * (self.__output[neuron] - self.__desiredOutput[neuron])
+
+    def __ErrorHiddenLayer(self, layer, neuron):
+        upperLayerLinksSum = 0
+        for upperLayerNeuron in range(len(self.__weights[layer+1])):
+            #compare against reading precomputed errors directly
+            upperLayerLinksSum += self.__weights[layer+1][upperLayerNeuron][neuron] * self.__Error(layer+1, upperLayerNeuron)
+        return network.__reLu(self.activations[layer][neuron], True) * upperLayerLinksSum
+
+    def __partialDerivative(self, layer, neuron):
+        return self.__Error(layer, neuron) * self.outputs[layer][neuron]
\ No newline at end of file
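
For reference (not part of the patch): the three new methods implement the standard backpropagation recursion, delta_L = f'(z_L) * (output - desired) for the final layer and delta_l = f'(z_l) * sum_k w[l+1][k][j] * delta[l+1][k] for hidden layers. Below is a minimal vectorized sketch of the same recursion for sanity-checking, assuming the (inputs, outputs) weight shape implied by np.matmul(input, layerWeights) in process(); relu_prime and backprop_errors are hypothetical helpers, not sobek API.

    import numpy as np

    def relu_prime(z):
        # derivative of reLu, playing the role of network.__reLu(value, True)
        return (z > 0).astype(np.float64)

    def backprop_errors(weights, activations, output, desired):
        # weights: list of (n_l, n_{l+1}) matrices; activations: list of
        # pre-activation vectors z_l, one per weight matrix, as in process()
        # final layer: delta_L = f'(z_L) * (output - desired)
        deltas = [relu_prime(activations[-1]) * (output - desired)]
        # hidden layers, from last-but-one down to the first:
        # delta_l = f'(z_l) * (W_{l+1} @ delta_{l+1})
        for layer in range(len(weights) - 2, -1, -1):
            deltas.insert(0, relu_prime(activations[layer])
                             * (weights[layer + 1] @ deltas[0]))
        # the per-weight gradient then follows as
        # dE/dW[l][i][j] = deltas[l][j] * outputs[l - 1][i]
        # (with the network input standing in for outputs[-1] when l = 0)
        return deltas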