# Reconstructed from a git format-patch ("legere corrections") that was
# mangled onto two lines; payload is sobek/network.py plus a test.py driver.
# NOTE(review): the same patch creates "sobek/__init.py__" — the file name is
# misspelled; it must be "sobek/__init__.py" or `from sobek.network import
# network` will fail.
import numpy as np


class network:
    """A minimal fully-connected feed-forward network with ReLU activations.

    Weights are drawn uniformly from [0, 1) using a single seeded generator,
    so construction is reproducible; biases start at zero.
    """

    def __init__(self, inputLayerSize, *layerSizes, seed=42):
        """Build one weight matrix and one bias vector per layer.

        Args:
            inputLayerSize: size of the input vector (must be an int).
            *layerSizes: sizes of the successive hidden/output layers.
            seed: RNG seed for reproducible weight init (keyword-only,
                defaults to the original hard-coded 42).

        Raises:
            TypeError: if inputLayerSize is not an int.
        """
        if type(inputLayerSize) != int:
            raise TypeError("The input layer size must be an int!")

        # BUG FIX: the original called np.random.default_rng(42) once per
        # layer, re-seeding every time, so all layers received the identical
        # "random" draw.  One generator for the whole network fixes that.
        rng = np.random.default_rng(seed)

        self.inputLayerSize = inputLayerSize
        # Per-layer parameters are ragged (different shapes), so they live in
        # plain Python lists of float64 arrays rather than object-dtype
        # ndarrays; this keeps matmul on the fast native-float path.
        self.weights = []
        previousSize = inputLayerSize  # was the leaked attr self.oldLayerSize
        for layerSize in layerSizes:
            self.weights.append(rng.random((previousSize, layerSize)))
            previousSize = layerSize
        self.biases = [np.zeros(layerSize) for layerSize in layerSizes]

    @staticmethod
    def reLu(value):
        """Rectified linear unit for a single scalar: max(0, value)."""
        return max(0, value)

    def process(self, input):
        """Run a forward pass and return the output-layer activations.

        Args:
            input: float64 ndarray of size inputLayerSize.

        Returns:
            ndarray with the activations of the last layer.

        Raises:
            TypeError: input is not an ndarray, or not float64.
            ValueError: input size differs from the input layer size.
        """
        if not isinstance(input, np.ndarray):
            raise TypeError("The input must be a vector!")
        if input.size != self.inputLayerSize:
            raise ValueError("The input vector has the wrong size!")
        if input.dtype != np.float64:
            raise TypeError("The input vector must contain floats!")

        for layerWeights, bias in zip(self.weights, self.biases):
            input = np.matmul(input, layerWeights) + bias
            # BUG FIX: the original looped with np.nditer and wrote
            # `neuron = network.reLu(neuron)`, which rebinds the loop
            # variable and never writes back (nditer needs
            # `neuron[...] = ...`) — ReLU was silently skipped.
            # np.maximum applies it element-wise correctly.
            input = np.maximum(input, 0.0)

        return input


if __name__ == "__main__":
    # Behaviour of the patch's test.py driver.
    test = network(16, 16, 8, 4)
    print(network.reLu(8))
    print(test.process(np.random.default_rng(42).random((16))))