diff --git a/sobek/activationFunctions.py b/sobek/activationFunctions.py
new file mode 100644
index 0000000..6830024
--- /dev/null
+++ b/sobek/activationFunctions.py
@@ -0,0 +1,35 @@
+import numpy as np
+
+class activationFunction:
+    # Base class: subclasses provide the function and its derivative,
+    # both applied element-wise to numpy arrays.
+    @staticmethod
+    def applyTo(value):
+        pass
+
+    @staticmethod
+    def applyDerivateTo(value):
+        pass
+
+class sigmoid(activationFunction):
+    @staticmethod
+    def applyTo(value):
+        return 1.0 / (1.0 + np.exp(-value))
+
+    @staticmethod
+    def applyDerivateTo(value):
+        sig = sigmoid.applyTo(value)
+        return sig * (1 - sig)
+
+class reLu(activationFunction):
+    @staticmethod
+    def applyTo(value):
+        return np.maximum(0, value)
+
+    @staticmethod
+    def applyDerivateTo(value):
+        return np.where(value < 0, 0, 1)
+
+class softMax(activationFunction):
+    # Placeholder, not implemented yet.
+    pass
diff --git a/sobek/layers.py b/sobek/layers.py
new file mode 100644
index 0000000..b53d5cc
--- /dev/null
+++ b/sobek/layers.py
@@ -0,0 +1,33 @@
+import numpy as np
+
+class layer:
+    def __init__(self, neurons, activationFunction):
+        self.neurons = neurons
+        self.activationFunction = activationFunction
+
+    def process(self, _input, __storeValues=False):
+        pass
+
+class dense(layer):
+    def process(self, _input, __storeValues=False):
+        # layerWeights and layerBias are expected to be assigned to the
+        # layer by the model before the forward pass.
+        _input = np.dot(self.layerWeights, _input)
+        _input = np.add(_input, self.layerBias)
+
+        if __storeValues:
+            # Cache the pre-activation values for backpropagation.
+            self.activation = _input
+
+        _input = self.activationFunction.applyTo(_input)
+
+        if __storeValues:
+            self.output = _input
+
+        return _input
+
+class convolution(layer):
+    pass
+
+class flatten(layer):
+    pass
diff --git a/sobek/network.py b/sobek/network.py
index 527d3c3..c53306d 100755
--- a/sobek/network.py
+++ b/sobek/network.py
@@ -194,3 +194,5 @@ class model:
         self.layers = []
 
-    def add(layerType, activation):
+    def add(self, neurons, layerType, activation):
+        # Instantiate the requested layer type and register it with the model.
+        self.layers.append(layerType(neurons, activation))
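
A minimal usage sketch of the new classes, assuming the sobek package is importable and that layerWeights/layerBias are assigned to each dense layer externally (here they are set by hand, with illustrative shapes):

    import numpy as np
    from sobek.layers import dense
    from sobek.activationFunctions import sigmoid

    hidden = dense(3, sigmoid)                   # 3 neurons, sigmoid activation
    hidden.layerWeights = np.random.rand(3, 4)   # weight matrix: (neurons, inputs)
    hidden.layerBias = np.random.rand(3)         # one bias per neuron
    output = hidden.process(np.ones(4), True)    # forward pass, caching intermediate values
    print(output.shape)                          # (3,)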