Compare commits: master...convolutio (2 commits)

Commits: 7506b3756b, c66c0ae87a
@@ -1,10 +1,2 @@
# PT21-22-Reseau-Neurones

This Python library makes it easy to build a multilayer perceptron model by choosing the following hyperparameters:

- the number of layers
- the number of neurons in each layer
- the learning rate
- the batch size
- the number of epochs

It is also possible to pass an additional dataset to measure the model's accuracy at the end of training. When the network has only two input neurons, an option is available to visualize the training.
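For context, a minimal usage sketch of this API: the `network(2, 16, 1)` constructor and the `train(...)` call are taken from the flowerGarden training script removed further down in this diff; the toy dataset below is a placeholder, not part of the repository.

```python
import numpy as np
from sobek.network import network

# Placeholder toy dataset: two input features per sample (normalized to [0, 1])
# and one binary label, standing in for the pickled "flowerGardenData" set.
trainPoints = [np.random.rand(2) for _ in range(500)]
trainLabels = [np.ones(1) if p[0] > p[1] else np.zeros(1) for p in trainPoints]

# 2 input neurons, 16 hidden neurons, 1 output neuron
myNetwork = network(2, 16, 1)

learningRate = 3.0

# batchSize and epochs are the remaining hyperparameters;
# visualize=True is only meaningful with exactly two input neurons
myNetwork.train(trainPoints, trainLabels, learningRate, batchSize=100, epochs=50, visualize=True)
```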
sobek/activationFunctions.py (new file, 23 lines)
@@ -0,0 +1,23 @@
# numpy is needed for the sigmoid below (this import was missing in the original file)
import numpy as np


class activationFunction:
    # Methods are called as plain class-level functions,
    # e.g. sigmoid.applyTo(x); they do not take self.
    def applyTo(value):
        pass

    def applyDerivateTo(value):
        pass


class sigmoid(activationFunction):
    def applyTo(value):
        return 1.0 / (1.0 + np.exp(-value))

    def applyDerivateTo(value):
        return sigmoid.applyTo(value) * (1 - sigmoid.applyTo(value))


class reLu(activationFunction):
    def applyTo(value):
        # note: max() only handles scalars, not numpy arrays
        return max(0, value)

    def applyDerivateTo(value):
        return 0 if (value < 0) else 1


class softMax(activationFunction):
    # not implemented yet
    pass
sobek/layers.py (new file, 30 lines)
@@ -0,0 +1,30 @@
import numpy as np


class layer:
    def __init__(self, neurons, activationFunction):
        self.neurons = neurons
        self.activationFunction = activationFunction

    def process(self, _input, __storeValues=False):
        pass


class dense(layer):
    def process(self, _input, __storeValues=False):
        # layerWeights / layerBias are assumed to be attributes set on the
        # layer elsewhere (e.g. by the network when it is built)
        _input = np.dot(self.layerWeights, _input)
        _input = np.add(_input, self.layerBias)

        if (__storeValues):
            self.activation = _input

        _input = self.activationFunction.applyTo(_input)

        if (__storeValues):
            self.output = _input

        return _input


class convolution(layer):
    # not implemented yet
    pass


class flatten(layer):
    # not implemented yet
    pass
@@ -1,6 +1,5 @@
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import pickle
@@ -94,7 +93,7 @@ class network:
vizualisationFrame = np.empty((30, 30))
for x in range(30):
    for y in range(30):
        vizualisationFrame[x][y] = self.process(np.array([float(x)/30, float(y)/30]))
        vizualisationFrame[x][y] = self.process(np.array([float(x), float(y)]))
vizualisationData.append([graph.imshow(vizualisationFrame, animated=True)])

inputBatches = [inputs[j:j+batchSize] for j in range(0, len(inputs), batchSize)]
@@ -116,6 +115,7 @@ class network:
for layerNumber in range(len(errorSumsWeights)-1, -1, -1):
    for neuronNumber in range(len(errorSumsWeights[layerNumber])):
        errorSumsBiases[layerNumber][neuronNumber] += self.__Error(layerNumber, neuronNumber)
        # possibly move this out of the loop
        errorSumsWeights[layerNumber][neuronNumber] = np.dot(errorSumsBiases[layerNumber][neuronNumber], self.outputs[layerNumber])

total = 0
@@ -136,7 +136,7 @@ class network:
print(self.accuracy(accuracyInputs, accuracyDesiredOutputs))

if (visualize):
    ani = animation.ArtistAnimation(fig, vizualisationData, interval=100, repeat_delay=1000)
    ani = animation.ArtistAnimation(fig, vizualisationData, interval=100)
    plt.show()

def __Error(self, layer, neuron):
@@ -180,4 +180,18 @@ class network:
def networkFromFile(fileName):
    with open(fileName, "rb") as file:
        return pickle.load(file)
        return pickle.load(file)


class model:

    def __init__(self, inputWidth, inputHeight, inputChannels):
        self.inputWidth = inputWidth
        self.inputHeight = inputHeight
        self.inputChannels = inputChannels
        self.layers = []

    def add(self, layerType, activation):
        # WIP: Python lists have no .add(); presumably the new layer
        # is meant to be appended here
        self.layers.append((layerType, activation))
@@ -14,7 +14,7 @@ class Sketchpad(tkinter.Canvas):
    self.bind("<B1-Motion>", self.add_line)
    self.PILImage = Image.new("F", (560, 560), 100)
    self.draw = ImageDraw.Draw(self.PILImage)
    self.MNISTNN = network.networkFromFile("MNIST30epoch")
    self.MNISTNN = network.networkFromFile("MNISTtest2")
    self.predictionLabel = predictionLabel

def add_line(self, event):
Binary file not shown.
@@ -1,17 +0,0 @@
#!/bin/python3
from sys import path
path.insert(1, "..")
from sobek.network import network
import pickle

with open("flowerGardenData", "rb") as file:
    data = pickle.load(file)

trainPoints = data[0]
trainLabels = data[1]

myNetwork = network(2, 16, 1)

learningRate = 3.0

myNetwork.train(trainPoints, trainLabels, learningRate, batchSize=100, epochs=3000, visualize=True)
@@ -1,34 +0,0 @@
#!/bin/python3
import random
import numpy as np
import math
import pickle

trainPoints = []
trainLabels = []

random.seed(1216513)

for i in range(1000):
    x = random.randint(-50, 50)
    y = random.randint(-50, 50)

    distance = math.sqrt(x**2 + y**2)

    if (distance < 10 or 20 < distance < 30):
        trainLabels.append(np.ones(1))
    else:
        trainLabels.append(np.zeros(1))

    x = (x+50)/100
    y = (y+50)/100

    trainPoints.append(np.array([x, y]))

print(trainPoints[1])
print(trainLabels[1])

data = [trainPoints, trainLabels]

with open("flowerGardenData", "wb") as file:
    pickle.dump(data, file)