Compare commits
5 Commits
convolutio
...
master
Author | SHA1 | Date | |
---|---|---|---|
|
57ab0af6b1 | ||
|
9b8c6ffea8 | ||
|
dbed51a2a0 | ||
|
5cdc7b52e1 | ||
|
dcddbd017f |
@ -1,2 +1,10 @@
|
|||||||
# PT21-22-Reseau-Neurones
|
# PT21-22-Reseau-Neurones
|
||||||
|
|
||||||
|
Cette librairie python permet de créer simplement un modèle de perceptron multicouche en choisissant les hyperparamètres suivants:
|
||||||
|
- Le nombre de couches
|
||||||
|
- Le nombre de neurones pour chaque couche
|
||||||
|
- Le learning rate
|
||||||
|
- Le batch size
|
||||||
|
- Le nombre d’époques
|
||||||
|
|
||||||
|
Il est également possible de donner un jeu de données supplémentaire afin de mesurer la précision du modèle à la fin de l'entraînement. Lorsqu’il n'y a que deux neurones d'entrées, il y a une option pour visualiser l’entraînement.
|
@ -1,23 +0,0 @@
|
|||||||
class activationFunction:
    """Interface for activation functions.

    Methods are deliberately written without `self` and are invoked
    directly on the class (e.g. ``sigmoid.applyTo(x)``), mirroring how
    the rest of this project uses them.
    """

    def applyTo(value):
        """Return the activation of ``value``; subclasses override."""
        pass

    def applyDerivateTo(value):
        """Return the derivative of the activation at ``value``."""
        pass


class sigmoid(activationFunction):
    """Logistic sigmoid: 1 / (1 + e^-x). Works element-wise on arrays."""

    def applyTo(value):
        return 1.0 / (1.0 + np.exp(-value))

    def applyDerivateTo(value):
        # Fixed: the original evaluated sigmoid.applyTo(value) twice for
        # the same argument; compute it once and reuse it.
        s = sigmoid.applyTo(value)
        return s * (1 - s)


class reLu(activationFunction):
    """Rectified linear unit: max(0, x)."""

    def applyTo(value):
        return max(0, value)

    def applyDerivateTo(value):
        # Sub-gradient convention: the derivative at exactly 0 is 1.
        return 0 if (value < 0) else 1


class softMax(activationFunction):
    # Not implemented yet; behaves like the no-op base class.
    pass
|
|
@ -1,30 +0,0 @@
|
|||||||
import numpy as np


class layer:
    """Base class for network layers.

    Parameters
    ----------
    neurons : int
        Number of neurons in the layer.
    activationFunction : class
        Activation applied to the layer output, used as
        ``activationFunction.applyTo(x)``.
    """

    def __init__(self, neurons, activationFunction):
        # Fixed: the original def line was missing the trailing ':'.
        self.neurons = neurons
        self.activationFunction = activationFunction

    def process(self, _input, __storeValues=False):
        """Transform ``_input``; subclasses must override.

        Fixed: the original was missing the ':' and a body, and omitted
        ``self`` even though subclasses read instance state.
        """
        pass


class dense(layer):
    """Fully connected layer: output = activation(W @ input + b)."""

    def process(self, _input, __storeValues=False):
        # Fixed: `self` was missing from the signature although the body
        # writes self.activation/self.output. `layerWeights`/`layerBias`
        # were unresolved names — assumed to be instance attributes set
        # elsewhere (TODO confirm against the training code).
        _input = np.dot(self.layerWeights, _input)
        _input = np.add(_input, self.layerBias)

        if (__storeValues):
            # Pre-activation value, kept for backpropagation.
            self.activation = _input

        _input = self.activationFunction.applyTo(_input)

        if (__storeValues):
            # Post-activation output, kept for backpropagation.
            self.output = _input

        return _input


class convolution(layer):
    # Not implemented yet.
    pass


class flatten(layer):
    # Not implemented yet.
    pass
|
@ -1,5 +1,6 @@
|
|||||||
import random
|
import random
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
import matplotlib
|
||||||
import matplotlib.pyplot as plt
|
import matplotlib.pyplot as plt
|
||||||
import matplotlib.animation as animation
|
import matplotlib.animation as animation
|
||||||
import pickle
|
import pickle
|
||||||
@ -93,7 +94,7 @@ class network:
|
|||||||
vizualisationFrame = np.empty((30, 30))
|
vizualisationFrame = np.empty((30, 30))
|
||||||
for x in range(30):
|
for x in range(30):
|
||||||
for y in range(30):
|
for y in range(30):
|
||||||
vizualisationFrame[x][y] = self.process(np.array([float(x), float(y)]))
|
vizualisationFrame[x][y] = self.process(np.array([float(x)/30, float(y)/30]))
|
||||||
vizualisationData.append([graph.imshow(vizualisationFrame, animated=True)])
|
vizualisationData.append([graph.imshow(vizualisationFrame, animated=True)])
|
||||||
|
|
||||||
inputBatches = [inputs[j:j+batchSize] for j in range(0, len(inputs), batchSize)]
|
inputBatches = [inputs[j:j+batchSize] for j in range(0, len(inputs), batchSize)]
|
||||||
@ -115,7 +116,6 @@ class network:
|
|||||||
for layerNumber in range(len(errorSumsWeights)-1, -1, -1):
|
for layerNumber in range(len(errorSumsWeights)-1, -1, -1):
|
||||||
for neuronNumber in range(len(errorSumsWeights[layerNumber])):
|
for neuronNumber in range(len(errorSumsWeights[layerNumber])):
|
||||||
errorSumsBiases[layerNumber][neuronNumber] += self.__Error(layerNumber, neuronNumber)
|
errorSumsBiases[layerNumber][neuronNumber] += self.__Error(layerNumber, neuronNumber)
|
||||||
#eventuellemtn sortir de boucle
|
|
||||||
errorSumsWeights[layerNumber][neuronNumber] = np.dot(errorSumsBiases[layerNumber][neuronNumber],self.outputs[layerNumber])
|
errorSumsWeights[layerNumber][neuronNumber] = np.dot(errorSumsBiases[layerNumber][neuronNumber],self.outputs[layerNumber])
|
||||||
|
|
||||||
total = 0
|
total = 0
|
||||||
@ -136,7 +136,7 @@ class network:
|
|||||||
print(self.accuracy(accuracyInputs, accuracyDesiredOutputs))
|
print(self.accuracy(accuracyInputs, accuracyDesiredOutputs))
|
||||||
|
|
||||||
if (visualize):
|
if (visualize):
|
||||||
ani = animation.ArtistAnimation(fig, vizualisationData, interval=100)
|
ani = animation.ArtistAnimation(fig, vizualisationData, interval=100, repeat_delay=1000)
|
||||||
plt.show()
|
plt.show()
|
||||||
|
|
||||||
def __Error(self, layer, neuron):
|
def __Error(self, layer, neuron):
|
||||||
@ -180,18 +180,4 @@ class network:
|
|||||||
|
|
||||||
def networkFromFile(fileName):
    """Rebuild a previously pickled network object from ``fileName``.

    NOTE(review): pickle.load executes arbitrary code during
    deserialization — only load files you trust.
    """
    with open(fileName, "rb") as saveFile:
        return pickle.load(saveFile)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
class model:
    """Container describing a network as an ordered list of layers."""

    def __init__(self, inputWidth, inputHeight, inputChannels):
        # Spatial dimensions and channel count of the expected input.
        self.inputWidth = inputWidth
        self.inputHeight = inputHeight
        self.inputChannels = inputChannels
        self.layers = []

    def add(self, layerType, activation):
        """Append a layer specification to the model.

        Fixed: ``self`` was missing from the signature, and the body
        called ``self.layers.add()`` — lists have no ``add`` method and
        the arguments were silently dropped. Recording the
        (layerType, activation) pair is the assumed intent — TODO
        confirm against callers.
        """
        self.layers.append((layerType, activation))
|
|
@ -14,7 +14,7 @@ class Sketchpad(tkinter.Canvas):
|
|||||||
self.bind("<B1-Motion>", self.add_line)
|
self.bind("<B1-Motion>", self.add_line)
|
||||||
self.PILImage = Image.new("F", (560, 560), 100)
|
self.PILImage = Image.new("F", (560, 560), 100)
|
||||||
self.draw = ImageDraw.Draw(self.PILImage)
|
self.draw = ImageDraw.Draw(self.PILImage)
|
||||||
self.MNISTNN = network.networkFromFile("MNISTtest2")
|
self.MNISTNN = network.networkFromFile("MNIST30epoch")
|
||||||
self.predictionLabel = predictionLabel
|
self.predictionLabel = predictionLabel
|
||||||
|
|
||||||
def add_line(self, event):
|
def add_line(self, event):
|
||||||
|
BIN
tests/flowerGardenData
Executable file
BIN
tests/flowerGardenData
Executable file
Binary file not shown.
17
tests/flowerGardenLearningVisualization.py
Executable file
17
tests/flowerGardenLearningVisualization.py
Executable file
@ -0,0 +1,17 @@
|
|||||||
|
#!/bin/python3
from sys import path

# Make the parent directory importable so the local sobek package is found.
path.insert(1, "..")

from sobek.network import network

import pickle

# Load the pre-generated "flower garden" training set (produced by
# generateSobekFlowerGarden.py): dataset[0] = points, dataset[1] = labels.
with open("flowerGardenData", "rb") as dataFile:
    dataset = pickle.load(dataFile)

trainPoints, trainLabels = dataset[0], dataset[1]

# 2 inputs (x, y), one hidden layer of 16 neurons, 1 output label.
myNetwork = network(2, 16, 1)

learningRate = 3.0

# visualize=True is available because the network has exactly 2 inputs.
myNetwork.train(trainPoints, trainLabels, learningRate, batchSize=100, epochs=3000, visualize=True)
|
34
tests/generateSobekFlowerGarden.py
Executable file
34
tests/generateSobekFlowerGarden.py
Executable file
@ -0,0 +1,34 @@
|
|||||||
|
#!/bin/python3
import random
import numpy as np
import math
import pickle

# Synthetic "flower garden" dataset: points whose distance to the origin
# falls inside a disc (r < 10) or a ring (20 < r < 30) are labelled 1,
# everything else 0. Coordinates are then rescaled into [0, 1].
trainPoints = []
trainLabels = []

# Fixed seed so the generated dataset is reproducible.
random.seed(1216513)

for _ in range(1000):
    x = random.randint(-50, 50)
    y = random.randint(-50, 50)

    distance = math.sqrt(x**2 + y**2)

    # Label before normalising: the geometry is defined on the raw grid.
    if (distance < 10 or 20 < distance < 30):
        trainLabels.append(np.ones(1))
    else :
        trainLabels.append(np.zeros(1))

    # Rescale each coordinate from [-50, 50] into [0, 1] for the network.
    x = (x+50)/100
    y = (y+50)/100

    trainPoints.append(np.array([x, y]))

# Quick sanity check of one sample.
print(trainPoints[1])
print(trainLabels[1])

data = [trainPoints, trainLabels]

with open("flowerGardenData", "wb") as file:
    pickle.dump(data, file)
|
Loading…
Reference in New Issue
Block a user