That reminds me of the Windows/Petzold era — so, once again, we can simplify the repetitive code
by creating a generic class TNeuronalNetwork in Python.
This is a work in progress — very good for learning basic Python!
class.py
Code: Select all
import tensorflow as tf
class TPerceptron :
    """A single neuron: holds one scalar weight per input connection."""

    def __init__( self ) :
        # Per-instance weight list. (The original declared this at class
        # level, so every perceptron shared ONE list.)
        self.aWeights = []

    def New( self, nInputs ) :
        """Append one zero-initialised weight per input.

        Returns self so the call can be chained, e.g.
        TPerceptron().New( n ) — the original returned None, so callers
        that stored the result ended up storing None.
        """
        for n in range( nInputs ) :
            self.aWeights.append( tf.zeros( [] ) )
        return self
class TLayer :
    """One layer of the network: a list of perceptrons plus a scalar bias."""

    def __init__( self ) :
        # All state is per-instance. (The original used class-level
        # attributes, so every TLayer shared the SAME aPerceptrons list.)
        self.aPerceptrons = []
        self.bias = None
        self.aPrevLayer = None

    def New( self, nPerceptrons ) :
        """Populate the layer with nPerceptrons perceptrons and a zero bias.

        Each perceptron gets one weight per perceptron of the previous
        layer (0 weights for the input layer, i.e. when aPrevLayer is
        None). NOTE(review): aPrevLayer must be assigned BEFORE calling
        New for the fan-in count to be correct. Returns self for chaining.
        """
        for n in range( nPerceptrons ) :
            if self.aPrevLayer :
                nInputs = len( self.aPrevLayer.aPerceptrons )
            else :
                nInputs = 0
            self.aPerceptrons.append( TPerceptron().New( nInputs ) )
        self.bias = tf.zeros( [] )
        return self
class TNeuronalNetwork :
    """A fully-connected feed-forward network described by a topology list.

    Built against the TensorFlow 1.x graph API (tf.placeholder,
    tf.train.AdamOptimizer) — will not run under TF 2.x eager mode
    without tf.compat.v1; TODO confirm the target TF version.
    """

    def __init__( self ) :
        # Per-instance state (the original used shared class attributes).
        self.aLayers = []
        self.input = None
        self.output = None
        self.nLearningRate = 0.002
        self.nTrainingEpochs = 500
        self.tf = None          # the running output tensor of the graph
        self.loss = None
        self.optimizer = None

    def New( self, aTopology ) :
        """Build the graph for the given topology, e.g. [ 5, 7, 1 ].

        aTopology[ 0 ] is the input width, aTopology[ -1 ] the output
        width. Returns self for chaining.
        """
        self.input = tf.placeholder( "float", shape=( None, aTopology[ 0 ] ) )
        self.output = tf.placeholder( "float", shape=( None, aTopology[ -1 ] ) )
        # The running tensor starts at the input placeholder; each hidden/
        # output layer applies one affine transform to it.
        self.tf = self.input
        for n in range( len( aTopology ) ) :
            oLayer = TLayer()
            if n > 0 :
                # Must be wired BEFORE New() so the perceptrons get the
                # correct fan-in (the original assigned it after New()).
                oLayer.aPrevLayer = self.aLayers[ -1 ]
            self.aLayers.append( oLayer.New( aTopology[ n ] ) )
            if n > 0 :
                # Weight matrix connecting layer n-1 to layer n. The
                # original referenced an undefined `weights[ 'h1' ]`,
                # which raised NameError.
                oWeights = tf.Variable( tf.zeros( [ aTopology[ n - 1 ], aTopology[ n ] ] ) )
                self.tf = tf.add( tf.matmul( self.tf, oWeights ), oLayer.bias )
        self.loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits( logits=self.tf, labels=self.output ) )
        self.optimizer = tf.train.AdamOptimizer( learning_rate = self.nLearningRate ).minimize( self.loss )
        return self
# Build a 3-layer network: 5 inputs, 7 hidden units, 1 output.
oNN = TNeuronalNetwork().New( [ 5, 7, 1 ] )