
Commit 245de20

Michael ZBYSZYNSKI committed
Factoring nn run code
1 parent 0fdba01 commit 245de20

File tree

2 files changed: +7 -49 lines changed

src/neuralNetwork.cpp

Lines changed: 6 additions & 48 deletions
@@ -393,58 +393,17 @@ void neuralNetwork<T>::getJSONDescription(Json::Value& jsonModelDescription)
 template<typename T>
 T neuralNetwork<T>::run(const std::vector<T>& inputVector) const
 {
-    std::vector<T> localInputLayer(numInputs);
-    localInputLayer.push_back(1.0); // layer bias
-    std::vector<std::vector<T> > localHiddenLayers(numHiddenLayers, std::vector<T>(numHiddenNodes + 1));
+    std::vector<T> localInputLayer { inputLayer };
+    std::vector<std::vector<T> > localHiddenLayers { hiddenLayers };
     T localOutputNeuron {};
 
-    std::vector<T> pattern;
-    for (size_t h {}; h < numInputs; h++)
-    {
-        pattern.push_back(inputVector[whichInputs[h]]);
-    }
-
-    //set input layer
-    for (size_t i {}; i < numInputs; ++i)
-    {
-        localInputLayer[i] = (pattern[i] - inBases[i]) / inRanges[i];
-    }
+    runInternal(inputVector, localInputLayer, localHiddenLayers, localOutputNeuron);
 
-    //calculate hidden layers
-    for (size_t layerNum {}; auto& layer : localHiddenLayers)
-    {
-        for (size_t j {}; j < numHiddenNodes; ++j)
-        {
-            layer[j] = 0;
-
-            const auto& previousLayer { layerNum == 0 ? localInputLayer : localHiddenLayers[layerNum - 1] };
-
-            for (size_t k {}; auto& input : previousLayer)
-            {
-                layer[j] += input * weights[layerNum][j][k];
-                ++k;
-            }
-
-            layer[j] = activationFunction(layer[j]);
-        }
-
-        layer.back() = 1.0; //for bias weight
-        ++layerNum;
-    }
-
-    //calculate output
-    for (size_t i {}; auto& hiddenNeuron : localHiddenLayers.back())
-    {
-        localOutputNeuron += hiddenNeuron * wHiddenOutput[i];
-        ++i;
-    }
-
-    //if classifier, outputNeuron = activationFunction(outputNeuron), else...
-    return (localOutputNeuron * outRange) + outBase;
+    return localOutputNeuron;
 }
 
 template<typename T>
-T neuralNetwork<T>::runForTraining(const std::vector<T>& inputVector)
+void neuralNetwork<T>::runInternal(const std::vector<T>& inputVector, std::vector<T>& inputLayer, std::vector<std::vector<T>>& hiddenLayers, T& outputNeuron) const
 {
     std::vector<T> pattern;
     for (size_t h {}; h < numInputs; ++h)
@@ -490,7 +449,6 @@ T neuralNetwork<T>::runForTraining(const std::vector<T>& inputVector)
 
     //if classifier, outputNeuron = activationFunction(outputNeuron), else...
     outputNeuron = (outputNeuron * outRange) + outBase;
-    return outputNeuron;
 }
 
 template<typename T>
@@ -544,7 +502,7 @@ void neuralNetwork<T>::train(const std::vector<trainingExampleTemplate<T > >& tr
     //run through every training instance
     for (auto trainingExample : trainingSet)
     {
-        runForTraining(trainingExample.input);
+        runInternal(trainingExample.input, inputLayer, hiddenLayers, outputNeuron );
         backpropagate(trainingExample.output[whichOutput]);
     }
 }
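
Taken together, the hunks above are one refactor: the forward pass that was duplicated between run() and runForTraining() now lives in a single runInternal() that writes into whatever buffers the caller supplies. run() stays const by handing in copies of the member state; train() hands in the members themselves so backpropagate() can read the activations afterwards. Below is a minimal sketch of that pattern, assuming a hypothetical tinyNet class with a stubbed-out forward pass; the simplified members and the trainStep() name are illustrative, not from the actual source.

#include <numeric>
#include <vector>

template <typename T>
class tinyNet
{
public:
    // const inference: copy member state into locals, delegate, return the result
    T run(const std::vector<T>& input) const
    {
        std::vector<T> localInput { inputLayer };
        std::vector<std::vector<T>> localHidden { hiddenLayers };
        T localOut {};
        runInternal(input, localInput, localHidden, localOut);
        return localOut;
    }

    // training path: run the same forward pass directly on the member buffers,
    // leaving the activations in place for a later backpropagation step to read
    void trainStep(const std::vector<T>& input)
    {
        runInternal(input, inputLayer, hiddenLayers, outputNeuron);
    }

private:
    // the single shared forward pass; const is legal because it mutates only
    // its reference parameters, never the object's own members
    void runInternal(const std::vector<T>& input, std::vector<T>& in,
                     std::vector<std::vector<T>>& hidden, T& out) const
    {
        in = input;                                        // stand-in for input scaling
        out = std::accumulate(in.begin(), in.end(), T {}); // stand-in for propagation
        (void) hidden;                                     // real version fills each layer
    }

    std::vector<T> inputLayer;
    std::vector<std::vector<T>> hiddenLayers;
    T outputNeuron {};
};

The sketch elides the real normalisation and layer propagation; the point is only the buffer-passing shape that lets one const method serve both the const and the mutating entry point.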

src/neuralNetwork.h

Lines changed: 1 addition & 1 deletion
@@ -153,7 +153,7 @@ class neuralNetwork final : public baseModel<T>
     /** Propagate output error back through the network.
      * @param The desired output of the network is fed into the function, and compared with the actual output
      */
-    T runForTraining(const std::vector<T>& inputVector);
+    void runInternal(const std::vector<T>& inputVector, std::vector<T>& inputLayer, std::vector<std::vector<T>>& hiddenLayers, T& outputNeuron) const;
     void backpropagate(const T& desiredOutput);
 
     /** Apply corrections to network weights, based on output error */
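
A side effect of the new declaration is worth noting: runInternal() returns void and reports through the T& outputNeuron out-parameter, so former runForTraining() callers now read the member outputNeuron afterwards instead of a return value. Using the hypothetical tinyNet sketch above, the two entry points would be exercised like this:

tinyNet<double> net;
double y = net.run({0.1, 0.2}); // const path: member buffers untouched
net.trainStep({0.1, 0.2});      // training path: activations left in the members
(void) y;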
