Skip to content

Commit

Permalink
adding conv loader.
Browse files Browse the repository at this point in the history
  • Loading branch information
JulioJerez committed Nov 12, 2023
1 parent 736d7d2 commit 45b67b1
Show file tree
Hide file tree
Showing 5 changed files with 97 additions and 62 deletions.
17 changes: 10 additions & 7 deletions newton-4.00/applications/ndSandbox/toolbox/ndTestDeepBrain.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -239,8 +239,8 @@ static void ValidateData(const char* const title, ndBrain& brain, ndBrainMatrix*

const ndBrainVector& truth = (*testLabels)[i];

ndInt32 index = 0;
ndBrainFloat maxProbability = 0.0f;
ndInt32 index = -1;
ndBrainFloat maxProbability = -1.0f;
for (ndInt32 j = 0; j < output.GetCount(); j++)
{
if (output[j] > maxProbability)
Expand All @@ -250,7 +250,9 @@ static void ValidateData(const char* const title, ndBrain& brain, ndBrainMatrix*
}
}

if (truth[index] < 0.5f)
ndAssert(index >= 0);
//if (truth[index] < 0.5f)
if (truth[index] == ndReal(0.0f))
{
failCount++;
}
Expand Down Expand Up @@ -326,6 +328,7 @@ static void MnistTrainingSet()
}
}

ndAssert(index >= 0);
if (m_truth[index] == ndReal(0.0f))
{
(*m_failCount)++;
Expand Down Expand Up @@ -410,8 +413,8 @@ static void MnistTrainingSet()
const ndBrainVector& input = (*testDigits)[i];
m_brain.MakePrediction(input, output);

ndInt32 index = 0;
ndBrainFloat maxProbability = ndBrainFloat(0.0f);
ndInt32 index = -1;
ndBrainFloat maxProbability = ndBrainFloat(-1.0f);
for (ndInt32 j = 0; j < output.GetCount(); j++)
{
if (output[j] > maxProbability)
Expand Down Expand Up @@ -586,6 +589,6 @@ void ndTestDeedBrian()
//xxx.GaussianNormalize();

//ThreeLayersTwoInputsTwoOutputs();
MnistTrainingSet();
//MnistTestSet();
//MnistTrainingSet();
MnistTestSet();
}
86 changes: 49 additions & 37 deletions newton-4.00/sdk/dBrain/ndBrainLayerConvolutional.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -229,42 +229,6 @@ void ndBrainLayerConvolutional::AdamUpdate(const ndBrainLayer& u, const ndBrainL
}
}

// Deserializes a convolutional layer from a stream written by Save().
// NOTE(review): unimplemented stub — the parsing logic below is commented out,
// so calling this traps in debug builds (ndAssert) and yields nullptr in release.
ndBrainLayer* ndBrainLayerConvolutional::Load(const ndBrainLoad* const loadSave)
{
//char buffer[1024];
//loadSave->ReadString(buffer);
//
//loadSave->ReadString(buffer);
//ndInt32 inputs = loadSave->ReadInt();
//loadSave->ReadString(buffer);
//ndInt32 outputs = loadSave->ReadInt();
//ndBrainLayerConvolutional* const layer = new ndBrainLayerConvolutional(inputs, outputs);
//
//loadSave->ReadString(buffer);
//for (ndInt32 i = 0; i < outputs; ++i)
//{
// ndBrainFloat val = ndBrainFloat(loadSave->ReadFloat());
// layer->m_bias[i] = val;
//}
//
//loadSave->ReadString(buffer);
//for (ndInt32 i = 0; i < outputs; ++i)
//{
// loadSave->ReadString(buffer);
// for (ndInt32 j = 0; j < inputs; ++j)
// {
// ndBrainFloat val = ndBrainFloat(loadSave->ReadFloat());
// layer->m_weights[i][j] = val;
// }
//}
//
//loadSave->ReadString(buffer);
//return layer;

// Not implemented yet: fail loudly in debug, return null in release.
ndAssert(0);
return nullptr;
}

void ndBrainLayerConvolutional::MakePrediction(const ndBrainVector& input, ndBrainVector& output) const
{
ndAssert(input.GetCount() == GetInputSize());
Expand Down Expand Up @@ -324,7 +288,7 @@ void ndBrainLayerConvolutional::CalculateParamGradients(
ndBrainLayerConvolutional* const gradients = (ndBrainLayerConvolutional*)gradientOut;

ndAssert(gradients->m_bias.GetCount() == m_outputLayers);
ndAssert(output.GetCount() == outputDerivative.GetCount());
//ndAssert(output.GetCount() == outputDerivative.GetCount());

const ndInt32 inputSize = m_inputWidth * m_inputHeight;
const ndInt32 kernelSize = m_kernelSize * m_kernelSize;
Expand Down Expand Up @@ -507,4 +471,52 @@ void ndBrainLayerConvolutional::Save(const ndBrainSave* const loadSave) const
kernelOffset += kernelSize;
Save("\n");
}
}

// Deserializes a convolutional layer from a stream previously written by Save().
// Reads the geometry header (input width/height/layers, kernel size, output
// layers), constructs the layer, then fills in the biases and kernel weights.
// The stream is tag/value formatted: every value is preceded by a text token,
// which is consumed into a scratch buffer and otherwise ignored.
// Returns a heap-allocated layer; ownership passes to the caller.
ndBrainLayer* ndBrainLayerConvolutional::Load(const ndBrainLoad* const loadSave)
{
// Scratch buffer for the textual tags that precede each stored value.
char token[1024];
loadSave->ReadString(token);

// Layer geometry, read in the exact order Save() emits it.
loadSave->ReadString(token);
const ndInt32 width = loadSave->ReadInt();

loadSave->ReadString(token);
const ndInt32 height = loadSave->ReadInt();

loadSave->ReadString(token);
const ndInt32 layersIn = loadSave->ReadInt();

loadSave->ReadString(token);
const ndInt32 filterSize = loadSave->ReadInt();

loadSave->ReadString(token);
const ndInt32 layersOut = loadSave->ReadInt();

ndBrainLayerConvolutional* const layer = new ndBrainLayerConvolutional(width, height, layersIn, filterSize, layersOut);

// One bias value per output feature map.
loadSave->ReadString(token);
for (ndInt32 i = 0; i < layersOut; ++i)
{
layer->m_bias[i] = ndBrainFloat(loadSave->ReadFloat());
}

// Each (output layer, input layer) pair owns one square kernel of
// filterSize * filterSize weights, stored contiguously in m_kernels.
loadSave->ReadString(token);
const ndInt32 weightsPerKernel = filterSize * filterSize;

ndInt32 weightIndex = 0;
for (ndInt32 i = 0; i < layersOut * layersIn; ++i)
{
loadSave->ReadString(token);
for (ndInt32 j = 0; j < weightsPerKernel; ++j)
{
layer->m_kernels[weightIndex] = ndBrainFloat(loadSave->ReadFloat());
weightIndex++;
}
}

// Consume the trailing tag so the stream is positioned at the next layer.
loadSave->ReadString(token);
return layer;
}
2 changes: 1 addition & 1 deletion newton-4.00/sdk/dBrain/ndBrainLayerConvolutional.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@
class ndBrainLayerConvolutional : public ndBrainLayer
{
public:
ndBrainLayerConvolutional(ndInt32 inputWidth, ndInt32 inputHeight, ndInt32 inputDepth, ndInt32 kernelSize, ndInt32 numberOfKernels);
ndBrainLayerConvolutional(ndInt32 inputWidth, ndInt32 inputHeight, ndInt32 inputLayers, ndInt32 kernelSize, ndInt32 ouputLayers);
ndBrainLayerConvolutional(const ndBrainLayerConvolutional& src);
virtual ~ndBrainLayerConvolutional();
virtual ndBrainLayer* Clone() const;
Expand Down
39 changes: 22 additions & 17 deletions newton-4.00/sdk/dBrain/ndBrainLayerConvolutionalMaxPooling.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,11 @@
#include "ndBrainSaveLoad.h"
#include "ndBrainLayerConvolutionalMaxPooling.h"

ndBrainLayerConvolutionalMaxPooling::ndBrainLayerConvolutionalMaxPooling(ndInt32 inputWidth, ndInt32 inputHeight, ndInt32 inputDepth)
:ndBrainLayerActivation(((inputWidth + 1) / 2) * ((inputHeight + 1) / 2) * inputDepth)
ndBrainLayerConvolutionalMaxPooling::ndBrainLayerConvolutionalMaxPooling(ndInt32 inputWidth, ndInt32 inputHeight, ndInt32 inputLayers)
:ndBrainLayerActivation(((inputWidth + 1) / 2) * ((inputHeight + 1) / 2) * inputLayers)
,m_width(inputWidth)
,m_height(inputHeight)
,m_channels(inputDepth)
,m_channels(inputLayers)
,m_index()
{
m_index.SetCount(m_neurons);
Expand Down Expand Up @@ -88,20 +88,6 @@ const char* ndBrainLayerConvolutionalMaxPooling::GetLabelId() const
return "ndBrainLayerConvolutionalMaxPooling";
}

// Deserializes a max-pooling layer from a stream written by Save().
// NOTE(review): unimplemented stub — the parsing sketch below is commented out,
// so calling this traps in debug builds (ndAssert) and yields nullptr in release.
ndBrainLayer* ndBrainLayerConvolutionalMaxPooling::Load(const ndBrainLoad* const loadSave)
{
// Not implemented yet: fail loudly in debug, return null in release.
ndAssert(0);
return nullptr;
//char buffer[1024];
//loadSave->ReadString(buffer);
//
//loadSave->ReadString(buffer);
//ndInt32 inputs = loadSave->ReadInt();
//ndBrainLayerConvolutionalMaxPooling* const layer = new ndBrainLayerConvolutionalMaxPooling(inputs);
//loadSave->ReadString(buffer);
//return layer;
}

void ndBrainLayerConvolutionalMaxPooling::InputDerivative(const ndBrainVector& output, const ndBrainVector& outputDerivative, ndBrainVector& inputDerivative) const
{
ndAssert(output.GetCount() == outputDerivative.GetCount());
Expand Down Expand Up @@ -222,3 +208,22 @@ void ndBrainLayerConvolutionalMaxPooling::Save(const ndBrainSave* const loadSave
sprintf(buffer, "\tinput_layers %d\n", m_channels);
loadSave->WriteData(buffer);
}

// Deserializes a max-pooling layer from a stream previously written by Save().
// Reads the input width, height, and channel count (tag/value formatted, tags
// are consumed and ignored), then constructs the layer from those dimensions.
// Returns a heap-allocated layer; ownership passes to the caller.
ndBrainLayer* ndBrainLayerConvolutionalMaxPooling::Load(const ndBrainLoad* const loadSave)
{
// Scratch buffer for the tag strings that precede each stored value.
char token[1024];
loadSave->ReadString(token);

// Input dimensions, read in the exact order Save() emits them.
loadSave->ReadString(token);
const ndInt32 width = loadSave->ReadInt();

loadSave->ReadString(token);
const ndInt32 height = loadSave->ReadInt();

loadSave->ReadString(token);
const ndInt32 channels = loadSave->ReadInt();

ndBrainLayerConvolutionalMaxPooling* const layer = new ndBrainLayerConvolutionalMaxPooling(width, height, channels);
// Consume the trailing tag so the stream is positioned at the next layer.
loadSave->ReadString(token);
return layer;
}
15 changes: 15 additions & 0 deletions newton-4.00/sdk/dBrain/ndBrainSaveLoad.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,10 +23,13 @@
#include "ndBrain.h"
#include "ndBrainSaveLoad.h"
#include "ndBrainLayerLinear.h"
#include "ndBrainLayerConvolutional.h"
#include "ndBrainLayerReluActivation.h"
#include "ndBrainLayerTanhActivation.h"
#include "ndBrainLayerSoftmaxActivation.h"
#include "ndBrainLayerSigmoidActivation.h"
#include "ndBrainLayerConvolutionalMaxPooling.h"
#include "ndBrainLayerCategoricalSoftmaxActivation.h"

ndBrain* ndBrainLoad::Load(const char* const pathName)
{
Expand Down Expand Up @@ -116,10 +119,22 @@ ndBrain* ndBrainLoad::Load() const
{
layer = ndBrainLayerSoftmaxActivation::Load(this);
}
else if (!strcmp(layerType, "ndBrainLayerCategoricalSoftmaxActivation"))
{
layer = ndBrainLayerCategoricalSoftmaxActivation::Load(this);
}
else if (!strcmp(layerType, "ndBrainLayerApproximateTanhActivation"))
{
layer = ndBrainLayerApproximateTanhActivation::Load(this);
}
else if (!strcmp(layerType, "ndBrainLayerConvolutional"))
{
layer = ndBrainLayerConvolutional::Load(this);
}
else if (!strcmp(layerType, "ndBrainLayerConvolutionalMaxPooling"))
{
layer = ndBrainLayerConvolutionalMaxPooling::Load(this);
}
else
{
ndAssert(0);
Expand Down

0 comments on commit 45b67b1

Please sign in to comment.