about summary refs log tree commit diff
path: root/src/armnnCaffeParser/CaffeParser.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/armnnCaffeParser/CaffeParser.cpp')
-rw-r--r--  src/armnnCaffeParser/CaffeParser.cpp  18
1 files changed, 17 insertions, 1 deletions
diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp
index e12badc3a0..254a819db4 100644
--- a/src/armnnCaffeParser/CaffeParser.cpp
+++ b/src/armnnCaffeParser/CaffeParser.cpp
@@ -529,7 +529,11 @@ void CaffeParser::ParseConvLayer(const LayerParameter& layerParam)
returnLayer = layer;
}
- BOOST_ASSERT(returnLayer);
+ if (!returnLayer)
+ {
+ throw ParseException("Loading Convolution Layer: invalid return layer");
+ }
+
SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
}
@@ -1014,6 +1018,18 @@ void CaffeParser::ParseBatchNormLayer(const LayerParameter& layerParam)
vector<float> varianceData(channels);
GetDataFromBlob(layerParam, varianceData, 1);
+ // read moving average factor and apply scaling (if required)
+ const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(2));
+ const float movingAverageFactor = blob.data(boost::numeric_cast<int>(0));
+ if(movingAverageFactor != 0.0f)
+ {
+ const float scaleFactor = 1.0f / movingAverageFactor;
+ auto scaleFunction = [scaleFactor](float f) -> float { return f * scaleFactor; };
+
+ std::transform(varianceData.begin(), varianceData.end(), varianceData.begin(), scaleFunction);
+ std::transform(meanData.begin(), meanData.end(), meanData.begin(), scaleFunction);
+ }
+
// identity scale operation
vector<float> betaData(channels, 0.0f);
vector<float> gammaData(channels, 1.0f);