author    surmeh01 <surabhi.mehta@arm.com>  2018-05-18 16:31:43 +0100
committer telsoa01 <telmo.soares@arm.com>   2018-05-23 13:09:07 +0100
commit    3537c2ca7ebf31c1673b9ec2bb0c17b0406bbae0 (patch)
tree      5950603ad78ec3fe56fb31ddc7f4d52a19f5bc60 /src/armnnCaffeParser
parent    bceff2fb3fc68bb0aa88b886900c34b77340c826 (diff)
download  armnn-3537c2ca7ebf31c1673b9ec2bb0c17b0406bbae0.tar.gz
Release 18.05
Diffstat (limited to 'src/armnnCaffeParser')
-rw-r--r--  src/armnnCaffeParser/CaffeParser.cpp  18
-rw-r--r--  src/armnnCaffeParser/CaffeSupport.md  11
2 files changed, 23 insertions(+), 6 deletions(-)
diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp
index e12badc3a0..254a819db4 100644
--- a/src/armnnCaffeParser/CaffeParser.cpp
+++ b/src/armnnCaffeParser/CaffeParser.cpp
@@ -529,7 +529,11 @@ void CaffeParser::ParseConvLayer(const LayerParameter& layerParam)
returnLayer = layer;
}
- BOOST_ASSERT(returnLayer);
+ if (!returnLayer)
+ {
+ throw ParseException("Loading Convolution Layer: invalid return layer");
+ }
+
SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
}
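
For context, this hunk replaces a debug-only BOOST_ASSERT with a ParseException, so a malformed convolution layer now fails with a catchable error in release builds as well. Below is a minimal sketch of how a caller might handle that exception; it assumes the ICaffeParser interface from include/armnnCaffeParser/ICaffeParser.hpp, and the model path, input shape, and tensor names are placeholders.

#include <armnn/Exceptions.hpp>
#include <armnn/Tensor.hpp>
#include <armnnCaffeParser/ICaffeParser.hpp>

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main()
{
    // Placeholder input/output bindings for an arbitrary Caffe model.
    const std::map<std::string, armnn::TensorShape> inputShapes{
        { "data", armnn::TensorShape({ 1, 3, 224, 224 }) }
    };
    const std::vector<std::string> requestedOutputs{ "prob" };

    armnnCaffeParser::ICaffeParserPtr parser = armnnCaffeParser::ICaffeParser::Create();
    try
    {
        // With this change, an invalid convolution layer raises armnn::ParseException
        // here instead of tripping an assert.
        armnn::INetworkPtr network =
            parser->CreateNetworkFromTextFile("model.prototxt", inputShapes, requestedOutputs);
    }
    catch (const armnn::ParseException& e)
    {
        std::cerr << "Failed to parse Caffe model: " << e.what() << std::endl;
        return 1;
    }
    return 0;
}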
@@ -1014,6 +1018,18 @@ void CaffeParser::ParseBatchNormLayer(const LayerParameter& layerParam)
vector<float> varianceData(channels);
GetDataFromBlob(layerParam, varianceData, 1);
+ // read moving average factor and apply scaling (if required)
+ const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(2));
+ const float movingAverageFactor = blob.data(boost::numeric_cast<int>(0));
+ if(movingAverageFactor != 0.0f)
+ {
+ const float scaleFactor = 1.0f / movingAverageFactor;
+ auto scaleFunction = [scaleFactor](float f) -> float { return f * scaleFactor; };
+
+ std::transform(varianceData.begin(), varianceData.end(), varianceData.begin(), scaleFunction);
+ std::transform(meanData.begin(), meanData.end(), meanData.begin(), scaleFunction);
+ }
+
// identity scale operation
vector<float> betaData(channels, 0.0f);
vector<float> gammaData(channels, 1.0f);
diff --git a/src/armnnCaffeParser/CaffeSupport.md b/src/armnnCaffeParser/CaffeSupport.md
index e7724800f6..b5229ebf04 100644
--- a/src/armnnCaffeParser/CaffeSupport.md
+++ b/src/armnnCaffeParser/CaffeSupport.md
@@ -1,5 +1,5 @@
#Caffe layers supported by the Arm NN SDK
-This reference guide provides a list of Caffe layers the Arm NN SDK currently supports.
+This reference guide provides a list of Caffe layers the Arm NN SDK currently supports.
Although some other neural networks might work, Arm tests the Arm NN SDK with Caffe implementations of the following neural networks:
@@ -12,11 +12,13 @@ Although some other neural networks might work, Arm tests the Arm NN SDK with Ca
- Lenet.
- MobileNetv1.
-The Arm NN SDK supports the following machine learning layers for Caffe networks:
+The Arm NN SDK supports the following machine learning layers for Caffe networks:
-- BatchNorm, in inference mode.
+- BatchNorm, in inference mode.
- Convolution, excluding the Dilation Size, Weight Filler, Bias Filler, Engine, Force nd_im2col, and Axis parameters.
+- Concat, along the channel dimension only.
+- Dropout, in inference mode.
- Eltwise, excluding the coeff parameter.
- Inner Product, excluding the Weight Filler, Bias Filler, Engine, and Axis parameters.
- Input.
@@ -26,6 +28,5 @@ The Arm NN SDK supports the following machine learning layers for Caffe networks
- Scale.
- Softmax, excluding the Axis and Engine parameters.
- Split.
-- Dropout, in inference mode.
-More machine learning layers will be supported in future releases.
\ No newline at end of file
+More machine learning layers will be supported in future releases.
\ No newline at end of file