18 #include <boost/numeric/conversion/cast.hpp> 19 #include <boost/format.hpp> 22 #include "caffe/proto/caffe.pb.h" 25 #include <google/protobuf/io/coded_stream.h> 26 #include <google/protobuf/io/zero_copy_stream.h> 27 #include <google/protobuf/io/zero_copy_stream_impl.h> 28 #include <google/protobuf/text_format.h> 29 #include <google/protobuf/stubs/common.h> 30 #include <google/protobuf/stubs/once.h> 31 #include <google/protobuf/io/coded_stream.h> 32 #include <google/protobuf/descriptor.h> 33 #include <google/protobuf/generated_message_reflection.h> 34 #include <google/protobuf/reflection_ops.h> 35 #include <google/protobuf/wire_format.h> 58 using namespace armnn;
59 using namespace caffe;
66 const float* GetArrayPtrFromBlob(
const LayerParameter& layerParam,
unsigned int blobIndex)
68 auto nBlobs = layerParam.blobs_size();
69 if (blobIndex >= boost::numeric_cast<unsigned int>(nBlobs))
74 "Expected data blob at index %1% in layer %2% not found. nBlobs=%2%. %4%") %
81 const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(blobIndex));
83 const float* arrayPtr = blob.data().data();
87 void GetDataFromBlob(
const LayerParameter& layerParam, vector<float>& outData,
unsigned int blobIndex)
89 auto nBlobs = layerParam.blobs_size();
90 if (blobIndex >= boost::numeric_cast<unsigned int>(nBlobs))
95 "Expected data blob at index %1% in layer %2% not found. %3%") %
101 const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(blobIndex));
104 if (blobSize != outData.size())
109 "Data blob at index %1% in layer %2% has an unexpected size. " 110 "Expected %3% elements but got %4% elements. %5%") %
119 for (
int i = 0; i < outSizeInt; ++i)
121 outData[
static_cast<size_t>(i)] = blob.data(i);
// Total size, in bytes, of the element payload held by 'vec'.
template <typename T>
size_t SizeOfVectorData(const std::vector<T>& vec)
{
    const size_t elementCount = vec.size();
    return elementCount * sizeof(T);
}
131 void ValidateNumInputsOutputs(
const caffe::LayerParameter& layerParameter,
132 unsigned int numInputs,
133 unsigned int numOutputs)
135 int numInputsActual = layerParameter.bottom_size();
136 if (numInputs != boost::numeric_cast<unsigned int>(numInputsActual))
140 boost::format(
"Invalid number of inputs requested %1% for layer %2% " 141 "while only %3% present. %4%") %
143 layerParameter.name() %
148 int numOutputsActual = layerParameter.top_size();
149 if (numOutputs != boost::numeric_cast<unsigned int>(numOutputsActual))
153 boost::format(
"Invalid number of outputs requested %1% for layer %2% " 154 "while only %3% present. %4%") %
156 layerParameter.name() %
// Resolves a parameter value by consulting the primary (optional) extractor
// first and, only when it reports "not present", the fallback extractor.
// Each extractor returns a pair<bool, ValueType> whose .first flags whether
// a value was found; when neither source has one, .second carries through
// the defaultValue they were handed, which is what gets returned.
template<typename ParamType,
         typename ExtractOptional,
         typename ExtractFallback,
         typename ValueType>
ValueType GetOptionalWithFallback(const ParamType& param,
                                  ExtractOptional extractOptional,
                                  ExtractFallback extractFallback,
                                  ValueType defaultValue)
{
    const auto primary = extractOptional(param, defaultValue);
    if (primary.first)
    {
        return primary.second;
    }

    // Primary field absent - defer to the secondary source (which itself
    // yields defaultValue when it also has nothing).
    return extractFallback(param, defaultValue).second;
}
177 #define GET_OPTIONAL_WITH_VECTOR_FALLBACK(PARAM, \ 183 GetOptionalWithFallback( \ 185 [](const PARAM_TYPE & param, VALUE_TYPE defaultValue) \ 187 if (param.has_##OPTIONAL_VALUE ()) \ 189 return std::make_pair(true, param.OPTIONAL_VALUE ()); \ 193 return std::make_pair(false, defaultValue); \ 196 [](const PARAM_TYPE & param, VALUE_TYPE defaultValue) \ 198 if (param.FALLBACK_VECTOR##_size() > 0) \ 200 return std::make_pair(true, (param.FALLBACK_VECTOR ()).Get(0)); \ 204 return std::make_pair(false, defaultValue); \ 209 #define GET_OPTIONAL_WITH_FALLBACK(PARAM, \ 215 GetOptionalWithFallback( \ 217 [](const PARAM_TYPE & param, VALUE_TYPE defaultValue) \ 219 if (param.has_##OPTIONAL_VALUE ()) \ 221 return std::make_pair(true, param.OPTIONAL_VALUE ()); \ 225 return std::make_pair(false, defaultValue); \ 228 [](const PARAM_TYPE & param, VALUE_TYPE defaultValue) \ 230 if (param.has_##FALLBACK_VALUE ()) \ 232 return std::make_pair(true, param.FALLBACK_VALUE ()); \ 236 return std::make_pair(false, defaultValue); \ 243 const std::map<std::string, CaffeParserBase::OperationParsingFunction>
276 : m_Network(nullptr, nullptr)
298 const char* bindingPointDesc,
299 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
301 auto it = nameToBindingInfo.find(layerName);
302 if (it == nameToBindingInfo.end())
307 "Unknown binding %1% for layer '%2%'. %3%") %
317 std::vector<unsigned int> shape;
318 for (
int j = 0; j < blobShape.dim_size(); ++j)
320 shape.push_back(static_cast<unsigned int>(blobShape.dim(j)));
323 return TensorInfo(boost::numeric_cast<unsigned int>(shape.size()), shape.data(), DataType::Float32);
332 ret.set_dim(boost::numeric_cast<int>(i), desc.
GetShape()[i]);
342 std::vector<const caffe::LayerParameter*> ret;
343 ret.reserve(boost::numeric_cast<size_t>(layerParam.bottom_size()));
344 for (
int j = 0; j < layerParam.bottom_size(); ++j)
346 std::string inputName = layerParam.bottom(j);
353 "Can't find Caffe layer with top called '%1%', " 354 "which is listed as an input of '%2%'. %3%") %
359 ret.push_back(inputIt->second);
368 ValidateNumInputsOutputs(layerParam, 0, 1);
370 const InputParameter& param = layerParam.input_param();
380 const BlobShape* originalShape = param.shape_size() > 0 && param.shape(0).dim_size() > 0 ?
381 ¶m.shape(0) :
nullptr;
390 const TensorShape& overrideShape = overrideIt->second;
392 ( originalShape->dim(1) != overrideShape[1]
393 || originalShape->dim(2) != overrideShape[2]
394 || originalShape->dim(3) != overrideShape[3]))
399 "Parsed input shape for '%1%' is incompatible with the override provided. %2%") %
403 inputTensorInfo.
SetShape(overrideShape);
405 else if (!originalShape)
410 "No input descriptor given for '%1%' and no input shape found in caffe model. %2%") %
416 inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
422 unsigned int kernelW,
423 unsigned int kernelH)
426 ValidateNumInputsOutputs(layerParam, 1, 1);
428 ConvolutionParameter convParam = layerParam.convolution_param();
430 const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
439 vector<string> convLayerNames(numGroups);
440 vector<armnn::IConnectableLayer*> convLayers(numGroups);
441 convLayerNames[0] = layerParam.name();
446 unsigned int splitterDimSizes[4] = {
static_cast<unsigned int>(inputShape.dim(0)),
447 static_cast<unsigned int>(inputShape.dim(1)),
448 static_cast<unsigned int>(inputShape.dim(2)),
449 static_cast<unsigned int>(inputShape.dim(3))};
454 splitterDimSizes[1] /= numGroups;
455 inputShape.set_dim(1, splitterDimSizes[1]);
461 for (
unsigned int g = 0; g < numGroups; ++g)
465 ss << layerParam.name() <<
"_" << g;
466 convLayerNames[g] = ss.str();
471 for (
unsigned int dimIdx=0; dimIdx < 4; dimIdx++)
473 splitterDesc.
SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
477 const std::string splitterLayerName = std::string(
"splitter_") + layerParam.bottom(0);
486 unsigned int numFilters = convParam.num_output();
489 BlobShape outputShape;
490 outputShape.add_dim(0);
491 outputShape.set_dim(0, inputShape.dim(0));
492 outputShape.add_dim(1);
494 outputShape.set_dim(1, numFilters / numGroups);
495 outputShape.add_dim(2);
497 2, (static_cast<int>(
498 static_cast<float>(inputShape.dim(2) + 2 * desc.
m_PadBottom - kernelH) /
499 static_cast<float>(desc.
m_StrideY)) + 1));
500 outputShape.add_dim(3);
502 3, (static_cast<int>(
503 static_cast<float>(inputShape.dim(3) + 2 * desc.
m_PadRight - kernelW) /
504 static_cast<float>(desc.
m_StrideX)) + 1));
507 vector<float> weightData(boost::numeric_cast<size_t>(numGroups *
512 GetDataFromBlob(layerParam, weightData, 0);
514 const unsigned int weightDimSizes[4] = {
515 static_cast<unsigned int>(outputShape.dim(1)),
516 static_cast<unsigned int>(inputShape.dim(1)),
521 vector<float> biasData;
525 biasData.resize(boost::numeric_cast<size_t>(numGroups * outputShape.dim(1)), 1.f);
526 GetDataFromBlob(layerParam, biasData, 1);
528 const unsigned int biasDimSizes[1] = {
static_cast<unsigned int>(outputShape.dim(1))};
529 biasInfo =
TensorInfo(1, biasDimSizes, DataType::Float32);
532 const unsigned int numWeightsPerGroup =
boost::numeric_cast<
unsigned int>(weightData.size()) / numGroups;
533 const unsigned int numBiasesPerGroup =
boost::numeric_cast<
unsigned int>(biasData.size()) / numGroups;
535 for (
unsigned int g = 0; g < numGroups; ++g)
542 weightData.data() + numWeightsPerGroup * g);
549 ConstTensor biases(biasInfo, biasData.data() + numBiasesPerGroup * g);
552 convLayer =
m_Network->AddConvolution2dLayer(desc,
555 convLayerNames[g].c_str());
556 convLayers[g] = convLayer;
561 splitterLayer ? splitterLayer->
GetOutputSlot(g) : inputConnection;
562 splitterInputConnection.
Connect(convLayer->GetInputSlot(0));
569 unsigned int concatDimSizes[4] = {
static_cast<unsigned int>(outputShape.dim(0)),
570 static_cast<unsigned int>(outputShape.dim(1)),
571 static_cast<unsigned int>(outputShape.dim(2)),
572 static_cast<unsigned int>(outputShape.dim(3))};
579 for (
unsigned int g = 0; g < numGroups; ++g)
585 concatDimSizes[1] *= numGroups;
586 outputShape.set_dim(1, concatDimSizes[1]);
596 "Failed to create final concat layer for Split+Convolution+Concat. " 597 "Layer=%1% #groups=%2% #filters=%3% %4%") %
604 for (
unsigned int g = 0; g < numGroups; ++g)
606 convLayers[g]->GetOutputSlot(0).Connect(concatLayer->
GetInputSlot(g));
614 unsigned int kernelW,
615 unsigned int kernelH)
618 ValidateNumInputsOutputs(layerParam, 1, 1);
620 ConvolutionParameter convParam = layerParam.convolution_param();
632 unsigned int numFilters = convParam.num_output();
634 BlobShape outputShape;
635 outputShape.add_dim(0);
636 outputShape.set_dim(0, inputShape.dim(0));
637 outputShape.add_dim(1);
638 outputShape.set_dim(1, numFilters);
639 outputShape.add_dim(2);
641 2, (static_cast<int>(
642 static_cast<float>(inputShape.dim(2) + 2 * desc.m_PadBottom - kernelH) /
643 static_cast<float>(desc.m_StrideY)) + 1));
644 outputShape.add_dim(3);
646 3, (static_cast<int>(
647 static_cast<float>(inputShape.dim(3) + 2 * desc.m_PadRight - kernelW) /
648 static_cast<float>(desc.m_StrideX)) + 1));
651 size_t allWeightsSize =
boost::numeric_cast<
size_t>(inputShape.dim(1) * kernelH * kernelW);
652 vector<float> weightData(allWeightsSize);
654 GetDataFromBlob(layerParam, weightData, 0);
657 const unsigned int weightDimSizes[4] = {
658 static_cast<unsigned int>(1),
659 static_cast<unsigned int>(inputShape.dim(1)),
666 vector<float> biasData;
667 if (desc.m_BiasEnabled)
671 biasData.resize(boost::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
672 GetDataFromBlob(layerParam, biasData, 1);
674 const unsigned int biasDimSizes[1] = {
static_cast<unsigned int>(outputShape.dim(1))};
675 biasInfo =
TensorInfo(1, biasDimSizes, DataType::Float32);
680 returnLayer =
m_Network->AddDepthwiseConvolution2dLayer(desc,
683 layerParam.name().c_str());
690 "Failed to create depthwise convolution layer. " 691 "Layer=%1% #filters=%2% %3%") %
697 inputConnection.
Connect(returnLayer->GetInputSlot(0));
716 ValidateNumInputsOutputs(layerParam, 1, 1);
718 ConvolutionParameter convParam = layerParam.convolution_param();
720 const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
721 unsigned int numFilters = convParam.num_output();
723 const auto notFound = std::numeric_limits<unsigned int>::max();
726 kernel_h, kernel_size,
unsigned int, notFound);
728 kernel_w, kernel_size,
unsigned int, notFound);
731 stride_h, stride,
unsigned int, 1u);
733 stride_w, stride,
unsigned int, 1u);
736 pad_h, pad,
unsigned int, 0u);
738 pad_w, pad,
unsigned int, 0u);
741 convolution2dDescriptor.
m_PadLeft = padW;
743 convolution2dDescriptor.
m_PadTop = padH;
745 convolution2dDescriptor.
m_StrideX = strideW;
746 convolution2dDescriptor.
m_StrideY = strideH;
747 convolution2dDescriptor.
m_BiasEnabled = convParam.has_bias_term() ? convParam.bias_term() :
true;
749 if (numGroups > numFilters)
754 "Error parsing Convolution: %1%. " 755 "The 'group'=%2% parameter cannot be larger than the " 756 "number of filters supplied ='%3%'. %4%") %
763 if (inputShape.dim_size() != 4)
768 "Convolution input shape is expected to have 4 dimensions. " 769 "%1%'s input has only %2%. %3%") %
771 inputShape.dim_size() %
777 if (numGroups > inputShape.dim(1))
782 "Error parsing Convolution: %1%. " 783 "The 'group'=%2% parameter cannot be larger than the " 784 "channel of the input shape=%3% (in NCHW format). %4%") %
790 else if (numGroups == inputShape.dim(1))
810 BlobShape outputShape;
811 outputShape.add_dim(0);
812 outputShape.set_dim(0, inputShape.dim(0));
813 outputShape.add_dim(1);
814 outputShape.set_dim(1, numFilters);
815 outputShape.add_dim(2);
817 2, (static_cast<int>(
818 static_cast<float>(inputShape.dim(2) + 2 * padH - kernelH) /
819 static_cast<float>(strideH)) + 1));
820 outputShape.add_dim(3);
822 3, (static_cast<int>(
823 static_cast<float>(inputShape.dim(3) + 2 * padW - kernelW) /
824 static_cast<float>(strideW)) + 1));
827 vector<float> weightData(boost::numeric_cast<size_t>(inputShape.dim(1) *
831 GetDataFromBlob(layerParam, weightData, 0);
833 const unsigned int weightDimSizes[4] = {
834 static_cast<unsigned int>(outputShape.dim(1)),
835 static_cast<unsigned int>(inputShape.dim(1)),
844 vector<float> biasData;
849 biasData.resize(boost::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
850 GetDataFromBlob(layerParam, biasData, 1);
852 const unsigned int biasDimSizes[1] = {
static_cast<unsigned int>(outputShape.dim(1))};
853 biasInfo =
TensorInfo(1, biasDimSizes, DataType::Float32);
859 returnLayer =
m_Network->AddConvolution2dLayer(convolution2dDescriptor,
862 layerParam.name().c_str());
865 inputConnection.
Connect(returnLayer->GetInputSlot(0));
873 "Failed to create Convolution layer. " 874 "Layer=%1% #groups=%2% #filters=%3% %4%") %
890 ValidateNumInputsOutputs(layerParam, 1, 1);
891 PoolingParameter param = layerParam.pooling_param();
894 const auto notFound = std::numeric_limits<unsigned int>::max();
897 kernel_h, kernel_size,
unsigned int, notFound);
899 kernel_w, kernel_size,
unsigned int, notFound);
901 if ((kernel_h == notFound || kernel_w == notFound) && param.has_global_pooling())
908 stride_h, stride,
unsigned int, notFound);
910 stride_h, stride,
unsigned int, notFound);
912 if ((stride_h == notFound || stride_w == notFound) && param.has_global_pooling())
919 pad_h, pad,
unsigned int, 0u);
921 pad_w, pad,
unsigned int, 0u);
925 if (param.has_pool())
927 PoolingParameter_PoolMethod p = param.pool();
930 case PoolingParameter_PoolMethod_MAX:
935 case PoolingParameter_PoolMethod_AVE:
937 pooling2dDescriptor.
m_PoolType = PoolingAlgorithm::Average;
940 case PoolingParameter_PoolMethod_STOCHASTIC:
945 "Pooling Layer: Stochastic Pooling Not Supported. Layer=%1% %2%") %
954 "Pooling Layer: unknown pooling method: %1% for layer: %2% %3%") %
966 "No Pooling Method Defined for %1% %2%") %
973 pooling2dDescriptor.
m_PadTop = pad_h;
975 pooling2dDescriptor.
m_StrideX = stride_w;
976 pooling2dDescriptor.
m_StrideY = stride_h;
984 layerParam.name().c_str());
989 static_cast<unsigned int>(ceil(
990 static_cast<float>(inputInfo.
GetShape()[2] + 2 * pad_h - kernel_h) /
991 boost::numeric_cast<float>(stride_h))) + 1,
992 static_cast<unsigned int>(ceil(
993 static_cast<float>(inputInfo.
GetShape()[3] + 2 * pad_w - kernel_w) /
994 boost::numeric_cast<float>(stride_w))) + 1 },
1004 ValidateNumInputsOutputs(layerParam, 1, 1);
1006 const string& name = layerParam.name();
1007 const ReLUParameter& param = layerParam.relu_param();
1010 const float negativeSlope = param.negative_slope();
1011 if (negativeSlope == 0.0f)
1013 activationDescriptor.
m_Function = ActivationFunction::ReLu;
1017 activationDescriptor.
m_Function = ActivationFunction::LeakyReLu;
1018 activationDescriptor.
m_A = negativeSlope;
1030 ValidateNumInputsOutputs(layerParam, 1, 1);
1032 LRNParameter param = layerParam.lrn_param();
1042 if (param.has_norm_region())
1044 LRNParameter_NormRegion n = param.norm_region();
1047 case LRNParameter_NormRegion_ACROSS_CHANNELS:
1049 normalizationDescriptor.
m_NormChannelType = NormalizationAlgorithmChannel::Across;
1052 case LRNParameter_NormRegion_WITHIN_CHANNEL:
1054 normalizationDescriptor.
m_NormChannelType = NormalizationAlgorithmChannel::Within;
1062 "Unknown region %1% for LRN layer %2% %3%") %
1072 normalizationDescriptor.
m_NormChannelType = NormalizationAlgorithmChannel::Across;
1075 normalizationDescriptor.
m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
1076 if (param.has_local_size())
1078 normalizationDescriptor.
m_NormSize = param.local_size();
1085 "local_size not defined for LRN layer %1% %2%") %
1090 if (param.has_alpha())
1092 normalizationDescriptor.
m_Alpha = param.alpha();
1100 "Alpha not defined for LRN layer %1% %2%") %
1104 if (param.has_beta())
1106 normalizationDescriptor.
m_Beta = param.beta();
1113 "Beta not defined for LRN layer %1% %2%") %
1120 normalizationDescriptor.
m_K = param.k();
1124 normalizationDescriptor.
m_K = 1;
1128 layerParam.name().c_str());
1137 InnerProductParameter param = layerParam.inner_product_param();
1139 ValidateNumInputsOutputs(layerParam, 1, 1);
1141 unsigned int outputSize = param.num_output();
1151 if (param.has_transpose())
1168 unsigned int inputSize = inputInfo.
GetShape()[1];
1171 inputSize *= inputInfo.
GetShape()[i];
1174 const float* weightDataPtr = GetArrayPtrFromBlob(layerParam, 0);
1175 const unsigned int swTD[2] = { outputSize, inputSize };
1184 const float* biasDataPtr = GetArrayPtrFromBlob(layerParam, 1);
1186 const unsigned int sbTD[1] = { outputSize };
1190 fullyConnectedLayer =
m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor,
1193 layerParam.name().c_str());
1197 fullyConnectedLayer =
m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor,
1200 layerParam.name().c_str());
1211 ValidateNumInputsOutputs(layerParam, 1, 1);
1213 SoftmaxParameter param = layerParam.softmax_param();
1222 softmaxDescriptor.
m_Axis = 1;
1225 layerParam.name().c_str());
1233 ValidateNumInputsOutputs(layerParam, 2, 1);
1240 EltwiseParameter_EltwiseOp operation = EltwiseParameter_EltwiseOp_SUM;
1242 if (layerParam.has_eltwise_param() && layerParam.eltwise_param().has_operation())
1244 operation = layerParam.eltwise_param().operation();
1250 case EltwiseParameter_EltwiseOp_SUM:
1252 newLayer =
m_Network->AddAdditionLayer(layerParam.name().c_str());
1255 case EltwiseParameter_EltwiseOp_PROD:
1257 newLayer =
m_Network->AddMultiplicationLayer(layerParam.name().c_str());
1265 "Unsupported operation %1% in Eltwise layer %2% %3%") %
1280 unsigned int numInputs =
static_cast<unsigned int>(layerParam.bottom_size());
1282 unsigned int concatDim = 1;
1283 unsigned int numOfDims = 4;
1286 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numInputs), numOfDims);
1287 std::vector<unsigned int>mergeDimSizes(numOfDims, 0u);
1289 unsigned int mergeDim = 0;
1290 for (
unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
1293 layerParam.bottom(boost::numeric_cast<int>(viewIndex))).
GetTensorInfo();
1300 "The number of dimensions for input tensors of " 1301 "the concatenation op should be 4. Inputs of %1% has " 1302 "%2% dimensions. %3%") %
1308 mergeDimSizes[0] = inputInfo.
GetShape()[0];
1309 mergeDimSizes[1] = inputInfo.
GetShape()[1];
1310 mergeDimSizes[2] = inputInfo.
GetShape()[2];
1311 mergeDimSizes[3] = inputInfo.
GetShape()[3];
1313 for (
unsigned int j = 0; j < concatDim; ++j)
1319 mergeDim += mergeDimSizes[concatDim];
1321 for (
unsigned int j = concatDim+1; j < numOfDims; ++j)
1326 mergeDimSizes[concatDim] = mergeDim;
1329 for (
unsigned int i = 0; i < numInputs; ++i)
1341 ValidateNumInputsOutputs(layerParam, 1, 1);
1345 string name = layerParam.name();
1347 BatchNormParameter param = layerParam.batch_norm_param();
1350 if (param.has_use_global_stats())
1352 if (!param.use_global_stats())
1357 "Error parsing Batch Norm layer '%1%': " 1358 "Parameter 'use_global_stats' is set to false, which is " 1359 "unsupported (value used for training). %2%") %
1366 desc.
m_Eps = param.eps();
1368 unsigned int channels = inputInfo.
GetShape()[1];
1369 unsigned int shape[] = {channels};
1371 vector<float> meanData(channels);
1372 GetDataFromBlob(layerParam, meanData, 0);
1374 vector<float> varianceData(channels);
1375 GetDataFromBlob(layerParam, varianceData, 1);
1378 const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(2));
1379 const float movingAverageFactor = blob.data(boost::numeric_cast<int>(0));
1380 if(movingAverageFactor != 0.0f)
1382 const float scaleFactor = 1.0f / movingAverageFactor;
1383 auto scaleFunction = [scaleFactor](
float f) ->
float {
return f * scaleFactor; };
1385 std::transform(varianceData.begin(), varianceData.end(), varianceData.begin(), scaleFunction);
1386 std::transform(meanData.begin(), meanData.end(), meanData.begin(), scaleFunction);
1390 vector<float> betaData(channels, 0.0f);
1391 vector<float> gammaData(channels, 1.0f);
1399 mean, variance, beta, gamma, name.c_str());
1408 ValidateNumInputsOutputs(layerParam, 1, 1);
1412 string name = layerParam.name();
1414 ScaleParameter param = layerParam.scale_param();
1415 if (param.axis() != 1)
1421 "Loading Scale Layer: Only axis 1 is supported currently. " 1422 "Layer=%1% Axis=%2% %3%") %
1428 unsigned int channels = inputInfo.
GetShape()[1];
1429 unsigned int shape[] = {channels};
1433 vector<float> meanData(channels, 0.0f);
1434 vector<float> varianceData(channels, 1.0f);
1435 vector<float> betaData(channels, 0.0f);
1436 vector<float> gammaData(channels);
1438 GetDataFromBlob(layerParam, gammaData, 0);
1440 if(param.has_bias_term())
1442 GetDataFromBlob(layerParam, betaData, 1);
1451 mean, variance, beta, gamma, name.c_str());
1460 if (layerParam.bottom_size() != 1)
1465 "Split layer '%1%' should have exactly 1 bottom. " 1466 "#bottoms=%2% %3%") %
1468 layerParam.bottom_size() %
1472 for (
int i = 0; i < layerParam.top_size(); i++)
1481 if (layerParam.bottom_size() != 1 || layerParam.top_size() != 1)
1486 "Dropout layer '%1%' should have exactly 1 bottom and 1 top. " 1487 "#bottoms=%2% #tops=%3% %4%") %
1489 layerParam.bottom_size() %
1490 layerParam.top_size() %
1513 const char* bindingPointDesc,
1514 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
1516 const std::string layerName = layer->
GetName();
1517 auto it = nameToBindingInfo.find(layerName);
1518 if (it == nameToBindingInfo.end())
1520 nameToBindingInfo[layerName] = std::make_pair(
id, tensorInfo);
1527 "Id %1% used by more than one %2% layer %3%") %
1546 "Could not find armnn output slot for Caffe top '%1%' %2%") %
1565 "Attempting to add duplicate entry for Caffe top '%1%' %2%") %
1576 std::map<std::string, std::vector<caffe::LayerParameter*>> layersByTop;
1577 for (
int layerIdx = 0; layerIdx < netParameter.layer_size(); ++layerIdx)
1579 caffe::LayerParameter& layer = *netParameter.mutable_layer(layerIdx);
1580 std::string name = layer.name();
1581 for (
int i = 0; i < layer.top_size(); ++i)
1583 layersByTop[layer.top(i)].push_back(&layer);
1589 for (
auto layersWithSameTopIt : layersByTop)
1591 const std::string& top = layersWithSameTopIt.first;
1592 const std::vector<caffe::LayerParameter*>& layersWithSameTop = layersWithSameTopIt.second;
1596 for (
unsigned int layerIdx = 0; layerIdx < layersWithSameTop.size() - 1; ++layerIdx)
1598 caffe::LayerParameter& layer1 = *layersWithSameTop[layerIdx];
1599 caffe::LayerParameter& layer2 = *layersWithSameTop[layerIdx+1];
1600 if (layer1.top_size() != 1)
1605 "Node '%1%' is an in-place layer but doesn't have exactly one " 1606 "top. It has %2% instead. %3%") %
1611 std::string newTop = layer1.name() +
"_top";
1612 layer1.set_top(0, newTop);
1613 if (layer2.bottom_size() != 1 || layer2.bottom(0) != top)
1618 "Node '%1%' is an in-place layer but " 1619 "doesn't have exactly one bottom, or it doesn't match its top. " 1620 "#bottoms=%2%, first bottom is %3%, top is %4% %5%") %
1626 layer2.set_bottom(0, newTop);
1637 if (netParameter.input_size() > 0)
1639 LayerParameter* newLayer = netParameter.add_layer();
1641 newLayer->set_type(
"Input");
1642 newLayer->set_name(netParameter.input(0));
1643 newLayer->add_top(netParameter.input(0));
1645 InputParameter* inputParam = newLayer->mutable_input_param();
1646 BlobShape* shape = inputParam->add_shape();
1648 int dim_size = netParameter.input_dim_size();
1649 for (
int i = 0; i < dim_size; ++i)
1651 shape->add_dim(netParameter.input_dim(i));
1659 for (
int i = 0; i < netParameter.layer_size(); ++i)
1661 const caffe::LayerParameter& layer = netParameter.layer(i);
1662 for (
int i = 0; i < layer.top_size(); ++i)
1669 std::vector<const caffe::LayerParameter*> targetLayers;
1678 "Couldn't find requested output layer '%1%' in graph %2%") %
1679 requestedOutputName %
1682 targetLayers.push_back(nodeIt->second);
1686 std::vector<const caffe::LayerParameter*> sortedNodes;
1687 if (!armnnUtils::GraphTopologicalSort<const caffe::LayerParameter*>(
1689 [
this](
const caffe::LayerParameter* node)
1698 "Cycle detected in graph. #nodes: %1% %2%") %
1699 sortedNodes.size() %
1704 for (
const caffe::LayerParameter* current : sortedNodes)
1711 boost::format(
"Unsupported layer type: '%1%' for layer %2% %3%") %
1716 auto func = it->second;
1717 (this->*func)(*current);
1721 for (
const std::string& requestedOutput : m_RequestedOutputs)
1728 outputSlot.
Connect(outputLayer->GetInputSlot(0));
1730 TrackOutputBinding(outputLayer, outputId, outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo());
1735 const std::map<std::string, armnn::TensorShape>& inputShapes,
1736 const std::vector<std::string>& requestedOutputs)
1738 FILE* fd = fopen(graphFile,
"r");
1745 "Failed to open graph file: %1% %2%") %
1751 NetParameter netParam;
1752 auto input =
new google::protobuf::io::FileInputStream(fileno(fd));
1753 bool success = google::protobuf::TextFormat::Parse(input, &netParam);
1762 "Failed to parse graph file: %1% %2%") %
1771 const std::map<std::string, armnn::TensorShape>& inputShapes,
1772 const std::vector<std::string>& requestedOutputs)
1775 NetParameter netParam;
1776 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &netParam);
1783 "Failed to parse graph string %1%") %
1791 const std::map<std::string, armnn::TensorShape>& inputShapes,
1792 const std::vector<std::string>& requestedOutputs)
1794 FILE* fd = fopen(graphFile,
"rb");
1801 "Failed to open graph file at: %1% %2%") %
1807 NetParameter netParam;
1809 FileInputStream inStream(fileno(fd));
1810 CodedInputStream codedStream(&inStream);
1811 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
1812 bool success = netParam.ParseFromCodedStream(&codedStream);
1820 "Failed to parse protobuf file: %1% %2%") %
1831 const std::map<std::string, armnn::TensorShape>& inputShapes,
1832 const std::vector<std::string>& requestedOutputs)
1840 if (requestedOutputs.size() == 0)
1842 throw ParseException(
"requestedOutputs must have at least one entry");
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
void ParseConvLayer(const caffe::LayerParameter &layerParam)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
virtual BindingPointInfo GetNetworkInputBindingInfo(const std::string &name) const override
Retrieves binding info (layer id and tensor info) for the network input identified by the given layer...
float m_K
Kappa value used for the across channel normalization equation.
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
const TensorShape & GetShape() const
static void Destroy(ICaffeParser *parser)
uint32_t m_PadLeft
Padding left value in the width dimension.
armnn::IOutputSlot & GetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName) const
Retrieves the Armnn IOutputSlot representing the given Caffe top.
virtual armnn::INetworkPtr CreateNetworkFromString(const char *protoText, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs) override
Creates the network directly from protobuf text in a string. Useful for debugging/testing.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
uint32_t m_PoolWidth
Pooling width value.
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
uint32_t m_PadLeft
Padding left value in the width dimension.
static const std::map< std::string, OperationParsingFunction > ms_CaffeLayerNameToParsingFunctions
Maps Caffe layer names to parsing member functions.
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
void LoadNetParam(caffe::NetParameter &netParameter)
does the actual conversion from caffe::NetParameter to armnn::INetwork
void ParseInputLayer(const caffe::LayerParameter &layerParam)
Adds an armnn layer to m_Network given a Caffe LayerParameter of the correct type and is responsible ...
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_PadRight
Padding right value in the width dimension.
Copyright (c) 2020 ARM Limited.
static std::pair< armnn::LayerBindingId, armnn::TensorInfo > GetBindingInfo(const std::string &layerName, const char *bindingPointDesc, const std::unordered_map< std::string, BindingPointInfo > &bindingInfos)
std::vector< std::string > m_RequestedOutputs
Caffe networks are loaded from protobuf files (binary or text) using the protobuf library and the gen...
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
virtual armnn::INetworkPtr CreateNetworkFromTextFile(const char *graphFile, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs) override
Create the network from a protobuf text file on disk.
void SetShape(const TensorShape &newShape)
void ParseBatchNormLayer(const caffe::LayerParameter &layerParam)
void TrackOutputBinding(armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo)
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
void ResolveInPlaceLayers(caffe::NetParameter &netParameter)
Modifies the Caffe network to replace "in-place" layers (whose top() and bottom() are both the same) ...
uint32_t m_PadRight
Padding right value in the width dimension.
Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value)
Set the size of the views.
static ICaffeParserPtr Create()
An output connection slot for a layer.
An OriginsDescriptor for the ConcatLayer.
std::unordered_map< std::string, armnn::IOutputSlot * > m_ArmnnOutputSlotForCaffeTop
As we add armnn layers we store the armnn IOutputSlot which corresponds to the Caffe tops...
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
std::unordered_map< std::string, BindingPointInfo > m_NetworkInputsBindingInfo
maps input layer names to their corresponding ids and tensor infos
virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char *graphFile, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs) override
Create the network from a protobuf binary file on disk.
#define ARMNN_ASSERT(COND)
#define GET_OPTIONAL_WITH_FALLBACK(PARAM, PARAM_TYPE, OPTIONAL_VALUE, FALLBACK_VALUE, VALUE_TYPE, DEFAULT_VALUE)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
std::unordered_map< std::string, BindingPointInfo > m_NetworkOutputsBindingInfo
Maps output layer names to their corresponding ids and tensor infos.
std::unique_ptr< ICaffeParser, void(*)(ICaffeParser *parser)> ICaffeParserPtr
armnn::TensorInfo BlobShapeToTensorInfo(const caffe::BlobShape &blobShape) const
Converts Caffe's protobuf tensor shape format to ArmNN's.
std::map< std::string, armnn::TensorShape > m_InputShapes
An ActivationDescriptor for the ActivationLayer.
void ParseInnerProductLayer(const caffe::LayerParameter &layerParam)
void ParseSoftmaxLayer(const caffe::LayerParameter &layerParam)
armnn::INetworkPtr CreateNetworkFromNetParameter(caffe::NetParameter &netParam, const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
Parses a NetParameter loaded into memory from one of the other CreateNetwork*.
void ParseReluLayer(const caffe::LayerParameter &layerParam)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void AddConvLayerWithSplits(const caffe::LayerParameter &layerParam, const armnn::Convolution2dDescriptor &desc, unsigned int kernelW, unsigned int kernelH)
ParseConv may use these helpers depending on the group parameter.
static void TrackBindingPoint(armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo, const char *bindingPointDesc, std::unordered_map< std::string, BindingPointInfo > &nameToBindingInfo)
void ParseScaleLayer(const caffe::LayerParameter &layerParam)
void ParseDropoutLayer(const caffe::LayerParameter &layerParam)
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
virtual BindingPointInfo GetNetworkOutputBindingInfo(const std::string &name) const override
Retrieves binding info (layer id and tensor info) for the network output identified by the given laye...
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
std::pair< armnn::LayerBindingId, armnn::TensorInfo > BindingPointInfo
void TrackInputBinding(armnn::IConnectableLayer *layer, armnn::LayerBindingId id, const armnn::TensorInfo &tensorInfo)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
void ParseEltwiseLayer(const caffe::LayerParameter &layerParam)
static ICaffeParser * CreateRaw()
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
std::vector< const caffe::LayerParameter * > GetInputs(const caffe::LayerParameter &layerParam)
Find the Caffe layers listed as inputs (bottoms) for a given layer.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
void ParseLRNLayer(const caffe::LayerParameter &layerParam)
virtual const char * GetName() const =0
Returns the name of the layer.
#define GET_OPTIONAL_WITH_VECTOR_FALLBACK(PARAM, PARAM_TYPE, OPTIONAL_VALUE, FALLBACK_VECTOR, VALUE_TYPE, DEFAULT_VALUE)
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
virtual int Connect(IInputSlot &destination)=0
A Pooling2dDescriptor for the Pooling2dLayer.
void SetArmnnOutputSlotForCaffeTop(const std::string &caffeTopName, armnn::IOutputSlot &armnnOutputSlot)
A NormalizationDescriptor for the NormalizationLayer.
unsigned int GetNumDimensions() const
void AddConvLayerWithDepthwiseConv(const caffe::LayerParameter &layerParam, const armnn::Convolution2dDescriptor &desc, unsigned int kernelW, unsigned int kernelH)
A SoftmaxDescriptor for the SoftmaxLayer.
float m_Beta
Beta value for the normalization equation.
void ParsePoolingLayer(const caffe::LayerParameter &layerParam)
uint32_t m_NormSize
Depth radius value.
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
static INetworkPtr Create()
std::map< std::string, const caffe::LayerParameter * > m_CaffeLayersByTopName
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
armnn::INetworkPtr m_Network
void ParseConcatLayer(const caffe::LayerParameter &layerParam)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
BlobShape TensorDescToBlobShape(const TensorInfo &desc)
void ParseSplitLayer(const caffe::LayerParameter &layerParam)
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.