#include <fmt/format.h>

#include "caffe/proto/caffe.pb.h"

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/zero_copy_stream.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/once.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/generated_message_reflection.h>
#include <google/protobuf/reflection_ops.h>
#include <google/protobuf/wire_format.h>

using namespace armnn;
using namespace caffe;
ICaffeParser::ICaffeParser() : pCaffeParserImpl(new RecordByRecordCaffeParser()) {}

ICaffeParser::~ICaffeParser() = default;
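// Illustrative usage sketch (not part of the original file): driving the public
// ICaffeParser API end to end. The model path, input name/shape and output name
// below are assumptions for the example only.
//
//   armnnCaffeParser::ICaffeParserPtr parser = armnnCaffeParser::ICaffeParser::Create();
//   armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(
//       "model.caffemodel",
//       {{ "data", armnn::TensorShape({ 1, 3, 224, 224 }) }},
//       { "prob" });
//   armnnCaffeParser::BindingPointInfo inputInfo  = parser->GetNetworkInputBindingInfo("data");
//   armnnCaffeParser::BindingPointInfo outputInfo = parser->GetNetworkOutputBindingInfo("prob");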
armnn::INetworkPtr ICaffeParser::CreateNetworkFromTextFile(
    const char* graphFile,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    return pCaffeParserImpl->CreateNetworkFromTextFile(graphFile, inputShapes, requestedOutputs);
}

armnn::INetworkPtr ICaffeParser::CreateNetworkFromBinaryFile(
    const char* graphFile,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    return pCaffeParserImpl->CreateNetworkFromBinaryFile(graphFile, inputShapes, requestedOutputs);
}

armnn::INetworkPtr ICaffeParser::CreateNetworkFromString(
    const char* protoText,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    return pCaffeParserImpl->CreateNetworkFromString(protoText, inputShapes, requestedOutputs);
}

BindingPointInfo ICaffeParser::GetNetworkInputBindingInfo(const std::string& name) const
{
    return pCaffeParserImpl->GetNetworkInputBindingInfo(name);
}

BindingPointInfo ICaffeParser::GetNetworkOutputBindingInfo(const std::string& name) const
{
    return pCaffeParserImpl->GetNetworkOutputBindingInfo(name);
}
const float* GetArrayPtrFromBlob(const LayerParameter& layerParam, unsigned int blobIndex)
{
    auto nBlobs = layerParam.blobs_size();
    if (blobIndex >= armnn::numeric_cast<unsigned int>(nBlobs))
    {
        throw ParseException(
            fmt::format("Expected data blob at index {} in layer {} not found. nBlobs={}. {}",
                        blobIndex, layerParam.name(), nBlobs, CHECK_LOCATION().AsString()));
    }

    const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(blobIndex));

    const float* arrayPtr = blob.data().data();
    return arrayPtr;
}
void GetDataFromBlob(const LayerParameter& layerParam, vector<float>& outData, unsigned int blobIndex)
{
    auto nBlobs = layerParam.blobs_size();
    if (blobIndex >= armnn::numeric_cast<unsigned int>(nBlobs))
    {
        throw ParseException(
            fmt::format("Expected data blob at index {} in layer {} not found. {}",
                        blobIndex, layerParam.name(), CHECK_LOCATION().AsString()));
    }

    const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(blobIndex));

    size_t blobSize = armnn::numeric_cast<size_t>(blob.data_size());
    if (blobSize != outData.size())
    {
        throw ParseException(
            fmt::format("Data blob at index {} in layer {} has an unexpected size. "
                        "Expected {} elements but got {} elements. {}",
                        blobIndex, layerParam.name(), outData.size(), blobSize,
                        CHECK_LOCATION().AsString()));
    }

    int outSizeInt = armnn::numeric_cast<int>(outData.size());
    for (int i = 0; i < outSizeInt; ++i)
    {
        outData[static_cast<size_t>(i)] = blob.data(i);
    }
}
template <typename T>
size_t SizeOfVectorData(const vector<T>& vec)
{
    return vec.size() * sizeof(T);
}
void ValidateNumInputsOutputs(const caffe::LayerParameter& layerParameter,
                              unsigned int numInputs,
                              unsigned int numOutputs)
{
    int numInputsActual = layerParameter.bottom_size();
    if (numInputs != armnn::numeric_cast<unsigned int>(numInputsActual))
    {
        throw ParseException(
            fmt::format("Invalid number of inputs requested {} for layer {} "
                        "while only {} present. {}",
                        numInputs, layerParameter.name(), numInputsActual,
                        CHECK_LOCATION().AsString()));
    }

    int numOutputsActual = layerParameter.top_size();
    if (numOutputs != armnn::numeric_cast<unsigned int>(numOutputsActual))
    {
        throw ParseException(
            fmt::format("Invalid number of outputs requested {} for layer {} "
                        "while only {} present. {}",
                        numOutputs, layerParameter.name(), numOutputsActual,
                        CHECK_LOCATION().AsString()));
    }
}
template <typename ParamType,
          typename ExtractOptional,
          typename ExtractFallback,
          typename ValueType>
ValueType GetOptionalWithFallback(const ParamType& param,
                                  ExtractOptional extractOptional,
                                  ExtractFallback extractFallback,
                                  ValueType defaultValue)
{
    auto optValue = extractOptional(param, defaultValue);
    if (optValue.first)
    {
        return optValue.second;
    }

    auto fallbackValue = extractFallback(param, defaultValue);
    return fallbackValue.second;
}
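// Illustrative sketch (not in the original file): how GetOptionalWithFallback is
// meant to be driven through the macros defined below, e.g. preferring the
// optional kernel_h field of a ConvolutionParameter and falling back to the
// repeated kernel_size field, with 0 as the default:
//
//   unsigned int kernelH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
//                                                            kernel_h, kernel_size,
//                                                            unsigned int, 0u);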
#define GET_OPTIONAL_WITH_VECTOR_FALLBACK(PARAM, \
                                          PARAM_TYPE, \
                                          OPTIONAL_VALUE, \
                                          FALLBACK_VECTOR, \
                                          VALUE_TYPE, \
                                          DEFAULT_VALUE) \
    GetOptionalWithFallback( \
        PARAM, \
        [](const PARAM_TYPE& param, VALUE_TYPE defaultValue) \
        { \
            if (param.has_##OPTIONAL_VALUE()) \
            { \
                return std::make_pair(true, param.OPTIONAL_VALUE()); \
            } \
            else \
            { \
                return std::make_pair(false, defaultValue); \
            } \
        }, \
        [](const PARAM_TYPE& param, VALUE_TYPE defaultValue) \
        { \
            if (param.FALLBACK_VECTOR##_size() > 0) \
            { \
                return std::make_pair(true, (param.FALLBACK_VECTOR()).Get(0)); \
            } \
            else \
            { \
                return std::make_pair(false, defaultValue); \
            } \
        }, \
        DEFAULT_VALUE)

#define GET_OPTIONAL_WITH_FALLBACK(PARAM, \
                                   PARAM_TYPE, \
                                   OPTIONAL_VALUE, \
                                   FALLBACK_VALUE, \
                                   VALUE_TYPE, \
                                   DEFAULT_VALUE) \
    GetOptionalWithFallback( \
        PARAM, \
        [](const PARAM_TYPE& param, VALUE_TYPE defaultValue) \
        { \
            if (param.has_##OPTIONAL_VALUE()) \
            { \
                return std::make_pair(true, param.OPTIONAL_VALUE()); \
            } \
            else \
            { \
                return std::make_pair(false, defaultValue); \
            } \
        }, \
        [](const PARAM_TYPE& param, VALUE_TYPE defaultValue) \
        { \
            if (param.has_##FALLBACK_VALUE()) \
            { \
                return std::make_pair(true, param.FALLBACK_VALUE()); \
            } \
            else \
            { \
                return std::make_pair(false, defaultValue); \
            } \
        }, \
        DEFAULT_VALUE)

const std::map<std::string, ICaffeParser::CaffeParserImpl::OperationParsingFunction>
    ICaffeParser::CaffeParserImpl::ms_CaffeLayerNameToParsingFunctions = {
    { "Input",         &CaffeParserImpl::ParseInputLayer },
    { "Convolution",   &CaffeParserImpl::ParseConvLayer },
    { "Deconvolution", &CaffeParserImpl::ParseDeconvLayer },
    { "Pooling",       &CaffeParserImpl::ParsePoolingLayer },
    { "ReLU",          &CaffeParserImpl::ParseReluLayer },
    { "LRN",           &CaffeParserImpl::ParseLRNLayer },
    { "InnerProduct",  &CaffeParserImpl::ParseInnerProductLayer },
    { "Softmax",       &CaffeParserImpl::ParseSoftmaxLayer },
    { "Eltwise",       &CaffeParserImpl::ParseEltwiseLayer },
    { "Concat",        &CaffeParserImpl::ParseConcatLayer },
    { "BatchNorm",     &CaffeParserImpl::ParseBatchNormLayer },
    { "Scale",         &CaffeParserImpl::ParseScaleLayer },
    { "Split",         &CaffeParserImpl::ParseSplitLayer },
    { "Dropout",       &CaffeParserImpl::ParseDropoutLayer },
    { "ArgMax",        &CaffeParserImpl::ParseArgmaxLayer },
};
ICaffeParser::CaffeParserImpl::CaffeParserImpl()
    : m_Network(nullptr, nullptr)
{
}
std::pair<armnn::LayerBindingId, armnn::TensorInfo> ICaffeParser::CaffeParserImpl::GetBindingInfo(
    const std::string& layerName,
    const char* bindingPointDesc,
    const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
{
    auto it = nameToBindingInfo.find(layerName);
    if (it == nameToBindingInfo.end())
    {
        throw InvalidArgumentException(
            fmt::format("Unknown binding {} for layer '{}'. {}",
                        bindingPointDesc, layerName, CHECK_LOCATION().AsString()));
    }
    return it->second;
}
TensorInfo ICaffeParser::CaffeParserImpl::BlobShapeToTensorInfo(const caffe::BlobShape& blobShape) const
{
    std::vector<unsigned int> shape;
    for (int j = 0; j < blobShape.dim_size(); ++j)
    {
        shape.push_back(static_cast<unsigned int>(blobShape.dim(j)));
    }

    return TensorInfo(armnn::numeric_cast<unsigned int>(shape.size()), shape.data(), DataType::Float32);
}
BlobShape TensorDescToBlobShape(const TensorInfo& desc)
{
    BlobShape ret;
    for (unsigned int i = 0; i < desc.GetNumDimensions(); ++i)
    {
        ret.add_dim(i);
        ret.set_dim(armnn::numeric_cast<int>(i), desc.GetShape()[i]);
    }

    return ret;
}
std::vector<const caffe::LayerParameter*> ICaffeParser::CaffeParserImpl::GetInputs(
    const LayerParameter& layerParam)
{
    std::vector<const caffe::LayerParameter*> ret;
    ret.reserve(armnn::numeric_cast<size_t>(layerParam.bottom_size()));
    for (int j = 0; j < layerParam.bottom_size(); ++j)
    {
        std::string inputName = layerParam.bottom(j);
        auto inputIt = m_CaffeLayersByTopName.find(inputName);
        if (inputIt == m_CaffeLayersByTopName.end())
        {
            throw ParseException(
                fmt::format("Can't find Caffe layer with top called '{}', "
                            "which is listed as an input of '{}'. {}",
                            inputName, layerParam.name(), CHECK_LOCATION().AsString()));
        }
        ret.push_back(inputIt->second);
    }

    return ret;
}
void ICaffeParser::CaffeParserImpl::ParseInputLayer(const LayerParameter& layerParam)
{
    ValidateNumInputsOutputs(layerParam, 0, 1);

    const InputParameter& param = layerParam.input_param();
    // ... (an armnn input layer 'inputLayer' and a default 'inputTensorInfo' are created here)

    const BlobShape* originalShape = param.shape_size() > 0 && param.shape(0).dim_size() > 0 ?
                                     &param.shape(0) : nullptr;

    // ... (look up an override shape supplied by the caller)
    auto overrideIt = m_InputShapes.find(layerParam.name());
    if (overrideIt != m_InputShapes.end())
    {
        const TensorShape& overrideShape = overrideIt->second;
        if (originalShape &&
            (   originalShape->dim(1) != overrideShape[1]
             || originalShape->dim(2) != overrideShape[2]
             || originalShape->dim(3) != overrideShape[3]))
        {
            throw ParseException(
                fmt::format("Parsed input shape for '{}' is incompatible with the override provided. {}",
                            layerParam.name(), CHECK_LOCATION().AsString()));
        }
        inputTensorInfo.SetShape(overrideShape);
    }
    else if (!originalShape)
    {
        throw ParseException(
            fmt::format("No input descriptor given for '{}' and no input shape found in caffe model. {}",
                        layerParam.name(), CHECK_LOCATION().AsString()));
    }
    // ...
    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
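// For reference, the kind of prototxt fragment handled by ParseInputLayer above
// (example values, not taken from the original file):
//
//   layer {
//     name: "data"
//     type: "Input"
//     input_param { shape { dim: 1 dim: 3 dim: 224 dim: 224 } }
//   }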
void ICaffeParser::CaffeParserImpl::AddConvLayerWithSplits(const caffe::LayerParameter& layerParam,
                                                           const armnn::Convolution2dDescriptor& desc,
                                                           unsigned int kernelW,
                                                           unsigned int kernelH)
{
    // ...
    ValidateNumInputsOutputs(layerParam, 1, 1);

    ConvolutionParameter convParam = layerParam.convolution_param();
    // ... (the input connection 'inputConnection' and its BlobShape 'inputShape' are fetched here)
    const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;

    // ...
    vector<string> convLayerNames(numGroups);
    vector<armnn::IConnectableLayer*> convLayers(numGroups);
    convLayerNames[0] = layerParam.name();

    // ...
    unsigned int splitterDimSizes[4] = { static_cast<unsigned int>(inputShape.dim(0)),
                                         static_cast<unsigned int>(inputShape.dim(1)),
                                         static_cast<unsigned int>(inputShape.dim(2)),
                                         static_cast<unsigned int>(inputShape.dim(3)) };

    // ... (a ViewsDescriptor 'splitterDesc' with numGroups views is created here)
    splitterDimSizes[1] /= numGroups;
    inputShape.set_dim(1, splitterDimSizes[1]);

    // ...
    for (unsigned int g = 0; g < numGroups; ++g)
    {
        stringstream ss;
        ss << layerParam.name() << "_" << g;
        convLayerNames[g] = ss.str();

        // ...
        for (unsigned int dimIdx = 0; dimIdx < 4; dimIdx++)
        {
            splitterDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
        }
    }

    const std::string splitterLayerName = std::string("splitter_") + layerParam.bottom(0);
    // ... (for numGroups > 1, the splitter layer 'splitterLayer' is created here)

    unsigned int numFilters = convParam.num_output();

    // ...
    BlobShape outputShape;
    outputShape.add_dim(0);
    outputShape.set_dim(0, inputShape.dim(0));
    outputShape.add_dim(1);
    outputShape.set_dim(1, numFilters / numGroups);
    outputShape.add_dim(2);
    outputShape.set_dim(
        2, (static_cast<int>(
            static_cast<float>(inputShape.dim(2) + 2 * desc.m_PadBottom - (desc.m_DilationX * (kernelH - 1) + 1)) /
            static_cast<float>(desc.m_StrideY)) + 1));
    outputShape.add_dim(3);
    outputShape.set_dim(
        3, (static_cast<int>(
            static_cast<float>(inputShape.dim(3) + 2 * desc.m_PadRight - (desc.m_DilationY * (kernelW - 1) + 1)) /
            static_cast<float>(desc.m_StrideX)) + 1));
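    // The two set_dim calls above apply the usual convolution output-size formula:
    //   out = floor((in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride) + 1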
    vector<float> weightData(armnn::numeric_cast<size_t>(numGroups *
                                                         inputShape.dim(1) *
                                                         outputShape.dim(1) *
                                                         kernelH *
                                                         kernelW));
    GetDataFromBlob(layerParam, weightData, 0);

    const unsigned int weightDimSizes[4] = {
        static_cast<unsigned int>(outputShape.dim(1)),
        static_cast<unsigned int>(inputShape.dim(1)),
        kernelH,
        kernelW };
    TensorInfo weightInfo(4, weightDimSizes, DataType::Float32);

    TensorInfo biasInfo;
    vector<float> biasData;
    if (desc.m_BiasEnabled)
    {
        biasData.resize(armnn::numeric_cast<size_t>(numGroups * outputShape.dim(1)), 1.f);
        GetDataFromBlob(layerParam, biasData, 1);

        const unsigned int biasDimSizes[1] = { static_cast<unsigned int>(outputShape.dim(1)) };
        biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
    }

    const unsigned int numWeightsPerGroup = armnn::numeric_cast<unsigned int>(weightData.size()) / numGroups;
    const unsigned int numBiasesPerGroup  = armnn::numeric_cast<unsigned int>(biasData.size()) / numGroups;

    for (unsigned int g = 0; g < numGroups; ++g)
    {
        armnn::IConnectableLayer* convLayer = nullptr;
        ConstTensor weights(weightInfo,
                            weightData.data() + numWeightsPerGroup * g);
        // ...
        ConstTensor biases(biasInfo, biasData.data() + numBiasesPerGroup * g);

        // ...
        convLayer = m_Network->AddConvolution2dLayer(desc,
                                                     // ... (weights and optional biases)
                                                     convLayerNames[g].c_str());
        convLayers[g] = convLayer;

        armnn::IOutputSlot& splitterInputConnection =
            splitterLayer ? splitterLayer->GetOutputSlot(g) : inputConnection;
        splitterInputConnection.Connect(convLayer->GetInputSlot(0));
    }

    // ...
    unsigned int concatDimSizes[4] = { static_cast<unsigned int>(outputShape.dim(0)),
                                       static_cast<unsigned int>(outputShape.dim(1)),
                                       static_cast<unsigned int>(outputShape.dim(2)),
                                       static_cast<unsigned int>(outputShape.dim(3)) };

    // ...
    for (unsigned int g = 0; g < numGroups; ++g)
    {
        // ... (set the concat view origin for group g)
    }
    concatDimSizes[1] *= numGroups;
    outputShape.set_dim(1, concatDimSizes[1]);

    // ...
    if (!concatLayer)
    {
        throw ParseException(
            fmt::format("Failed to create final concat layer for Split+Convolution+Concat. "
                        "Layer={} #groups={} #filters={} {}",
                        layerParam.name(), numGroups, numFilters, CHECK_LOCATION().AsString()));
    }

    for (unsigned int g = 0; g < numGroups; ++g)
    {
        convLayers[g]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(g));
    }
    // ...
}
void ICaffeParser::CaffeParserImpl::AddDeconvLayerWithSplits(const caffe::LayerParameter& layerParam,
                                                             const armnn::TransposeConvolution2dDescriptor& desc,
                                                             unsigned int kernelW,
                                                             unsigned int kernelH)
{
    // ...
    ValidateNumInputsOutputs(layerParam, 1, 1);

    ConvolutionParameter convParam = layerParam.convolution_param();
    // ... (the input connection 'inputConnection' and its BlobShape 'inputShape' are fetched here)
    const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;

    // ...
    vector<string> convLayerNames(numGroups);
    vector<armnn::IConnectableLayer*> convLayers(numGroups);
    convLayerNames[0] = layerParam.name();

    // ...
    unsigned int splitterDimSizes[4] = { static_cast<unsigned int>(inputShape.dim(0)),
                                         static_cast<unsigned int>(inputShape.dim(1)),
                                         static_cast<unsigned int>(inputShape.dim(2)),
                                         static_cast<unsigned int>(inputShape.dim(3)) };

    // ... (a ViewsDescriptor 'splitterDesc' with numGroups views is created here)
    splitterDimSizes[1] /= numGroups;
    inputShape.set_dim(1, splitterDimSizes[1]);

    // ...
    for (unsigned int g = 0; g < numGroups; ++g)
    {
        stringstream ss;
        ss << layerParam.name() << "_" << g;
        convLayerNames[g] = ss.str();

        // ...
        for (unsigned int dimIdx = 0; dimIdx < 4; dimIdx++)
        {
            splitterDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
        }
    }

    const std::string splitterLayerName = std::string("splitter_") + layerParam.bottom(0);
    // ... (for numGroups > 1, the splitter layer 'splitterLayer' is created here)

    unsigned int numFilters = convParam.num_output();

    // ...
    BlobShape outputShape;
    outputShape.add_dim(0);
    outputShape.set_dim(0, inputShape.dim(0));
    outputShape.add_dim(1);
    outputShape.set_dim(1, numFilters / numGroups);
    outputShape.add_dim(2);
    outputShape.set_dim(2, (static_cast<int>(/* ... transpose-convolution output height ... */)));
    outputShape.add_dim(3);
    outputShape.set_dim(3, (static_cast<int>(/* ... transpose-convolution output width ... */)));

    // ...
    vector<float> weightData(armnn::numeric_cast<size_t>(numGroups *
                                                         inputShape.dim(1) *
                                                         outputShape.dim(1) *
                                                         kernelH *
                                                         kernelW));
    GetDataFromBlob(layerParam, weightData, 0);

    const unsigned int weightDimSizes[4] = {
        static_cast<unsigned int>(outputShape.dim(1)),
        static_cast<unsigned int>(inputShape.dim(1)),
        kernelH,
        kernelW };
    TensorInfo weightInfo(4, weightDimSizes, DataType::Float32);

    TensorInfo biasInfo;
    vector<float> biasData;
    if (desc.m_BiasEnabled)
    {
        biasData.resize(armnn::numeric_cast<size_t>(numGroups * outputShape.dim(1)), 1.f);
        GetDataFromBlob(layerParam, biasData, 1);

        const unsigned int biasDimSizes[1] = { static_cast<unsigned int>(outputShape.dim(1)) };
        biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
    }

    const unsigned int numWeightsPerGroup = armnn::numeric_cast<unsigned int>(weightData.size()) / numGroups;
    const unsigned int numBiasesPerGroup  = armnn::numeric_cast<unsigned int>(biasData.size()) / numGroups;

    for (unsigned int g = 0; g < numGroups; ++g)
    {
        armnn::IConnectableLayer* deconvLayer = nullptr;
        ConstTensor weights(weightInfo,
                            weightData.data() + numWeightsPerGroup * g);
        // ...
        ConstTensor biases(biasInfo, biasData.data() + numBiasesPerGroup * g);

        // ...
        deconvLayer = m_Network->AddTransposeConvolution2dLayer(desc,
                                                                // ... (weights and optional biases)
                                                                convLayerNames[g].c_str());
        convLayers[g] = deconvLayer;

        armnn::IOutputSlot& splitterInputConnection =
            splitterLayer ? splitterLayer->GetOutputSlot(g) : inputConnection;
        splitterInputConnection.Connect(deconvLayer->GetInputSlot(0));
    }

    // ...
    unsigned int concatDimSizes[4] = { static_cast<unsigned int>(outputShape.dim(0)),
                                       static_cast<unsigned int>(outputShape.dim(1)),
                                       static_cast<unsigned int>(outputShape.dim(2)),
                                       static_cast<unsigned int>(outputShape.dim(3)) };

    // ...
    for (unsigned int g = 0; g < numGroups; ++g)
    {
        // ... (set the concat view origin for group g)
    }
    concatDimSizes[1] *= numGroups;
    outputShape.set_dim(1, concatDimSizes[1]);

    // ...
    if (!concatLayer)
    {
        throw ParseException(
            fmt::format("Failed to create final concat layer for Split+Deconvolution+Concat. "
                        "Layer={} #groups={} #filters={} {}",
                        layerParam.name(), numGroups, numFilters, CHECK_LOCATION().AsString()));
    }

    for (unsigned int g = 0; g < numGroups; ++g)
    {
        convLayers[g]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(g));
    }
    // ...
}
void ICaffeParser::CaffeParserImpl::AddConvLayerWithDepthwiseConv(const caffe::LayerParameter& layerParam,
                                                                  const armnn::Convolution2dDescriptor& convDesc,
                                                                  unsigned int kernelW,
                                                                  unsigned int kernelH)
{
    // ...
    ValidateNumInputsOutputs(layerParam, 1, 1);

    ConvolutionParameter convParam = layerParam.convolution_param();
    // ... (a DepthwiseConvolution2dDescriptor 'desc' is initialised from convDesc here,
    //      and the input connection 'inputConnection' and shape 'inputShape' are fetched)

    unsigned int numFilters = convParam.num_output();

    BlobShape outputShape;
    outputShape.add_dim(0);
    outputShape.set_dim(0, inputShape.dim(0));
    outputShape.add_dim(1);
    outputShape.set_dim(1, numFilters);
    outputShape.add_dim(2);
    outputShape.set_dim(
        2, (static_cast<int>(
            static_cast<float>(inputShape.dim(2) + 2 * desc.m_PadBottom - (desc.m_DilationX * (kernelH - 1) + 1)) /
            static_cast<float>(desc.m_StrideY)) + 1));
    outputShape.add_dim(3);
    outputShape.set_dim(
        3, (static_cast<int>(
            static_cast<float>(inputShape.dim(3) + 2 * desc.m_PadRight - (desc.m_DilationY * (kernelW - 1) + 1)) /
            static_cast<float>(desc.m_StrideX)) + 1));

    // ...
    size_t allWeightsSize = armnn::numeric_cast<size_t>(inputShape.dim(1) * kernelH * kernelW);
    vector<float> weightData(allWeightsSize);

    GetDataFromBlob(layerParam, weightData, 0);

    // ...
    const unsigned int weightDimSizes[4] = {
        static_cast<unsigned int>(1),
        static_cast<unsigned int>(inputShape.dim(1)),
        kernelH,
        kernelW };
    // ...

    vector<float> biasData;
    if (desc.m_BiasEnabled)
    {
        // ...
        biasData.resize(armnn::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
        GetDataFromBlob(layerParam, biasData, 1);

        const unsigned int biasDimSizes[1] = { static_cast<unsigned int>(outputShape.dim(1)) };
        biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
        // ...
    }

    returnLayer = m_Network->AddDepthwiseConvolution2dLayer(desc,
                                                            // ... (weights and optional biases)
                                                            layerParam.name().c_str());
    if (!returnLayer)
    {
        throw ParseException(
            fmt::format("Failed to create depthwise convolution layer. "
                        "Layer={} #filters={} {}",
                        layerParam.name(), numFilters, CHECK_LOCATION().AsString()));
    }

    inputConnection.Connect(returnLayer->GetInputSlot(0));
    // ...
}
void ICaffeParser::CaffeParserImpl::ParseConvLayer(const LayerParameter& layerParam)
{
    // ...
    ValidateNumInputsOutputs(layerParam, 1, 1);

    ConvolutionParameter convParam = layerParam.convolution_param();
    // ... (the input connection 'inputConnection' and its BlobShape 'inputShape' are fetched here)
    const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
    unsigned int numFilters = convParam.num_output();

    const auto notFound = std::numeric_limits<unsigned int>::max();

    unsigned int kernelH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                             kernel_h, kernel_size,
                                                             unsigned int, notFound);
    unsigned int kernelW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                             kernel_w, kernel_size,
                                                             unsigned int, notFound);

    unsigned int strideH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                             stride_h, stride,
                                                             unsigned int, 1u);
    unsigned int strideW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                             stride_w, stride,
                                                             unsigned int, 1u);

    unsigned int padH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                          pad_h, pad,
                                                          unsigned int, 0u);
    unsigned int padW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                          pad_w, pad,
                                                          unsigned int, 0u);

    unsigned int dilationH = convParam.dilation_size() > 0 ? convParam.dilation(0) : 1;
    unsigned int dilationW = convParam.dilation_size() > 1 ? convParam.dilation(1) :
                             convParam.dilation_size() > 0 ? convParam.dilation(0) : 1;

    Convolution2dDescriptor convolution2dDescriptor;
    convolution2dDescriptor.m_PadLeft     = padW;
    convolution2dDescriptor.m_PadRight    = padW;
    convolution2dDescriptor.m_PadTop      = padH;
    convolution2dDescriptor.m_PadBottom   = padH;
    convolution2dDescriptor.m_StrideX     = strideW;
    convolution2dDescriptor.m_StrideY     = strideH;
    convolution2dDescriptor.m_DilationX   = dilationW;
    convolution2dDescriptor.m_DilationY   = dilationH;
    convolution2dDescriptor.m_BiasEnabled = convParam.has_bias_term() ? convParam.bias_term() : true;

    if (numGroups > numFilters)
    {
        throw ParseException(
            fmt::format("Error parsing Convolution: {}. "
                        "The 'group'={} parameter cannot be larger than the "
                        "number of filters supplied ='{}'. {}",
                        layerParam.name(), numGroups, numFilters, CHECK_LOCATION().AsString()));
    }

    // ...
    if (inputShape.dim_size() != 4)
    {
        throw ParseException(
            fmt::format("Convolution input shape is expected to have 4 dimensions. "
                        "{}'s input has only {}. {}",
                        layerParam.name(),
                        inputShape.dim_size(),
                        CHECK_LOCATION().AsString()));
    }

    // ...
    if (numGroups > inputShape.dim(1))
    {
        throw ParseException(
            fmt::format("Error parsing Convolution: {}. "
                        "The 'group'={} parameter cannot be larger than the "
                        "channel of the input shape={} (in NCHW format). {}",
                        layerParam.name(), numGroups, inputShape.dim(1), CHECK_LOCATION().AsString()));
    }
    else if (numGroups == inputShape.dim(1))
    {
        // ... (depthwise case, delegated to AddConvLayerWithDepthwiseConv)
    }
    // ... (general grouped case, delegated to AddConvLayerWithSplits)

    BlobShape outputShape;
    outputShape.add_dim(0);
    outputShape.set_dim(0, inputShape.dim(0));
    outputShape.add_dim(1);
    outputShape.set_dim(1, numFilters);
    outputShape.add_dim(2);
    outputShape.set_dim(
        2, (static_cast<int>(
            static_cast<float>(inputShape.dim(2) + 2 * padH - (dilationH * (kernelH - 1) + 1)) /
            static_cast<float>(strideH)) + 1));
    outputShape.add_dim(3);
    outputShape.set_dim(
        3, (static_cast<int>(
            static_cast<float>(inputShape.dim(3) + 2 * padW - (dilationW * (kernelW - 1) + 1)) /
            static_cast<float>(strideW)) + 1));

    vector<float> weightData(armnn::numeric_cast<size_t>(inputShape.dim(1) *
                                                         outputShape.dim(1) *
                                                         kernelH *
                                                         kernelW));
    GetDataFromBlob(layerParam, weightData, 0);

    const unsigned int weightDimSizes[4] = {
        static_cast<unsigned int>(outputShape.dim(1)),
        static_cast<unsigned int>(inputShape.dim(1)),
        kernelH,
        kernelW };
    // ...

    vector<float> biasData;
    if (convolution2dDescriptor.m_BiasEnabled)
    {
        biasData.resize(armnn::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
        GetDataFromBlob(layerParam, biasData, 1);

        const unsigned int biasDimSizes[1] = { static_cast<unsigned int>(outputShape.dim(1)) };
        biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
    }

    // ...
    returnLayer = m_Network->AddConvolution2dLayer(convolution2dDescriptor,
                                                   // ... (weights and optional biases)
                                                   layerParam.name().c_str());
    if (!returnLayer)
    {
        throw ParseException(
            fmt::format("Failed to create Convolution layer. "
                        "Layer={} #groups={} #filters={} {}",
                        layerParam.name(), numGroups, numFilters, CHECK_LOCATION().AsString()));
    }
    inputConnection.Connect(returnLayer->GetInputSlot(0));
    // ...
}
void ICaffeParser::CaffeParserImpl::ParseDeconvLayer(const LayerParameter& layerParam)
{
    // ...
    ValidateNumInputsOutputs(layerParam, 1, 1);

    ConvolutionParameter convParam = layerParam.convolution_param();
    // ... (the input connection 'inputConnection' and its BlobShape 'inputShape' are fetched here)
    const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
    unsigned int numFilters = convParam.num_output();

    const auto notFound = std::numeric_limits<unsigned int>::max();

    unsigned int kernelH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                             kernel_h, kernel_size,
                                                             unsigned int, notFound);
    unsigned int kernelW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                             kernel_w, kernel_size,
                                                             unsigned int, notFound);

    unsigned int strideH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                             stride_h, stride,
                                                             unsigned int, 1u);
    unsigned int strideW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                             stride_w, stride,
                                                             unsigned int, 1u);

    unsigned int padH = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                          pad_h, pad,
                                                          unsigned int, 0u);
    unsigned int padW = GET_OPTIONAL_WITH_VECTOR_FALLBACK(convParam, ConvolutionParameter,
                                                          pad_w, pad,
                                                          unsigned int, 0u);

    unsigned int dilationH = convParam.dilation_size() > 0 ? convParam.dilation(0) : 1;
    unsigned int dilationW = convParam.dilation_size() > 1 ? convParam.dilation(1) :
                             convParam.dilation_size() > 0 ? convParam.dilation(0) : 1;

    if (dilationH != 1 || dilationW != 1)
    {
        throw ParseException(
            fmt::format("Dilated deconvolution is not supported. "
                        "{}'s input has dilation {} {}. {}",
                        layerParam.name(),
                        dilationW, dilationH,
                        CHECK_LOCATION().AsString()));
    }

    TransposeConvolution2dDescriptor deconvolution2dDescriptor;
    deconvolution2dDescriptor.m_PadLeft     = padW;
    deconvolution2dDescriptor.m_PadRight    = padW;
    deconvolution2dDescriptor.m_PadTop      = padH;
    deconvolution2dDescriptor.m_PadBottom   = padH;
    deconvolution2dDescriptor.m_StrideX     = strideW;
    deconvolution2dDescriptor.m_StrideY     = strideH;
    deconvolution2dDescriptor.m_BiasEnabled = convParam.has_bias_term() ? convParam.bias_term() : true;

    if (numGroups > numFilters)
    {
        throw ParseException(
            fmt::format("Error parsing Deconvolution: {}. "
                        "The 'group'={} parameter cannot be larger than the "
                        "number of filters supplied ='{}'. {}",
                        layerParam.name(), numGroups, numFilters, CHECK_LOCATION().AsString()));
    }

    // ...
    if (inputShape.dim_size() != 4)
    {
        throw ParseException(
            fmt::format("Deconvolution input shape is expected to have 4 dimensions. "
                        "{}'s input has only {}. {}",
                        layerParam.name(),
                        inputShape.dim_size(),
                        CHECK_LOCATION().AsString()));
    }

    // ...
    if (numGroups > inputShape.dim(1))
    {
        throw ParseException(
            fmt::format("Error parsing Deconvolution: {}. "
                        "The 'group'={} parameter cannot be larger than the "
                        "channel of the input shape={} (in NCHW format). {}",
                        layerParam.name(), numGroups, inputShape.dim(1), CHECK_LOCATION().AsString()));
    }
    // ... (grouped deconvolution is delegated to AddDeconvLayerWithSplits)

    BlobShape outputShape;
    outputShape.add_dim(0);
    outputShape.set_dim(0, inputShape.dim(0));
    outputShape.add_dim(1);
    outputShape.set_dim(1, numFilters);
    outputShape.add_dim(2);
    outputShape.set_dim(
        2, (static_cast<int>(
            strideH * (inputShape.dim(2) - 1) - 2 * padH + (dilationH * (kernelH - 1) + 1))));
    outputShape.add_dim(3);
    outputShape.set_dim(
        3, (static_cast<int>(
            strideW * (inputShape.dim(3) - 1) - 2 * padW + (dilationW * (kernelW - 1) + 1))));
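    // The two set_dim calls above apply the transpose-convolution output-size
    // formula, the inverse of the convolution one:
    //   out = stride * (in - 1) - 2 * pad + (dilation * (kernel - 1) + 1)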
    vector<float> weightData(armnn::numeric_cast<size_t>(inputShape.dim(1) *
                                                         outputShape.dim(1) *
                                                         kernelH *
                                                         kernelW));
    GetDataFromBlob(layerParam, weightData, 0);

    const unsigned int weightDimSizes[4] = {
        static_cast<unsigned int>(outputShape.dim(1)),
        static_cast<unsigned int>(inputShape.dim(1)),
        kernelH,
        kernelW };
    // ...

    vector<float> biasData;
    if (deconvolution2dDescriptor.m_BiasEnabled)
    {
        biasData.resize(armnn::numeric_cast<size_t>(outputShape.dim(1)), 1.f);
        GetDataFromBlob(layerParam, biasData, 1);

        const unsigned int biasDimSizes[1] = { static_cast<unsigned int>(outputShape.dim(1)) };
        biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
    }

    // ...
    returnLayer = m_Network->AddTransposeConvolution2dLayer(deconvolution2dDescriptor,
                                                            // ... (weights and optional biases)
                                                            layerParam.name().c_str());
    if (!returnLayer)
    {
        throw ParseException(
            fmt::format("Failed to create Deconvolution layer. "
                        "Layer={} #groups={} #filters={} {}",
                        layerParam.name(), numGroups, numFilters, CHECK_LOCATION().AsString()));
    }
    inputConnection.Connect(returnLayer->GetInputSlot(0));
    // ...
}
void ICaffeParser::CaffeParserImpl::ParsePoolingLayer(const LayerParameter& layerParam)
{
    // ...
    ValidateNumInputsOutputs(layerParam, 1, 1);
    PoolingParameter param = layerParam.pooling_param();
    // ... (the input TensorInfo 'inputInfo' is fetched here)

    const auto notFound = std::numeric_limits<unsigned int>::max();

    unsigned int kernel_h = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
                                                       kernel_h, kernel_size,
                                                       unsigned int, notFound);
    unsigned int kernel_w = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
                                                       kernel_w, kernel_size,
                                                       unsigned int, notFound);

    if ((kernel_h == notFound || kernel_w == notFound) && param.has_global_pooling())
    {
        kernel_h = inputInfo.GetShape()[2];
        kernel_w = inputInfo.GetShape()[3];
    }

    unsigned int stride_h = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
                                                       stride_h, stride,
                                                       unsigned int, notFound);
    unsigned int stride_w = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
                                                       stride_w, stride,
                                                       unsigned int, notFound);

    if ((stride_h == notFound || stride_w == notFound) && param.has_global_pooling())
    {
        // ... (global pooling implies a stride of 1)
    }

    unsigned int pad_h = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
                                                    pad_h, pad,
                                                    unsigned int, 0u);
    unsigned int pad_w = GET_OPTIONAL_WITH_FALLBACK(param, PoolingParameter,
                                                    pad_w, pad,
                                                    unsigned int, 0u);

    // ...
    Pooling2dDescriptor pooling2dDescriptor;
    if (param.has_pool())
    {
        PoolingParameter_PoolMethod p = param.pool();
        switch (p)
        {
            case PoolingParameter_PoolMethod_MAX:
                pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Max;
                break;
            case PoolingParameter_PoolMethod_AVE:
                pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
                break;
            case PoolingParameter_PoolMethod_STOCHASTIC:
                throw ParseException(
                    fmt::format("Pooling Layer: Stochastic Pooling Not Supported. Layer={} {}",
                                layerParam.name(), CHECK_LOCATION().AsString()));
            default:
                throw ParseException(
                    fmt::format("Pooling Layer: unknown pooling method: {} for layer: {} {}",
                                p, layerParam.name(), CHECK_LOCATION().AsString()));
        }
    }
    else
    {
        throw ParseException(
            fmt::format("No Pooling Method Defined for {} {}",
                        layerParam.name(), CHECK_LOCATION().AsString()));
    }

    pooling2dDescriptor.m_PadLeft   = pad_w;
    pooling2dDescriptor.m_PadRight  = pad_w;
    pooling2dDescriptor.m_PadTop    = pad_h;
    pooling2dDescriptor.m_PadBottom = pad_h;
    pooling2dDescriptor.m_StrideX   = stride_w;
    pooling2dDescriptor.m_StrideY   = stride_h;
    // ... (pool width/height, output shape rounding and padding method are set here)

    armnn::IConnectableLayer* poolingLayer = m_Network->AddPooling2dLayer(pooling2dDescriptor,
                                                                          layerParam.name().c_str());
    TensorInfo outputInfo(
        { inputInfo.GetShape()[0],
          inputInfo.GetShape()[1],
          static_cast<unsigned int>(ceil(
              static_cast<float>(inputInfo.GetShape()[2] + 2 * pad_h - kernel_h) /
              armnn::numeric_cast<float>(stride_h))) + 1,
          static_cast<unsigned int>(ceil(
              static_cast<float>(inputInfo.GetShape()[3] + 2 * pad_w - kernel_w) /
              armnn::numeric_cast<float>(stride_w))) + 1 },
        DataType::Float32);
    // ...
}
void ICaffeParser::CaffeParserImpl::ParseArgmaxLayer(const LayerParameter& layerParam)
{
    ValidateNumInputsOutputs(layerParam, 1, 1);
    ArgMaxParameter param = layerParam.argmax_param();
    // ... (the input BlobShape 'inputShape' is fetched here)

    const unsigned int topK = param.has_top_k() ? param.top_k() : 1;
    if (topK != 1)
    {
        throw ParseException(
            fmt::format("ArgMaxLayer: Only support top_k equals to 1. Layer={} {}",
                        layerParam.name(), CHECK_LOCATION().AsString()));
    }

    const bool outMaxVal = param.has_out_max_val() ? param.out_max_val() : false;
    if (outMaxVal)
    {
        throw ParseException(
            fmt::format("ArgMaxLayer: Does not support out_max_val. Layer={} {}",
                        layerParam.name(), CHECK_LOCATION().AsString()));
    }

    int axis = param.has_axis() ? param.axis() : 1;
    // ...
    axis = inputShape.dim_size() - axis;

    if ((axis < 0) || (axis >= inputShape.dim_size()))
    {
        throw ParseException(
            fmt::format("ArgMaxLayer: Invalid axis value which outside range of input dims. "
                        "{}'s input has input dim_size {}, requested axis: {}. {}",
                        layerParam.name(),
                        inputShape.dim_size(),
                        axis,
                        CHECK_LOCATION().AsString()));
    }

    // ... (an ArgMinMaxDescriptor with function Max and the axis above is set up here)
    armnn::IConnectableLayer* argmaxLayer = m_Network->AddArgMinMaxLayer(argMinMaxDescriptor,
                                                                         layerParam.name().c_str());

    TensorShape outputShape(static_cast<unsigned int>(inputShape.dim_size() - 1));
    int j = 0;
    for (int i = 0; i < inputShape.dim_size(); ++i)
    {
        if (i == axis)
        {
            continue;
        }
        outputShape[static_cast<unsigned int>(j++)] = static_cast<unsigned int>(inputShape.dim(i));
    }
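    // The loop above copies every input dimension except the reduced axis; e.g. an
    // NCHW input of (1, 3, 224, 224) with axis == 1 yields the output shape (1, 224, 224).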
    TensorInfo outputInfo(outputShape, DataType::Signed32);
    // ... (set outputInfo on the ArgMinMax layer's output slot and register the top)
}
void ICaffeParser::CaffeParserImpl::ParseReluLayer(const LayerParameter& layerParam)
{
    ValidateNumInputsOutputs(layerParam, 1, 1);

    const string& name = layerParam.name();
    const ReLUParameter& param = layerParam.relu_param();

    ActivationDescriptor activationDescriptor;
    const float negativeSlope = param.negative_slope();
    if (negativeSlope == 0.0f)
    {
        activationDescriptor.m_Function = ActivationFunction::ReLu;
    }
    else
    {
        activationDescriptor.m_Function = ActivationFunction::LeakyReLu;
        activationDescriptor.m_A = negativeSlope;
    }
    // ... (an activation layer is added using activationDescriptor)
}
void ICaffeParser::CaffeParserImpl::ParseLRNLayer(const LayerParameter& layerParam)
{
    ValidateNumInputsOutputs(layerParam, 1, 1);

    LRNParameter param = layerParam.lrn_param();
    // ...
    NormalizationDescriptor normalizationDescriptor;
    if (param.has_norm_region())
    {
        LRNParameter_NormRegion n = param.norm_region();
        switch (n)
        {
            case LRNParameter_NormRegion_ACROSS_CHANNELS:
                normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
                break;
            case LRNParameter_NormRegion_WITHIN_CHANNEL:
                normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Within;
                break;
            default:
                throw ParseException(
                    fmt::format("Unknown region {} for LRN layer {} {}",
                                n, layerParam.name(), CHECK_LOCATION().AsString()));
        }
    }
    else
    {
        normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
    }

    normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
    if (param.has_local_size())
    {
        normalizationDescriptor.m_NormSize = param.local_size();
    }
    else
    {
        throw ParseException(
            fmt::format("local_size not defined for LRN layer {} {}",
                        layerParam.name(), CHECK_LOCATION().AsString()));
    }

    if (param.has_alpha())
    {
        normalizationDescriptor.m_Alpha = param.alpha();
        // ...
    }
    else
    {
        throw ParseException(
            fmt::format("Alpha not defined for LRN layer {} {}",
                        layerParam.name(), CHECK_LOCATION().AsString()));
    }

    if (param.has_beta())
    {
        normalizationDescriptor.m_Beta = param.beta();
    }
    else
    {
        throw ParseException(
            fmt::format("Beta not defined for LRN layer {} {}",
                        layerParam.name(), CHECK_LOCATION().AsString()));
    }

    if (param.has_k())
    {
        normalizationDescriptor.m_K = param.k();
    }
    else
    {
        normalizationDescriptor.m_K = 1;
    }

    armnn::IConnectableLayer* const normLayer = m_Network->AddNormalizationLayer(normalizationDescriptor,
                                                                                 layerParam.name().c_str());
    // ...
}
void ICaffeParser::CaffeParserImpl::ParseInnerProductLayer(const LayerParameter& layerParam)
{
    InnerProductParameter param = layerParam.inner_product_param();

    ValidateNumInputsOutputs(layerParam, 1, 1);

    unsigned int outputSize = param.num_output();

    // ... (a FullyConnectedDescriptor 'tensorFullyConnectedDescriptor' is set up here)
    if (param.has_transpose())
    {
        // ... (honour the 'transpose' flag; Caffe's default is transposed weights)
    }
    // ...

    unsigned int inputSize = inputInfo.GetShape()[1];
    for (unsigned int i = 2; i < inputInfo.GetNumDimensions(); ++i)
    {
        inputSize *= inputInfo.GetShape()[i];
    }

    const float* weightDataPtr = GetArrayPtrFromBlob(layerParam, 0);
    const unsigned int swTD[2] = { outputSize, inputSize };
    // ...

    if (tensorFullyConnectedDescriptor.m_BiasEnabled)
    {
        const float* biasDataPtr = GetArrayPtrFromBlob(layerParam, 1);
        const unsigned int sbTD[1] = { outputSize };
        // ...
        fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor,
                                                                // ... (weights and biases)
                                                                layerParam.name().c_str());
    }
    else
    {
        fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor,
                                                                // ... (weights only)
                                                                layerParam.name().c_str());
    }
    // ...
}
void ICaffeParser::CaffeParserImpl::ParseSoftmaxLayer(const LayerParameter& layerParam)
{
    ValidateNumInputsOutputs(layerParam, 1, 1);

    SoftmaxParameter param = layerParam.softmax_param();
    // ...
    SoftmaxDescriptor softmaxDescriptor;
    softmaxDescriptor.m_Axis = 1;
    armnn::IConnectableLayer* const softmaxLayer = m_Network->AddSoftmaxLayer(softmaxDescriptor,
                                                                              layerParam.name().c_str());
    // ...
}
void ICaffeParser::CaffeParserImpl::ParseEltwiseLayer(const LayerParameter& layerParam)
{
    ValidateNumInputsOutputs(layerParam, 2, 1);
    // ...

    // Defaults to sum, as per the Caffe definition.
    EltwiseParameter_EltwiseOp operation = EltwiseParameter_EltwiseOp_SUM;
    if (layerParam.has_eltwise_param() && layerParam.eltwise_param().has_operation())
    {
        operation = layerParam.eltwise_param().operation();
    }

    armnn::IConnectableLayer* newLayer = nullptr;
    switch (operation)
    {
        case EltwiseParameter_EltwiseOp_SUM:
            newLayer = m_Network->AddAdditionLayer(layerParam.name().c_str());
            break;
        case EltwiseParameter_EltwiseOp_PROD:
            newLayer = m_Network->AddMultiplicationLayer(layerParam.name().c_str());
            break;
        default:
            throw ParseException(
                fmt::format("Unsupported operation {} in Eltwise layer {} {}",
                            operation, layerParam.name(), CHECK_LOCATION().AsString()));
    }
    // ...
}
void ICaffeParser::CaffeParserImpl::ParseConcatLayer(const LayerParameter& layerParam)
{
    unsigned int numInputs = static_cast<unsigned int>(layerParam.bottom_size());
    // ... (only the default concatenation along the channel dimension is handled here)
    unsigned int concatDim = 1;
    unsigned int numOfDims = 4;

    OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numInputs), numOfDims);
    std::vector<unsigned int> mergeDimSizes(numOfDims, 0u);

    unsigned int mergeDim = 0;
    for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
    {
        const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(
            layerParam.bottom(armnn::numeric_cast<int>(viewIndex))).GetTensorInfo();
        // ...
        if (inputInfo.GetNumDimensions() != numOfDims)
        {
            throw ParseException(
                fmt::format("The number of dimensions for input tensors of "
                            "the concatenation op should be 4. Inputs of {} has "
                            "{} dimensions. {}",
                            layerParam.name(),
                            inputInfo.GetNumDimensions(),
                            CHECK_LOCATION().AsString()));
        }

        mergeDimSizes[0] = inputInfo.GetShape()[0];
        mergeDimSizes[1] = inputInfo.GetShape()[1];
        mergeDimSizes[2] = inputInfo.GetShape()[2];
        mergeDimSizes[3] = inputInfo.GetShape()[3];

        for (unsigned int j = 0; j < concatDim; ++j)
        {
            // ... (view origin coordinate set to 0 for dimensions before concatDim)
        }

        // ... (view origin along concatDim set to the running offset 'mergeDim')
        mergeDim += mergeDimSizes[concatDim];

        for (unsigned int j = concatDim + 1; j < numOfDims; ++j)
        {
            // ... (view origin coordinate set to 0 for dimensions after concatDim)
        }
    }
    mergeDimSizes[concatDim] = mergeDim;

    // ...
    for (unsigned int i = 0; i < numInputs; ++i)
    {
        // ... (connect each input's output slot to the concat layer's input slot i)
    }
    // ...
}
void ICaffeParser::CaffeParserImpl::ParseBatchNormLayer(const LayerParameter& layerParam)
{
    ValidateNumInputsOutputs(layerParam, 1, 1);

    // ...
    string name = layerParam.name();

    BatchNormParameter param = layerParam.batch_norm_param();
    // use_global_stats is the only supported mode (its value during inference).
    if (param.has_use_global_stats())
    {
        if (!param.use_global_stats())
        {
            throw ParseException(
                fmt::format("Error parsing Batch Norm layer '{}': "
                            "Parameter 'use_global_stats' is set to false, which is "
                            "unsupported (value used for training). {}",
                            name,
                            CHECK_LOCATION().AsString()));
        }
    }

    BatchNormalizationDescriptor desc;
    desc.m_Eps = param.eps();

    unsigned int channels = inputInfo.GetShape()[1];
    unsigned int shape[]  = { channels };

    vector<float> meanData(channels);
    GetDataFromBlob(layerParam, meanData, 0);

    vector<float> varianceData(channels);
    GetDataFromBlob(layerParam, varianceData, 1);

    const BlobProto& blob = layerParam.blobs(armnn::numeric_cast<int>(2));
    const float movingAverageFactor = blob.data(armnn::numeric_cast<int>(0));
    if (movingAverageFactor != 0.0f)
    {
        const float scaleFactor = 1.0f / movingAverageFactor;
        auto scaleFunction = [scaleFactor](float f) -> float { return f * scaleFactor; };

        std::transform(varianceData.begin(), varianceData.end(), varianceData.begin(), scaleFunction);
        std::transform(meanData.begin(), meanData.end(), meanData.begin(), scaleFunction);
    }
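    // Caffe stores the running mean and variance scaled by a moving-average factor
    // held in blob 2; dividing by that factor (as above) recovers the actual
    // statistics: mean = blob0 / blob2[0], variance = blob1 / blob2[0].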
    vector<float> betaData(channels, 0.0f);
    vector<float> gammaData(channels, 1.0f);
    // ... (ConstTensors mean, variance, beta and gamma are built from the vectors above)
    armnn::IConnectableLayer* const batchNormLayer = m_Network->AddBatchNormalizationLayer(desc,
        mean, variance, beta, gamma, name.c_str());
    // ...
}
void ICaffeParser::CaffeParserImpl::ParseScaleLayer(const LayerParameter& layerParam)
{
    ValidateNumInputsOutputs(layerParam, 1, 1);

    // ...
    string name = layerParam.name();

    ScaleParameter param = layerParam.scale_param();
    if (param.axis() != 1)
    {
        throw ParseException(
            fmt::format("Loading Scale Layer: Only axis 1 is supported currently. "
                        "Layer={} Axis={} {}",
                        layerParam.name(),
                        param.axis(),
                        CHECK_LOCATION().AsString()));
    }

    unsigned int channels = inputInfo.GetShape()[1];
    unsigned int shape[]  = { channels };

    // ...
    vector<float> meanData(channels, 0.0f);
    vector<float> varianceData(channels, 1.0f);
    vector<float> betaData(channels, 0.0f);
    vector<float> gammaData(channels);

    GetDataFromBlob(layerParam, gammaData, 0);

    if (param.has_bias_term())
    {
        GetDataFromBlob(layerParam, betaData, 1);
    }
    // ...
    armnn::IConnectableLayer* const batchNormLayer = m_Network->AddBatchNormalizationLayer(desc,
        mean, variance, beta, gamma, name.c_str());
    // ...
}
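// Note on ParseScaleLayer above: a Scale layer is expressed as batch normalization
// with identity statistics (mean 0, variance 1), so gamma carries the scale blob
// and beta the optional bias blob.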
void ICaffeParser::CaffeParserImpl::ParseSplitLayer(const caffe::LayerParameter& layerParam)
{
    if (layerParam.bottom_size() != 1)
    {
        throw ParseException(
            fmt::format("Split layer '{}' should have exactly 1 bottom. "
                        "#bottoms={} {}",
                        layerParam.name(),
                        layerParam.bottom_size(),
                        CHECK_LOCATION().AsString()));
    }
    armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
    for (int i = 0; i < layerParam.top_size(); i++)
    {
        SetArmnnOutputSlotForCaffeTop(layerParam.top(i), outputSlot);
    }
}
void ICaffeParser::CaffeParserImpl::ParseDropoutLayer(const caffe::LayerParameter& layerParam)
{
    // Dropout is a no-op at inference time, so simply alias the input to the output.
    if (layerParam.bottom_size() != 1 || layerParam.top_size() != 1)
    {
        throw ParseException(
            fmt::format("Dropout layer '{}' should have exactly 1 bottom and 1 top. "
                        "#bottoms={} #tops={} {}",
                        layerParam.name(),
                        layerParam.bottom_size(),
                        layerParam.top_size(),
                        CHECK_LOCATION().AsString()));
    }
    SetArmnnOutputSlotForCaffeTop(layerParam.top(0), GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)));
}
// ... (TrackInputBinding and TrackOutputBinding both forward to TrackBindingPoint)
void ICaffeParser::CaffeParserImpl::TrackBindingPoint(armnn::IConnectableLayer* layer,
    armnn::LayerBindingId id,
    const armnn::TensorInfo& tensorInfo,
    const char* bindingPointDesc,
    std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
{
    const std::string layerName = layer->GetName();
    auto it = nameToBindingInfo.find(layerName);
    if (it == nameToBindingInfo.end())
    {
        nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
    }
    else
    {
        throw ParseException(
            fmt::format("Id {} used by more than one {} layer {}",
                        id, bindingPointDesc, CHECK_LOCATION().AsString()));
    }
}
armnn::IOutputSlot& ICaffeParser::CaffeParserImpl::GetArmnnOutputSlotForCaffeTop(const std::string& caffeTopName) const
{
    // ... (look the top up in m_ArmnnOutputSlotForCaffeTop and return it if found)
    throw ParseException(
        fmt::format("Could not find armnn output slot for Caffe top '{}' {}",
                    caffeTopName, CHECK_LOCATION().AsString()));
}

void ICaffeParser::CaffeParserImpl::SetArmnnOutputSlotForCaffeTop(const std::string& caffeTopName,
                                                                  armnn::IOutputSlot& armnnOutputSlot)
{
    // ... (store the slot in m_ArmnnOutputSlotForCaffeTop unless the top is already present)
    throw ParseException(
        fmt::format("Attempting to add duplicate entry for Caffe top '{}' {}",
                    caffeTopName, CHECK_LOCATION().AsString()));
}
void ICaffeParser::CaffeParserImpl::ResolveInPlaceLayers(caffe::NetParameter& netParameter)
{
    // Find the layers with the same top.
    std::map<std::string, std::vector<caffe::LayerParameter*>> layersByTop;
    for (int layerIdx = 0; layerIdx < netParameter.layer_size(); ++layerIdx)
    {
        caffe::LayerParameter& layer = *netParameter.mutable_layer(layerIdx);
        std::string name = layer.name();
        for (int i = 0; i < layer.top_size(); ++i)
        {
            layersByTop[layer.top(i)].push_back(&layer);
        }
    }

    // For each set of layers sharing the same top, rewrite them as a linear chain.
    for (auto layersWithSameTopIt : layersByTop)
    {
        const std::string& top = layersWithSameTopIt.first;
        const std::vector<caffe::LayerParameter*>& layersWithSameTop = layersWithSameTopIt.second;

        for (unsigned int layerIdx = 0; layerIdx < layersWithSameTop.size() - 1; ++layerIdx)
        {
            caffe::LayerParameter& layer1 = *layersWithSameTop[layerIdx];
            caffe::LayerParameter& layer2 = *layersWithSameTop[layerIdx + 1];
            if (layer1.top_size() != 1)
            {
                throw ParseException(
                    fmt::format("Node '{}' is an in-place layer but doesn't have exactly one "
                                "top. It has {} instead. {}",
                                layer1.name(), layer1.top_size(), CHECK_LOCATION().AsString()));
            }
            std::string newTop = layer1.name() + "_top";
            layer1.set_top(0, newTop);
            if (layer2.bottom_size() != 1 || layer2.bottom(0) != top)
            {
                throw ParseException(
                    fmt::format("Node '{}' is an in-place layer but "
                                "doesn't have exactly one bottom, or it doesn't match its top. "
                                "#bottoms={}, first bottom is {}, top is {} {}",
                                layer2.name(), layer2.bottom_size(), layer2.bottom(0), top,
                                CHECK_LOCATION().AsString()));
            }
            layer2.set_bottom(0, newTop);
        }
    }
}
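// Example of the rewrite performed above: conv1 (top "x") followed by an in-place
// relu1 (bottom "x", top "x") becomes conv1 (top "conv1_top") and relu1
// (bottom "conv1_top", top "x"); the last layer of the chain keeps the shared top name.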
void ICaffeParser::CaffeParserImpl::LoadNetParam(NetParameter& netParameter)
{
    // ...
    // Converts the old-style top-level input fields into a dedicated Input layer.
    if (netParameter.input_size() > 0)
    {
        LayerParameter* newLayer = netParameter.add_layer();

        newLayer->set_type("Input");
        newLayer->set_name(netParameter.input(0));
        newLayer->add_top(netParameter.input(0));

        InputParameter* inputParam = newLayer->mutable_input_param();
        BlobShape* shape = inputParam->add_shape();

        int dim_size = netParameter.input_dim_size();
        for (int i = 0; i < dim_size; ++i)
        {
            shape->add_dim(netParameter.input_dim(i));
        }
    }

    // ...
    for (int i = 0; i < netParameter.layer_size(); ++i)
    {
        const caffe::LayerParameter& layer = netParameter.layer(i);
        for (int i = 0; i < layer.top_size(); ++i)
        {
            m_CaffeLayersByTopName[layer.top(i)] = &layer;
        }
    }

    // Find the output layers the user requested.
    std::vector<const caffe::LayerParameter*> targetLayers;
    for (const std::string& requestedOutputName : m_RequestedOutputs)
    {
        auto nodeIt = m_CaffeLayersByTopName.find(requestedOutputName);
        if (nodeIt == m_CaffeLayersByTopName.end())
        {
            throw ParseException(
                fmt::format("Couldn't find requested output layer '{}' in graph {}",
                            requestedOutputName,
                            CHECK_LOCATION().AsString()));
        }
        targetLayers.push_back(nodeIt->second);
    }

    // Sort the nodes so that all of a node's inputs come before the node itself.
    std::vector<const caffe::LayerParameter*> sortedNodes;
    if (!armnnUtils::GraphTopologicalSort<const caffe::LayerParameter*>(
        targetLayers,
        [this](const caffe::LayerParameter* node)
        {
            return GetInputs(*node);
        },
        sortedNodes))
    {
        throw ParseException(
            fmt::format("Cycle detected in graph. #nodes: {} {}",
                        sortedNodes.size(),
                        CHECK_LOCATION().AsString()));
    }

    // Parse each node in order.
    for (const caffe::LayerParameter* current : sortedNodes)
    {
        auto it = ms_CaffeLayerNameToParsingFunctions.find(current->type());
        if (it == ms_CaffeLayerNameToParsingFunctions.end())
        {
            throw ParseException(
                fmt::format("Unsupported layer type: '{}' for layer {} {}",
                            current->type(),
                            current->name(),
                            CHECK_LOCATION().AsString()));
        }
        auto func = it->second;
        (this->*func)(*current);
    }

    // Add armnn output layers for each requested output.
    for (const std::string& requestedOutput : m_RequestedOutputs)
    {
        // ... (an output layer 'outputLayer' with binding id 'outputId' is added here)
        armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(requestedOutput);
        // ...
        outputSlot.Connect(outputLayer->GetInputSlot(0));

        TrackOutputBinding(outputLayer, outputId, outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo());
    }
}
INetworkPtr ICaffeParser::CaffeParserImpl::CreateNetworkFromTextFile(const char* graphFile,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    FILE* fd = fopen(graphFile, "r");

    if (fd == nullptr)
    {
        throw FileNotFoundException(
            fmt::format("Failed to open graph file: {} {}", graphFile, CHECK_LOCATION().AsString()));
    }

    // Parse the file into a message.
    NetParameter netParam;
    auto input = new google::protobuf::io::FileInputStream(fileno(fd));
    bool success = google::protobuf::TextFormat::Parse(input, &netParam);
    delete input;
    fclose(fd);

    if (!success)
    {
        throw ParseException(
            fmt::format("Failed to parse graph file: {} {}", graphFile, CHECK_LOCATION().AsString()));
    }

    return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
}
INetworkPtr ICaffeParser::CaffeParserImpl::CreateNetworkFromString(const char* protoText,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    // Parse the string into a message.
    NetParameter netParam;
    bool success = google::protobuf::TextFormat::ParseFromString(protoText, &netParam);

    if (!success)
    {
        throw ParseException(
            fmt::format("Failed to parse graph string {}", CHECK_LOCATION().AsString()));
    }

    return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
}
INetworkPtr ICaffeParser::CaffeParserImpl::CreateNetworkFromBinaryFile(const char* graphFile,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    FILE* fd = fopen(graphFile, "rb");

    if (fd == nullptr)
    {
        throw FileNotFoundException(
            fmt::format("Failed to open graph file at: {} {}", graphFile, CHECK_LOCATION().AsString()));
    }

    // Parse the binary file into a message.
    NetParameter netParam;

    FileInputStream  inStream(fileno(fd));
    CodedInputStream codedStream(&inStream);
    codedStream.SetTotalBytesLimit(INT_MAX);
    bool success = netParam.ParseFromCodedStream(&codedStream);
    fclose(fd);

    if (!success)
    {
        throw ParseException(
            fmt::format("Failed to parse protobuf file: {} {}", graphFile, CHECK_LOCATION().AsString()));
    }

    return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
}
INetworkPtr ICaffeParser::CaffeParserImpl::CreateNetworkFromNetParameter(NetParameter& netParam,
    const std::map<std::string, armnn::TensorShape>& inputShapes,
    const std::vector<std::string>& requestedOutputs)
{
    // ...
    m_InputShapes = inputShapes;
    if (requestedOutputs.size() == 0)
    {
        throw ParseException("requestedOutputs must have at least one entry");
    }
    m_RequestedOutputs = requestedOutputs;
    // ... (ResolveInPlaceLayers and LoadNetParam run here, and the built network is returned)
}