// NOTE(review): this chunk is a source-browser extraction of the fixture header.
// The leading integers on each line (22, 24, 43, ...) are the ORIGINAL file's
// line numbers fused into the text, and many original lines are missing between
// fragments. The code below is kept byte-identical to the extraction.
// Fragment: includes (Boost.Format, flatbuffers IDL/util/flex APIs, the
// generated TfLite schema) followed by the fixture constructor's parser options.
22 #include <boost/format.hpp> 24 #include "flatbuffers/idl.h" 25 #include "flatbuffers/util.h" 26 #include "flatbuffers/flexbuffers.h" 28 #include <schema_generated.h> 43 ITfLiteParser::TfLiteParserOptions options;
// Emit stand-in layers for unsupported operators instead of failing the parse.
44 options.m_StandInLayerForUnsupported =
true;
// Ask the parser to infer missing tensor shapes and validate them.
45 options.m_InferAndValidate =
true;
// Fragment of Setup(): deserialize the flatbuffer graph built by
// ReadStringToBinary() into an armnn::INetwork.
69 m_Parser->CreateNetworkFromBinary(m_GraphBinary);
// Fragment of Setup(): optimize for the runtime's device spec, then load the
// optimized network; errorMessage receives the runtime's failure text.
76 m_Runtime->GetDeviceSpec());
77 std::string errorMessage;
79 armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
// On failure, the thrown message embeds errorMessage plus file/line via
// boost::format placeholders %1%..%4% (lines between fragments are missing).
85 boost::format(
"The runtime failed to load the network. " 86 "Error was: %1%. in %2% [%3%:%4%]") %
// Fragment of SetupSingleInputSingleOutput(): cache the single input/output
// layer names so RunTest overloads don't need them passed explicitly.
97 m_SingleInputName = inputName;
98 m_SingleOutputName = outputName;
// Fragment of ReadStringToBinary(): compile the test's JSON model description
// into a binary TfLite flatbuffer. (Original lines 108, 110-111, 113-120 are
// missing from this extraction; code kept byte-identical.)
107 flatbuffers::Parser parser;
// First parse the TfLite schema itself so the parser knows the table layout...
109 bool ok = parser.Parse(schemafile.c_str());
// ...then parse the fixture's JSON graph against that schema.
112 ok &= parser.Parse(m_JsonString.c_str());
// Copy the finished flatbuffer bytes out of the parser's builder into
// m_GraphBinary, which Setup() later feeds to CreateNetworkFromBinary().
121 const uint8_t * bufferPtr = parser.builder_.GetBufferPointer();
122 size_t size =
static_cast<size_t>(parser.builder_.GetSize());
123 m_GraphBinary.assign(bufferPtr, bufferPtr+size);
// Declarations of the templated RunTest overloads (bodies appear further down).
// Each is parameterized on the expected output rank; template parameter lists
// are truncated in this extraction (original lines 131, 133-137, 139, 141-147,
// 149-150, 152-153, 155-162 are missing).
// Overload 1: single input / single output (uses the cached names).
130 template <std::size_t NumOutputDimensions,
132 void RunTest(
size_t subgraphId,
// Overload 2: named multiple inputs/outputs, one data type.
138 template <std::size_t NumOutputDimensions,
140 void RunTest(
size_t subgraphId,
// Overload 3: separate input/output data types, with an isDynamic flag
// (defaulting to false) for dynamic-shaped output tensors.
148 template <std::size_t NumOutputDimensions,
151 void RunTest(
size_t subgraphId,
154 bool isDynamic =
false);
// Overload 4 (non-template fragment): compares element-by-element with a
// tolerance rather than via multi_array comparison.
163 void RunTest(std::size_t subgraphId,
// Fragment of GenerateDetectionPostProcessJsonString(): serialize a
// DetectionPostProcessDescriptor into the flexbuffer "custom options" blob that
// the TfLite DetectionPostProcess custom operator expects, then render the raw
// bytes as a comma-separated integer list for embedding in the JSON model.
// (Map entries for several descriptor fields — original lines 172-178 etc. —
// are missing from this extraction; code kept byte-identical.)
170 flexbuffers::Builder detectPostProcess;
171 detectPostProcess.Map([&]() {
// Scalar entries mirror descriptor fields by their TfLite custom-option keys.
176 detectPostProcess.Int(
"num_classes", descriptor.
m_NumClasses);
179 detectPostProcess.Float(
"h_scale", descriptor.
m_ScaleH);
180 detectPostProcess.Float(
"w_scale", descriptor.
m_ScaleW);
181 detectPostProcess.Float(
"x_scale", descriptor.
m_ScaleX);
182 detectPostProcess.Float(
"y_scale", descriptor.
m_ScaleY);
// Finish() seals the flexbuffer so GetBuffer() returns the final bytes.
184 detectPostProcess.Finish();
// Emit each byte as a decimal int followed by a comma (note: this leaves a
// trailing comma, which the JSON array context in the caller tolerates —
// TODO confirm against the calling test code).
187 std::stringstream strStream;
188 std::vector<uint8_t> buffer = detectPostProcess.GetBuffer();
189 std::copy(buffer.begin(), buffer.end(),std::ostream_iterator<int>(strStream,
","));
191 return strStream.str();
// Fragment of CheckTensors(): Boost.Test assertions comparing a parsed tensor's
// metadata (shape, type, buffer index, name) and quantization parameters
// (min/max/scale/zero_point) against expected values. The first parameters
// (tensors, shapeSize, shape — original line 194) are missing from this
// extraction; code kept byte-identical.
195 tflite::TensorType tensorType, uint32_t buffer,
const std::string& name,
196 const std::vector<float>& min,
const std::vector<float>& max,
197 const std::vector<float>& scale,
const std::vector<int64_t>& zeroPoint)
// Guard against a null tensor pointer before dereferencing below.
199 BOOST_CHECK(tensors);
200 BOOST_CHECK_EQUAL(shapeSize, tensors->shape.size());
201 BOOST_CHECK_EQUAL_COLLECTIONS(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end());
202 BOOST_CHECK_EQUAL(tensorType, tensors->type);
203 BOOST_CHECK_EQUAL(buffer, tensors->buffer);
204 BOOST_CHECK_EQUAL(name, tensors->name);
// Quantization block must exist before its vectors are compared.
205 BOOST_CHECK(tensors->quantization);
206 BOOST_CHECK_EQUAL_COLLECTIONS(min.begin(), min.end(), tensors->quantization.get()->min.begin(),
207 tensors->quantization.get()->min.end());
208 BOOST_CHECK_EQUAL_COLLECTIONS(max.begin(), max.end(), tensors->quantization.get()->max.begin(),
209 tensors->quantization.get()->max.end());
210 BOOST_CHECK_EQUAL_COLLECTIONS(scale.begin(), scale.end(), tensors->quantization.get()->scale.begin(),
211 tensors->quantization.get()->scale.end());
212 BOOST_CHECK_EQUAL_COLLECTIONS(zeroPoint.begin(), zeroPoint.end(),
213 tensors->quantization.get()->zero_point.begin(),
214 tensors->quantization.get()->zero_point.end());
// Convenience RunTest overloads that forward to the general implementation
// (bodies truncated in this extraction; code kept byte-identical).
// Single-input/single-output form: forwards using the cached
// m_SingleInputName / m_SingleOutputName — presumably wrapping the data in
// maps; the missing lines 222-226/228-234 would confirm.
221 template <std::size_t NumOutputDimensions,
227 RunTest<NumOutputDimensions, armnnType>(subgraphId,
// Same-type form: reuses the two-type implementation with armnnType for both
// input and output.
235 template <std::size_t NumOutputDimensions,
241 RunTest<NumOutputDimensions, armnnType, armnnType>(subgraphId, inputData, expectedOutputData);
// Fragment of the general RunTest implementation: builds InputTensors from the
// name->data map, allocates boost::multi_array storage for each expected
// output, runs inference (EnqueueWorkload call is among the missing lines),
// and compares results via CompareTensors. Code kept byte-identical.
248 template <std::size_t NumOutputDimensions,
// Bind each named input to its layer binding info and wrap the caller's data
// in an immutable ConstTensor.
260 for (
auto&& it : inputData)
264 inputTensors.push_back({ bindingInfo.first,
armnn::ConstTensor(bindingInfo.second, it.second.data()) });
// Output buffers are owned here, keyed by output layer name.
268 std::map<std::string, boost::multi_array<DataType2, NumOutputDimensions>> outputStorage;
270 for (
auto&& it : expectedOutputData)
// Sanity-check that the network's output rank matches the template parameter
// before allocating storage of that rank.
277 BOOST_CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
278 boost::str(boost::format(
"Number of dimensions expected %1%, but got %2% for output layer %3%")
279 % NumOutputDimensions
280 % outputNumDimensions
284 outputStorage.emplace(it.first, MakeTensor<DataType2, NumOutputDimensions>(outputTensorInfo));
285 outputTensors.push_back(
286 { outputBindingId,
armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
// After inference (missing lines), compare each produced output against the
// expectation; isDynamic is threaded through to both MakeTensor and
// CompareTensors for dynamic-shape handling.
292 for (
auto&& it : expectedOutputData)
295 auto outputExpected = MakeTensor<DataType2, NumOutputDimensions>(bindingInfo.second, it.second, isDynamic);
296 BOOST_TEST(
CompareTensors(outputExpected, outputStorage[it.first],
false, isDynamic));
// Fragment of the flat-vector RunTest implementation: same input/output wiring
// as above, but output storage is plain std::vector<DataType2> and results are
// compared element-by-element with a fixed floating-point tolerance instead of
// via CompareTensors. Code kept byte-identical.
314 for (
auto&& it : inputData)
319 inputTensors.push_back({ bindingInfo.first,
armnn::ConstTensor(bindingInfo.second, it.second.data()) });
// Reserve up front so pointers taken from outputStorage stay valid while
// outputTensors is filled.
323 outputTensors.reserve(expectedOutputData.size());
324 std::map<std::string, std::vector<DataType2>> outputStorage;
325 for (
auto&& it : expectedOutputData)
// Size each output buffer to match the expected data for that layer.
330 std::vector<DataType2> out(it.second.size());
331 outputStorage.emplace(it.first, out);
332 outputTensors.push_back({ bindingInfo.first,
334 outputStorage.at(it.first).data()) });
// After inference (missing lines), compare per element with a 1e-6 tolerance
// rather than exact equality, to absorb floating-point rounding.
340 for (
auto&& it : expectedOutputData)
342 std::vector<armnn::ResolveType<armnnType2>> out = outputStorage.at(it.first);
344 for (
unsigned int i = 0; i < out.size(); ++i)
346 BOOST_TEST(it.second[i] == out[i], boost::test_tools::tolerance(0.000001f));
armnn::NetworkId m_NetworkIdentifier
float m_ScaleW
Center size encoding scale weight.
static std::string GenerateDetectionPostProcessJsonString(const armnn::DetectionPostProcessDescriptor &descriptor)
CPU Execution: Reference C++ kernels.
float m_ScaleX
Center size encoding scale x.
ParserFlatbuffersFixture()
boost::test_tools::predicate_result CompareTensors(const boost::multi_array< T, n > &a, const boost::multi_array< T, n > &b, bool compareBoolean=false, bool isDynamic=false)
void CheckTensors(const TensorRawPtr &tensors, size_t shapeSize, const std::vector< int32_t > &shape, tflite::TensorType tensorType, uint32_t buffer, const std::string &name, const std::vector< float > &min, const std::vector< float > &max, const std::vector< float > &scale, const std::vector< int64_t > &zeroPoint)
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
typename ResolveTypeImpl< DT >::Type ResolveType
bool ReadStringToBinary()
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
std::unique_ptr< ITfLiteParser, void(*)(ITfLiteParser *parser)> ITfLiteParserPtr
Copyright (c) 2020 ARM Limited.
void RunTest(size_t subgraphId, const std::vector< armnn::ResolveType< ArmnnType >> &inputData, const std::vector< armnn::ResolveType< ArmnnType >> &expectedOutputData)
Executes the network with the given input tensor and checks the result against the given output tensor.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
uint32_t m_MaxDetections
Maximum numbers of detections.
float m_NmsIouThreshold
Intersection over union threshold.
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
#define ARMNN_ASSERT_MSG(COND, MSG)
armnnSerializer::TensorInfo * TensorRawPtr
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
uint32_t m_NumClasses
Number of classes.
bool m_UseRegularNms
Use Regular NMS.
std::string m_SingleOutputName
std::vector< uint8_t > m_GraphBinary
armnn::IRuntimePtr m_Runtime
float m_ScaleH
Center size encoding scale height.
std::pair< armnn::LayerBindingId, armnn::TensorInfo > BindingPointInfo
void SetupSingleInputSingleOutput(const std::string &inputName, const std::string &outputName)
Base class for all ArmNN exceptions so that users can filter to just those.
void VerifyTensorInfoDataType(const armnn::TensorInfo &info, armnn::DataType dataType)
float m_ScaleY
Center size encoding scale y.
unsigned char g_TfLiteSchemaText[]
float m_NmsScoreThreshold
NMS score threshold.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
std::string m_SingleInputName
If the single-input-single-output overload of Setup() is called, these will store the input and output name so they don't need to be passed to the single-input-single-output overload of RunTest().
unsigned int GetNumDimensions() const
ITfLiteParserPtr m_Parser
unsigned int g_TfLiteSchemaText_len