21 #include <boost/filesystem.hpp> 22 #include <boost/assert.hpp> 23 #include <boost/format.hpp> 25 #include "flatbuffers/idl.h" 26 #include "flatbuffers/util.h" 27 #include "flatbuffers/flexbuffers.h" 29 #include <schema_generated.h> 44 ITfLiteParser::TfLiteParserOptions
options;
45 options.m_StandInLayerForUnsupported =
true;
69 m_Parser->CreateNetworkFromBinary(m_GraphBinary);
76 m_Runtime->GetDeviceSpec());
77 std::string errorMessage;
79 armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
85 boost::format(
"The runtime failed to load the network. " 86 "Error was: %1%. in %2% [%3%:%4%]") %
97 m_SingleInputName = inputName;
98 m_SingleOutputName = outputName;
107 flatbuffers::Parser parser;
109 bool ok = parser.Parse(schemafile.c_str());
110 BOOST_ASSERT_MSG(ok,
"Failed to parse schema file");
112 ok &= parser.Parse(m_JsonString.c_str());
113 BOOST_ASSERT_MSG(ok,
"Failed to parse json input");
121 const uint8_t * bufferPtr = parser.builder_.GetBufferPointer();
122 size_t size =
static_cast<size_t>(parser.builder_.GetSize());
123 m_GraphBinary.assign(bufferPtr, bufferPtr+size);
130 template <std::size_t NumOutputDimensions,
132 void RunTest(
size_t subgraphId,
138 template <std::size_t NumOutputDimensions,
140 void RunTest(
size_t subgraphId,
148 template <std::size_t NumOutputDimensions,
151 void RunTest(
size_t subgraphId,
162 void RunTest(std::size_t subgraphId,
169 flexbuffers::Builder detectPostProcess;
170 detectPostProcess.Map([&]() {
175 detectPostProcess.Int(
"num_classes", descriptor.
m_NumClasses);
178 detectPostProcess.Float(
"h_scale", descriptor.
m_ScaleH);
179 detectPostProcess.Float(
"w_scale", descriptor.
m_ScaleW);
180 detectPostProcess.Float(
"x_scale", descriptor.
m_ScaleX);
181 detectPostProcess.Float(
"y_scale", descriptor.
m_ScaleY);
183 detectPostProcess.Finish();
186 std::stringstream strStream;
187 std::vector<uint8_t> buffer = detectPostProcess.GetBuffer();
188 std::copy(buffer.begin(), buffer.end(),std::ostream_iterator<int>(strStream,
","));
190 return strStream.str();
194 tflite::TensorType tensorType, uint32_t buffer,
const std::string& name,
195 const std::vector<float>& min,
const std::vector<float>& max,
196 const std::vector<float>& scale,
const std::vector<int64_t>& zeroPoint)
199 BOOST_CHECK_EQUAL(shapeSize, tensors->shape.size());
200 BOOST_CHECK_EQUAL_COLLECTIONS(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end());
201 BOOST_CHECK_EQUAL(tensorType, tensors->type);
202 BOOST_CHECK_EQUAL(buffer, tensors->buffer);
203 BOOST_CHECK_EQUAL(name, tensors->name);
205 BOOST_CHECK_EQUAL_COLLECTIONS(min.begin(), min.end(), tensors->quantization.get()->min.begin(),
206 tensors->quantization.get()->min.end());
207 BOOST_CHECK_EQUAL_COLLECTIONS(max.begin(), max.end(), tensors->quantization.get()->max.begin(),
208 tensors->quantization.get()->max.end());
209 BOOST_CHECK_EQUAL_COLLECTIONS(scale.begin(), scale.end(), tensors->quantization.get()->scale.begin(),
210 tensors->quantization.get()->scale.end());
211 BOOST_CHECK_EQUAL_COLLECTIONS(zeroPoint.begin(), zeroPoint.end(),
212 tensors->quantization.get()->zero_point.begin(),
213 tensors->quantization.get()->zero_point.end());
220 template <std::size_t NumOutputDimensions,
226 RunTest<NumOutputDimensions, armnnType>(subgraphId,
234 template <std::size_t NumOutputDimensions,
240 RunTest<NumOutputDimensions, armnnType, armnnType>(subgraphId, inputData, expectedOutputData);
247 template <std::size_t NumOutputDimensions,
258 for (
auto&& it : inputData)
262 inputTensors.push_back({ bindingInfo.first,
armnn::ConstTensor(bindingInfo.second, it.second.data()) });
266 std::map<std::string, boost::multi_array<DataType2, NumOutputDimensions>> outputStorage;
268 for (
auto&& it : expectedOutputData)
275 BOOST_CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
276 boost::str(boost::format(
"Number of dimensions expected %1%, but got %2% for output layer %3%")
277 % NumOutputDimensions
278 % outputNumDimensions
282 outputStorage.emplace(it.first, MakeTensor<DataType2, NumOutputDimensions>(outputTensorInfo));
283 outputTensors.push_back(
284 { outputBindingId,
armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
290 for (
auto&& it : expectedOutputData)
293 auto outputExpected = MakeTensor<DataType2, NumOutputDimensions>(bindingInfo.second, it.second);
294 BOOST_TEST(
CompareTensors(outputExpected, outputStorage[it.first]));
312 for (
auto&& it : inputData)
317 inputTensors.push_back({ bindingInfo.first,
armnn::ConstTensor(bindingInfo.second, it.second.data()) });
321 outputTensors.reserve(expectedOutputData.size());
322 std::map<std::string, std::vector<DataType2>> outputStorage;
323 for (
auto&& it : expectedOutputData)
328 std::vector<DataType2> out(it.second.size());
329 outputStorage.emplace(it.first, out);
330 outputTensors.push_back({ bindingInfo.first,
332 outputStorage.at(it.first).data()) });
338 for (
auto&& it : expectedOutputData)
340 std::vector<armnn::ResolveType<armnnType2>> out = outputStorage.at(it.first);
342 for (
unsigned int i = 0; i < out.size(); ++i)
344 BOOST_TEST(it.second[i] == out[i], boost::test_tools::tolerance(0.000001f));
static std::string GenerateDetectionPostProcessJsonString(const armnn::DetectionPostProcessDescriptor &descriptor)
float m_ScaleX
Center size encoding scale x.
uint32_t m_MaxClassesPerDetection
Maximum number of classes per detection, used in Fast NMS.
uint32_t m_NumClasses
Number of classes.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
float m_ScaleW
Center size encoding scale width.
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
unsigned int GetNumDimensions() const
ITfLiteParserPtr m_Parser
void VerifyTensorInfoDataType(const armnn::TensorInfo &info, armnn::DataType dataType)
std::pair< armnn::LayerBindingId, armnn::TensorInfo > BindingPointInfo
float m_ScaleY
Center size encoding scale y.
typename ResolveTypeImpl< DT >::Type ResolveType
armnn::IRuntimePtr m_Runtime
void CheckTensors(const TensorRawPtr &tensors, size_t shapeSize, const std::vector< int32_t > &shape, tflite::TensorType tensorType, uint32_t buffer, const std::string &name, const std::vector< float > &min, const std::vector< float > &max, const std::vector< float > &scale, const std::vector< int64_t > &zeroPoint)
bool m_UseRegularNms
Use Regular NMS.
armnn::NetworkId m_NetworkIdentifier
unsigned int g_TfLiteSchemaText_len
std::string m_SingleInputName
float m_NmsIouThreshold
Intersection over union threshold.
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
uint32_t m_DetectionsPerClass
Detections per class, used in Regular NMS.
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
std::unique_ptr< ITfLiteParser, void(*)(ITfLiteParser *parser)> ITfLiteParserPtr
armnnSerializer::TensorInfo * TensorRawPtr
BOOST_CHECK(profilingService.GetCurrentState()==ProfilingState::WaitingForAck)
unsigned char g_TfLiteSchemaText[]
float m_NmsScoreThreshold
NMS score threshold.
Base class for all ArmNN exceptions so that users can filter to just those.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
CPU Execution: Reference C++ kernels.
void RunTest(size_t subgraphId, const std::vector< armnn::ResolveType< ArmnnType >> &inputData, const std::vector< armnn::ResolveType< ArmnnType >> &expectedOutputData)
std::string m_SingleOutputName
ParserFlatbuffersFixture()
armnn::Runtime::CreationOptions::ExternalProfilingOptions options
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
bool ReadStringToBinary()
std::vector< uint8_t > m_GraphBinary
void SetupSingleInputSingleOutput(const std::string &inputName, const std::string &outputName)
boost::test_tools::predicate_result CompareTensors(const boost::multi_array< T, n > &a, const boost::multi_array< T, n > &b, bool compareBoolean=false)
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
float m_ScaleH
Center size encoding scale height.
uint32_t m_MaxDetections
Maximum number of detections.