From 44cfd848c1913f87a77c0427450dba93ba47fb94 Mon Sep 17 00:00:00 2001 From: Aron Virginas-Tar Date: Fri, 14 Jun 2019 15:45:03 +0100 Subject: IVGCVSW-3283 Add test for converting CONV2D and DEPTHWISE_CONV2D operators with dilation params Signed-off-by: Aron Virginas-Tar Change-Id: I51a9c71d7a277ab530ac35faea2e8a069c134f45 --- test/1.1/Mean.cpp | 23 ++++-- test/1.1/Transpose.cpp | 32 +++++--- test/1.2/Dilation.cpp | 94 ++++++++++++++++++++++++ test/Android.mk | 86 ++++++++++++++++++++++ test/Concat.cpp | 23 +++--- test/Concurrent.cpp | 22 ++++-- test/Convolution2D.hpp | 16 ++-- test/Dilation.hpp | 179 +++++++++++++++++++++++++++++++++++++++++++++ test/DriverTestHelpers.hpp | 127 ++++++++++++++++++++++---------- test/FullyConnected.cpp | 48 ++++++------ test/GenericLayerTests.cpp | 111 ++++++++++++++++------------ test/Lstm.cpp | 119 +++++++++++++++++++----------- 12 files changed, 690 insertions(+), 190 deletions(-) create mode 100644 test/1.2/Dilation.cpp create mode 100644 test/Dilation.hpp (limited to 'test') diff --git a/test/1.1/Mean.cpp b/test/1.1/Mean.cpp index cf9ddcb2..6e96d84b 100644 --- a/test/1.1/Mean.cpp +++ b/test/1.1/Mean.cpp @@ -6,6 +6,8 @@ #include "../DriverTestHelpers.hpp" #include "../TestTensor.hpp" +#include "../1.1/HalPolicy.hpp" + #include #include @@ -15,6 +17,8 @@ using namespace android::hardware; using namespace driverTestHelpers; using namespace armnn_driver; +using HalPolicy = hal_1_1::HalPolicy; + namespace { @@ -34,14 +38,21 @@ void MeanTestImpl(const TestTensor& input, { auto driver = std::make_unique(DriverOptions(computeDevice, fp16Enabled)); - V1_1::Model model = {}; - AddInputOperand (model, input.GetDimensions()); - AddTensorOperand(model, axisDimensions, const_cast(axisValues), V1_0::OperandType::TENSOR_INT32); - AddIntOperand (model, keepDims); - AddOutputOperand(model, expectedOutput.GetDimensions()); + HalPolicy::Model model = {}; + + AddInputOperand(model, input.GetDimensions()); + + AddTensorOperand(model, + axisDimensions, + const_cast(axisValues), + HalPolicy::OperandType::TENSOR_INT32); + + AddIntOperand(model, keepDims); + + AddOutputOperand(model, expectedOutput.GetDimensions()); model.operations.resize(1); - model.operations[0].type = V1_1::OperationType::MEAN; + model.operations[0].type = HalPolicy::OperationType::MEAN; model.operations[0].inputs = hidl_vec{ 0, 1, 2 }; model.operations[0].outputs = hidl_vec{ 3 }; model.relaxComputationFloat32toFloat16 = fp16Enabled; diff --git a/test/1.1/Transpose.cpp b/test/1.1/Transpose.cpp index e32a25fe..f2c77b3f 100644 --- a/test/1.1/Transpose.cpp +++ b/test/1.1/Transpose.cpp @@ -2,14 +2,19 @@ // Copyright © 2017 Arm Ltd. All rights reserved. 
// SPDX-License-Identifier: MIT // +#include "OperationsUtils.h" + #include "../DriverTestHelpers.hpp" -#include -#include -#include #include "../TestTensor.hpp" -#include "OperationsUtils.h" + +#include "../1.1/HalPolicy.hpp" + +#include +#include #include +#include + #include BOOST_AUTO_TEST_SUITE(TransposeTests) @@ -18,6 +23,8 @@ using namespace android::hardware; using namespace driverTestHelpers; using namespace armnn_driver; +using HalPolicy = hal_1_1::HalPolicy; + namespace { @@ -31,14 +38,19 @@ void TransposeTestImpl(const TestTensor & inputs, int32_t perm[], const TestTensor & expectedOutputTensor, armnn::Compute computeDevice) { auto driver = std::make_unique(DriverOptions(computeDevice)); - V1_1::Model model = {}; + HalPolicy::Model model = {}; - AddInputOperand(model,inputs.GetDimensions()); - AddTensorOperand(model, hidl_vec{4}, perm, V1_0::OperandType::TENSOR_INT32); - AddOutputOperand(model, expectedOutputTensor.GetDimensions()); + AddInputOperand(model,inputs.GetDimensions()); + + AddTensorOperand(model, + hidl_vec{4}, + perm, + HalPolicy::OperandType::TENSOR_INT32); + + AddOutputOperand(model, expectedOutputTensor.GetDimensions()); model.operations.resize(1); - model.operations[0].type = V1_1::OperationType::TRANSPOSE; + model.operations[0].type = HalPolicy::OperationType::TRANSPOSE; model.operations[0].inputs = hidl_vec{0, 1}; model.operations[0].outputs = hidl_vec{2}; @@ -84,8 +96,8 @@ void TransposeTestImpl(const TestTensor & inputs, int32_t perm[], { BOOST_TEST(outdata[i] == expectedOutput[i]); } - } + } // namespace BOOST_DATA_TEST_CASE(Transpose , COMPUTE_DEVICES) diff --git a/test/1.2/Dilation.cpp b/test/1.2/Dilation.cpp new file mode 100644 index 00000000..1a7ba4b4 --- /dev/null +++ b/test/1.2/Dilation.cpp @@ -0,0 +1,94 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "../Dilation.hpp" + +#include "../../1.2/HalPolicy.hpp" + +#include + +BOOST_AUTO_TEST_SUITE(DilationTests) + +BOOST_AUTO_TEST_CASE(ConvolutionExplicitPaddingNoDilation) +{ + DilationTestOptions options; + options.m_IsDepthwiseConvolution = false; + options.m_IsPaddingExplicit = true; + options.m_HasDilation = false; + + DilationTestImpl(options); +} + +BOOST_AUTO_TEST_CASE(ConvolutionExplicitPaddingDilation) +{ + DilationTestOptions options; + options.m_IsDepthwiseConvolution = false; + options.m_IsPaddingExplicit = true; + options.m_HasDilation = true; + + DilationTestImpl(options); +} + +BOOST_AUTO_TEST_CASE(ConvolutionImplicitPaddingNoDilation) +{ + DilationTestOptions options; + options.m_IsDepthwiseConvolution = false; + options.m_IsPaddingExplicit = false; + options.m_HasDilation = false; + + DilationTestImpl(options); +} + +BOOST_AUTO_TEST_CASE(ConvolutionImplicitPaddingDilation) +{ + DilationTestOptions options; + options.m_IsDepthwiseConvolution = false; + options.m_IsPaddingExplicit = false; + options.m_HasDilation = true; + + DilationTestImpl(options); +} + +BOOST_AUTO_TEST_CASE(DepthwiseConvolutionExplicitPaddingNoDilation) +{ + DilationTestOptions options; + options.m_IsDepthwiseConvolution = true; + options.m_IsPaddingExplicit = true; + options.m_HasDilation = false; + + DilationTestImpl(options); +} + +BOOST_AUTO_TEST_CASE(DepthwiseConvolutionExplicitPaddingDilation) +{ + DilationTestOptions options; + options.m_IsDepthwiseConvolution = true; + options.m_IsPaddingExplicit = true; + options.m_HasDilation = true; + + DilationTestImpl(options); +} + +BOOST_AUTO_TEST_CASE(DepthwiseConvolutionImplicitPaddingNoDilation) +{ + DilationTestOptions options; + options.m_IsDepthwiseConvolution = true; + options.m_IsPaddingExplicit = false; + options.m_HasDilation = false; + + DilationTestImpl(options); +} + +BOOST_AUTO_TEST_CASE(DepthwiseConvolutionImplicitPaddingDilation) +{ + DilationTestOptions options; + options.m_IsDepthwiseConvolution = true; + options.m_IsPaddingExplicit = false; + options.m_HasDilation = true; + + DilationTestImpl(options); +} + +BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file diff --git a/test/Android.mk b/test/Android.mk index 8f62149e..0ba319b7 100644 --- a/test/Android.mk +++ b/test/Android.mk @@ -203,3 +203,89 @@ include $(BUILD_EXECUTABLE) endif # PLATFORM_VERSION == 9 +ifeq ($(Q_OR_LATER),1) +# The following target is available starting from Android Q + +########################## +# armnn-driver-tests@1.2 # +########################## +include $(CLEAR_VARS) + +LOCAL_MODULE := armnn-driver-tests@1.2 +LOCAL_MODULE_TAGS := optional + +LOCAL_ARM_MODE := arm +LOCAL_PROPRIETARY_MODULE := true + +# Mark source files as dependent on Android.mk +LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk + +LOCAL_C_INCLUDES := \ + $(OPENCL_HEADER_PATH) \ + $(NN_HEADER_PATH) \ + $(ARMNN_HEADER_PATH) \ + $(ARMNN_DRIVER_HEADER_PATH) + +LOCAL_CFLAGS := \ + -std=c++14 \ + -fexceptions \ + -Werror \ + -O0 \ + -UNDEBUG \ + -DARMNN_ANDROID_Q \ + -DARMNN_ANDROID_NN_V1_2 + +LOCAL_SRC_FILES := \ + 1.0/Convolution2D.cpp \ + 1.1/Convolution2D.cpp \ + 1.1/Mean.cpp \ + 1.1/Transpose.cpp \ + 1.2/Dilation.cpp \ + Tests.cpp \ + UtilsTests.cpp \ + Concurrent.cpp \ + FullyConnected.cpp \ + GenericLayerTests.cpp \ + DriverTestHelpers.cpp \ + SystemProperties.cpp \ + Lstm.cpp \ + Concat.cpp \ + TestTensor.cpp + +LOCAL_STATIC_LIBRARIES := \ + libneuralnetworks_common \ + libboost_log \ + libboost_system \ + 
libboost_unit_test_framework \ + libboost_thread \ + armnn-arm_compute + +LOCAL_WHOLE_STATIC_LIBRARIES := \ + libarmnn-driver@1.2 + +LOCAL_SHARED_LIBRARIES := \ + libbase \ + libcutils \ + libfmq \ + libhidlbase \ + libhidltransport \ + libhidlmemory \ + liblog \ + libnativewindow \ + libtextclassifier_hash \ + libui \ + libutils \ + android.hardware.neuralnetworks@1.0 \ + android.hardware.neuralnetworks@1.1 \ + android.hardware.neuralnetworks@1.2 \ + android.hidl.allocator@1.0 \ + android.hidl.memory@1.0 + +ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1) +LOCAL_SHARED_LIBRARIES+= \ + libOpenCL +endif + +include $(BUILD_EXECUTABLE) + +endif # PLATFORM_VERSION == Q \ No newline at end of file diff --git a/test/Concat.cpp b/test/Concat.cpp index b5ea689e..02d66cb8 100644 --- a/test/Concat.cpp +++ b/test/Concat.cpp @@ -4,11 +4,14 @@ // #include "DriverTestHelpers.hpp" #include "TestTensor.hpp" + +#include "../1.0/HalPolicy.hpp" + #include #include #include -#include +#include BOOST_AUTO_TEST_SUITE(ConcatTests) @@ -16,6 +19,8 @@ using namespace android::hardware; using namespace driverTestHelpers; using namespace armnn_driver; +using HalPolicy = hal_1_0::HalPolicy; + namespace { @@ -34,31 +39,31 @@ ConcatTestImpl(const std::vector & inputs, ErrorStatus expectedExecStatus=ErrorStatus::NONE) { std::unique_ptr driver = std::make_unique(DriverOptions(computeDevice)); - V1_0::Model model{}; + HalPolicy::Model model{}; hidl_vec modelInputIds; modelInputIds.resize(inputs.size()+1); for (uint32_t i = 0; iGetDimensions()); + AddInputOperand(model, inputs[i]->GetDimensions()); } modelInputIds[inputs.size()] = inputs.size(); // add an id for the axis too - AddIntOperand(model, concatAxis); - AddOutputOperand(model, expectedOutputTensor.GetDimensions()); + AddIntOperand(model, concatAxis); + AddOutputOperand(model, expectedOutputTensor.GetDimensions()); // make the concat operation model.operations.resize(1); - model.operations[0].type = V1_0::OperationType::CONCATENATION; + model.operations[0].type = HalPolicy::OperationType::CONCATENATION; model.operations[0].inputs = modelInputIds; model.operations[0].outputs = hidl_vec{static_cast(inputs.size()+1)}; // make the prepared model ErrorStatus prepareStatus=ErrorStatus::NONE; android::sp preparedModel = PrepareModelWithStatus(model, - *driver, - prepareStatus, - expectedPrepareStatus); + *driver, + prepareStatus, + expectedPrepareStatus); BOOST_TEST(prepareStatus == expectedPrepareStatus); if (prepareStatus != ErrorStatus::NONE) { diff --git a/test/Concurrent.cpp b/test/Concurrent.cpp index 0848a88c..87ac2e80 100644 --- a/test/Concurrent.cpp +++ b/test/Concurrent.cpp @@ -3,13 +3,19 @@ // SPDX-License-Identifier: MIT // #include "DriverTestHelpers.hpp" + +#include "../1.0/HalPolicy.hpp" + #include + #include BOOST_AUTO_TEST_SUITE(ConcurrentDriverTests) -using ArmnnDriver = armnn_driver::ArmnnDriver; +using ArmnnDriver = armnn_driver::ArmnnDriver; using DriverOptions = armnn_driver::DriverOptions; +using HalPolicy = armnn_driver::hal_1_0::HalPolicy; + using namespace android::nn; using namespace android::hardware; using namespace driverTestHelpers; @@ -24,22 +30,22 @@ BOOST_AUTO_TEST_CASE(ConcurrentExecute) ALOGI("ConcurrentExecute: entry"); auto driver = std::make_unique(DriverOptions(armnn::Compute::CpuRef)); - V1_0::Model model = {}; + HalPolicy::Model model = {}; // add operands int32_t actValue = 0; float weightValue[] = {2, 4, 1}; float biasValue[] = {4}; - AddInputOperand(model, hidl_vec{1, 3}); - AddTensorOperand(model, hidl_vec{1, 3}, weightValue); - 
AddTensorOperand(model, hidl_vec{1}, biasValue); - AddIntOperand(model, actValue); - AddOutputOperand(model, hidl_vec{1, 1}); + AddInputOperand(model, hidl_vec{1, 3}); + AddTensorOperand(model, hidl_vec{1, 3}, weightValue); + AddTensorOperand(model, hidl_vec{1}, biasValue); + AddIntOperand(model, actValue); + AddOutputOperand(model, hidl_vec{1, 1}); // make the fully connected operation model.operations.resize(1); - model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED; + model.operations[0].type = HalPolicy::OperationType::FULLY_CONNECTED; model.operations[0].inputs = hidl_vec{0, 1, 2, 3}; model.operations[0].outputs = hidl_vec{4}; diff --git a/test/Convolution2D.hpp b/test/Convolution2D.hpp index c8d573d7..ec43ae35 100644 --- a/test/Convolution2D.hpp +++ b/test/Convolution2D.hpp @@ -57,14 +57,14 @@ void PaddingTestImpl(android::nn::PaddingScheme paddingScheme, bool fp16Enabled float weightValue[] = {1.f, -1.f, 0.f, 1.f}; float biasValue[] = {0.f}; - AddInputOperand(model, hidl_vec{1, 2, 3, 1}); - AddTensorOperand(model, hidl_vec{1, 2, 2, 1}, weightValue); - AddTensorOperand(model, hidl_vec{1}, biasValue); - AddIntOperand(model, (int32_t)paddingScheme); // padding - AddIntOperand(model, 2); // stride x - AddIntOperand(model, 2); // stride y - AddIntOperand(model, 0); // no activation - AddOutputOperand(model, hidl_vec{1, 1, outSize, 1}); + AddInputOperand(model, hidl_vec{1, 2, 3, 1}); + AddTensorOperand(model, hidl_vec{1, 2, 2, 1}, weightValue); + AddTensorOperand(model, hidl_vec{1}, biasValue); + AddIntOperand(model, (int32_t)paddingScheme); // padding + AddIntOperand(model, 2); // stride x + AddIntOperand(model, 2); // stride y + AddIntOperand(model, 0); // no activation + AddOutputOperand(model, hidl_vec{1, 1, outSize, 1}); // make the convolution operation model.operations.resize(1); diff --git a/test/Dilation.hpp b/test/Dilation.hpp new file mode 100644 index 00000000..adc9947d --- /dev/null +++ b/test/Dilation.hpp @@ -0,0 +1,179 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "DriverTestHelpers.hpp"
+
+#include <armnn/LayerVisitorBase.hpp>
+
+#include <boost/core/ignore_unused.hpp>
+#include <boost/test/unit_test.hpp>
+
+#include <numeric>
+
+BOOST_AUTO_TEST_SUITE(DilationTests)
+
+using namespace armnn;
+using namespace boost;
+using namespace driverTestHelpers;
+
+struct DilationTestOptions
+{
+    DilationTestOptions() :
+        m_IsDepthwiseConvolution{false},
+        m_IsPaddingExplicit{false},
+        m_HasDilation{false}
+    {}
+
+    ~DilationTestOptions() = default;
+
+    bool m_IsDepthwiseConvolution;
+    bool m_IsPaddingExplicit;
+    bool m_HasDilation;
+};
+
+class DilationTestVisitor : public LayerVisitorBase<VisitorThrowingPolicy>
+{
+public:
+    DilationTestVisitor() :
+        DilationTestVisitor(1u, 1u)
+    {}
+
+    DilationTestVisitor(uint32_t expectedDilationX, uint32_t expectedDilationY) :
+        m_ExpectedDilationX{expectedDilationX},
+        m_ExpectedDilationY{expectedDilationY}
+    {}
+
+    void VisitConvolution2dLayer(const IConnectableLayer *layer,
+                                 const Convolution2dDescriptor& descriptor,
+                                 const ConstTensor& weights,
+                                 const Optional<ConstTensor>& biases,
+                                 const char *name = nullptr) override
+    {
+        ignore_unused(layer);
+        ignore_unused(weights);
+        ignore_unused(biases);
+        ignore_unused(name);
+
+        CheckDilationParams(descriptor);
+    }
+
+    void VisitDepthwiseConvolution2dLayer(const IConnectableLayer *layer,
+                                          const DepthwiseConvolution2dDescriptor& descriptor,
+                                          const ConstTensor& weights,
+                                          const Optional<ConstTensor>& biases,
+                                          const char *name = nullptr) override
+    {
+        ignore_unused(layer);
+        ignore_unused(weights);
+        ignore_unused(biases);
+        ignore_unused(name);
+
+        CheckDilationParams(descriptor);
+    }
+
+private:
+    uint32_t m_ExpectedDilationX;
+    uint32_t m_ExpectedDilationY;
+
+    template<typename ConvolutionDescriptor>
+    void CheckDilationParams(const ConvolutionDescriptor& descriptor)
+    {
+        BOOST_CHECK_EQUAL(descriptor.m_DilationX, m_ExpectedDilationX);
+        BOOST_CHECK_EQUAL(descriptor.m_DilationY, m_ExpectedDilationY);
+    }
+};
+
+template<typename HalPolicy>
+void DilationTestImpl(const DilationTestOptions& options)
+{
+    using HalModel         = typename HalPolicy::Model;
+    using HalOperationType = typename HalPolicy::OperationType;
+
+    const armnn::Compute backend = armnn::Compute::CpuRef;
+    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(backend, false));
+    HalModel model = {};
+
+    // add operands
+    std::vector<float> weightData(9, 1.0f);
+    std::vector<float> biasData(1, 0.0f);
+
+    // input
+    AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3, 3, 1});
+
+    // weights & biases
+    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 3, 3, 1}, weightData.data());
+    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasData.data());
+
+    uint32_t numInputs = 3u;
+    // padding
+    if (options.m_IsPaddingExplicit)
+    {
+        AddIntOperand<HalPolicy>(model, 1);
+        AddIntOperand<HalPolicy>(model, 1);
+        AddIntOperand<HalPolicy>(model, 1);
+        AddIntOperand<HalPolicy>(model, 1);
+        numInputs += 4;
+    }
+    else
+    {
+        AddIntOperand<HalPolicy>(model, android::nn::kPaddingSame);
+        numInputs += 1;
+    }
+
+    AddIntOperand<HalPolicy>(model, 2); // stride x
+    AddIntOperand<HalPolicy>(model, 2); // stride y
+    numInputs += 2;
+
+    if (options.m_IsDepthwiseConvolution)
+    {
+        AddIntOperand<HalPolicy>(model, 1); // depth multiplier
+        numInputs++;
+    }
+
+    AddIntOperand<HalPolicy>(model, 0); // no activation
+    numInputs += 1;
+
+    // dilation
+    if (options.m_HasDilation)
+    {
+        AddBoolOperand<HalPolicy>(model, false); // default data layout
+
+        AddIntOperand<HalPolicy>(model, 2); // dilation X
+        AddIntOperand<HalPolicy>(model, 2); // dilation Y
+
+        numInputs += 3;
+    }
+
+    // output
+    AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, 1, 1});
+
+    // set up the convolution operation
+    model.operations.resize(1);
+    model.operations[0].type = options.m_IsDepthwiseConvolution ?
+ HalOperationType::DEPTHWISE_CONV_2D : HalOperationType::CONV_2D; + + std::vector inputs(numInputs); + std::iota(inputs.begin(), inputs.end(), 0u); + std::vector outputs = { numInputs }; + + model.operations[0].inputs = hidl_vec(inputs); + model.operations[0].outputs = hidl_vec(outputs); + + // convert model + ConversionData data({backend}); + data.m_Network = armnn::INetwork::Create(); + data.m_OutputSlotForOperand = std::vector(model.operands.size(), nullptr); + + bool ok = HalPolicy::ConvertOperation(model.operations[0], model, data); + BOOST_CHECK(ok); + + // check if dilation params are as expected + DilationTestVisitor visitor = options.m_HasDilation ? DilationTestVisitor(2, 2) : DilationTestVisitor(); + data.m_Network->Accept(visitor); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp index 4a8f607e..428359e2 100644 --- a/test/DriverTestHelpers.hpp +++ b/test/DriverTestHelpers.hpp @@ -73,29 +73,58 @@ android::sp AddPoolAndGetData(uint32_t size, Request& request); void AddPoolAndSetData(uint32_t size, Request& request, const float* data); -template -void AddOperand(HalModel& model, const V1_0::Operand& op) +template +void AddOperand(HalModel& model, const HalOperand& op) { model.operands.resize(model.operands.size() + 1); model.operands[model.operands.size() - 1] = op; } -template +template void AddIntOperand(HalModel& model, int32_t value) { + using HalOperand = typename HalPolicy::Operand; + using HalOperandType = typename HalPolicy::OperandType; + using HalOperandLifeTime = typename HalPolicy::OperandLifeTime; + DataLocation location = {}; location.offset = model.operandValues.size(); location.length = sizeof(int32_t); - V1_0::Operand op = {}; - op.type = V1_0::OperandType::INT32; - op.dimensions = hidl_vec{}; - op.lifetime = V1_0::OperandLifeTime::CONSTANT_COPY; - op.location = location; + HalOperand op = {}; + op.type = HalOperandType::INT32; + op.dimensions = hidl_vec{}; + op.lifetime = HalOperandLifeTime::CONSTANT_COPY; + op.location = location; model.operandValues.resize(model.operandValues.size() + location.length); *reinterpret_cast(&model.operandValues[location.offset]) = value; + AddOperand(model, op); +} + +template +void AddBoolOperand(HalModel& model, bool value) +{ + using HalOperand = typename HalPolicy::Operand; + using HalOperandType = typename HalPolicy::OperandType; + using HalOperandLifeTime = typename HalPolicy::OperandLifeTime; + + DataLocation location = {}; + location.offset = model.operandValues.size(); + location.length = sizeof(uint8_t); + + HalOperand op = {}; + op.type = HalOperandType::BOOL; + op.dimensions = hidl_vec{}; + op.lifetime = HalOperandLifeTime::CONSTANT_COPY; + op.location = location; + + model.operandValues.resize(model.operandValues.size() + location.length); + *reinterpret_cast(&model.operandValues[location.offset]) = static_cast(value); + AddOperand(model, op); } @@ -108,13 +137,19 @@ OperandType TypeToOperandType(); template<> OperandType TypeToOperandType(); -template +template void AddTensorOperand(HalModel& model, const hidl_vec& dimensions, const T* values, - V1_0::OperandType operandType = V1_0::OperandType::TENSOR_FLOAT32, - V1_0::OperandLifeTime operandLifeTime = V1_0::OperandLifeTime::CONSTANT_COPY) + HalOperandType operandType = HalOperandType::TENSOR_FLOAT32, + HalOperandLifeTime operandLifeTime = HalOperandLifeTime::CONSTANT_COPY) { + using HalOperand = typename HalPolicy::Operand; + uint32_t totalElements = 1; for (uint32_t dim : dimensions) { @@ -124,16 +159,16 @@ void 
AddTensorOperand(HalModel& model, DataLocation location = {}; location.length = totalElements * sizeof(T); - if(operandLifeTime == V1_0::OperandLifeTime::CONSTANT_COPY) + if(operandLifeTime == HalOperandLifeTime::CONSTANT_COPY) { location.offset = model.operandValues.size(); } - V1_0::Operand op = {}; - op.type = operandType; - op.dimensions = dimensions; - op.lifetime = V1_0::OperandLifeTime::CONSTANT_COPY; - op.location = location; + HalOperand op = {}; + op.type = operandType; + op.dimensions = dimensions; + op.lifetime = HalOperandLifeTime::CONSTANT_COPY; + op.location = location; model.operandValues.resize(model.operandValues.size() + location.length); for (uint32_t i = 0; i < totalElements; i++) @@ -141,48 +176,62 @@ void AddTensorOperand(HalModel& model, *(reinterpret_cast(&model.operandValues[location.offset]) + i) = values[i]; } - AddOperand(model, op); + AddOperand(model, op); } -template +template void AddTensorOperand(HalModel& model, const hidl_vec& dimensions, const std::vector& values, - V1_0::OperandType operandType = V1_0::OperandType::TENSOR_FLOAT32, - V1_0::OperandLifeTime operandLifeTime = V1_0::OperandLifeTime::CONSTANT_COPY) + HalOperandType operandType = HalPolicy::OperandType::TENSOR_FLOAT32, + HalOperandLifeTime operandLifeTime = HalOperandLifeTime::CONSTANT_COPY) { - AddTensorOperand(model, dimensions, values.data(), operandType, operandLifeTime); + AddTensorOperand(model, dimensions, values.data(), operandType, operandLifeTime); } -template +template void AddInputOperand(HalModel& model, const hidl_vec& dimensions, - V1_0::OperandType operandType = V1_0::OperandType::TENSOR_FLOAT32) + HalOperandType operandType = HalOperandType::TENSOR_FLOAT32) { - V1_0::Operand op = {}; - op.type = operandType; - op.scale = operandType == V1_0::OperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f; - op.dimensions = dimensions; - op.lifetime = V1_0::OperandLifeTime::MODEL_INPUT; + using HalOperand = typename HalPolicy::Operand; + using HalOperandLifeTime = typename HalPolicy::OperandLifeTime; - AddOperand(model, op); + HalOperand op = {}; + op.type = operandType; + op.scale = operandType == HalOperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f; + op.dimensions = dimensions; + op.lifetime = HalOperandLifeTime::MODEL_INPUT; + + AddOperand(model, op); model.inputIndexes.resize(model.inputIndexes.size() + 1); model.inputIndexes[model.inputIndexes.size() - 1] = model.operands.size() - 1; } -template +template void AddOutputOperand(HalModel& model, const hidl_vec& dimensions, - V1_0::OperandType operandType = V1_0::OperandType::TENSOR_FLOAT32) + HalOperandType operandType = HalOperandType::TENSOR_FLOAT32) { - V1_0::Operand op = {}; - op.type = operandType; - op.scale = operandType == V1_0::OperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f; - op.dimensions = dimensions; - op.lifetime = V1_0::OperandLifeTime::MODEL_OUTPUT; + using HalOperand = typename HalPolicy::Operand; + using HalOperandLifeTime = typename HalPolicy::OperandLifeTime; - AddOperand(model, op); + HalOperand op = {}; + op.type = operandType; + op.scale = operandType == HalOperandType::TENSOR_QUANT8_ASYMM ? 
1.f / 255.f : 0.f; + op.dimensions = dimensions; + op.lifetime = HalOperandLifeTime::MODEL_OUTPUT; + + AddOperand(model, op); model.outputIndexes.resize(model.outputIndexes.size() + 1); model.outputIndexes[model.outputIndexes.size() - 1] = model.operands.size() - 1; @@ -204,7 +253,7 @@ android::sp PrepareModelWithStatus(const V1_1::Model& mode template android::sp PrepareModel(const HalModel& model, - armnn_driver::ArmnnDriver& driver) + armnn_driver::ArmnnDriver& driver) { ErrorStatus prepareStatus = ErrorStatus::NONE; return PrepareModelWithStatus(model, driver, prepareStatus); diff --git a/test/FullyConnected.cpp b/test/FullyConnected.cpp index 6ab63ff2..de885153 100644 --- a/test/FullyConnected.cpp +++ b/test/FullyConnected.cpp @@ -3,7 +3,11 @@ // SPDX-License-Identifier: MIT // #include "DriverTestHelpers.hpp" + +#include "../1.0/HalPolicy.hpp" + #include + #include BOOST_AUTO_TEST_SUITE(FullyConnectedTests) @@ -12,6 +16,8 @@ using namespace android::hardware; using namespace driverTestHelpers; using namespace armnn_driver; +using HalPolicy = hal_1_0::HalPolicy; + // Add our own test here since we fail the fc tests which Google supplies (because of non-const weights) BOOST_AUTO_TEST_CASE(FullyConnected) { @@ -19,22 +25,22 @@ BOOST_AUTO_TEST_CASE(FullyConnected) // but that uses slightly weird dimensions which I don't think we need to support for now auto driver = std::make_unique(DriverOptions(armnn::Compute::CpuRef)); - V1_0::Model model = {}; + HalPolicy::Model model = {}; // add operands int32_t actValue = 0; float weightValue[] = {2, 4, 1}; float biasValue[] = {4}; - AddInputOperand(model, hidl_vec{1, 3}); - AddTensorOperand(model, hidl_vec{1, 3}, weightValue); - AddTensorOperand(model, hidl_vec{1}, biasValue); - AddIntOperand(model, actValue); - AddOutputOperand(model, hidl_vec{1, 1}); + AddInputOperand(model, hidl_vec{1, 3}); + AddTensorOperand(model, hidl_vec{1, 3}, weightValue); + AddTensorOperand(model, hidl_vec{1}, biasValue); + AddIntOperand(model, actValue); + AddOutputOperand(model, hidl_vec{1, 1}); // make the fully connected operation model.operations.resize(1); - model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED; + model.operations[0].type = HalPolicy::OperationType::FULLY_CONNECTED; model.operations[0].inputs = hidl_vec{0, 1, 2, 3}; model.operations[0].outputs = hidl_vec{4}; @@ -90,7 +96,7 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput) sup = supported; }; - V1_0::Model model = {}; + HalPolicy::Model model = {}; // operands int32_t actValue = 0; @@ -105,15 +111,15 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput) float biasValue[] = {0, 0, 0, 0, 0, 0, 0, 0}; // fully connected operation - AddInputOperand(model, hidl_vec{1, 1, 1, 8}); - AddTensorOperand(model, hidl_vec{8, 8}, weightValue); - AddTensorOperand(model, hidl_vec{8}, biasValue); - AddIntOperand(model, actValue); - AddOutputOperand(model, hidl_vec{1, 8}); + AddInputOperand(model, hidl_vec{1, 1, 1, 8}); + AddTensorOperand(model, hidl_vec{8, 8}, weightValue); + AddTensorOperand(model, hidl_vec{8}, biasValue); + AddIntOperand(model, actValue); + AddOutputOperand(model, hidl_vec{1, 8}); model.operations.resize(1); - model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED; + model.operations[0].type = HalPolicy::OperationType::FULLY_CONNECTED; model.operations[0].inputs = hidl_vec{0,1,2,3}; model.operations[0].outputs = hidl_vec{4}; @@ -177,7 +183,7 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape) sup = supported; }; - V1_0::Model model = {}; + HalPolicy::Model model = {}; // 
operands int32_t actValue = 0; @@ -192,15 +198,15 @@ BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape) float biasValue[] = {0, 0, 0, 0, 0, 0, 0, 0}; // fully connected operation - AddInputOperand(model, hidl_vec{1, 2, 2, 2}); - AddTensorOperand(model, hidl_vec{8, 8}, weightValue); - AddTensorOperand(model, hidl_vec{8}, biasValue); - AddIntOperand(model, actValue); - AddOutputOperand(model, hidl_vec{1, 8}); + AddInputOperand(model, hidl_vec{1, 2, 2, 2}); + AddTensorOperand(model, hidl_vec{8, 8}, weightValue); + AddTensorOperand(model, hidl_vec{8}, biasValue); + AddIntOperand(model, actValue); + AddOutputOperand(model, hidl_vec{1, 8}); model.operations.resize(1); - model.operations[0].type = V1_0::OperationType::FULLY_CONNECTED; + model.operations[0].type = HalPolicy::OperationType::FULLY_CONNECTED; model.operations[0].inputs = hidl_vec{0,1,2,3}; model.operations[0].outputs = hidl_vec{4}; diff --git a/test/GenericLayerTests.cpp b/test/GenericLayerTests.cpp index ccd4caa2..6b51fb93 100644 --- a/test/GenericLayerTests.cpp +++ b/test/GenericLayerTests.cpp @@ -3,7 +3,11 @@ // SPDX-License-Identifier: MIT // #include "DriverTestHelpers.hpp" + +#include "../1.0/HalPolicy.hpp" + #include + #include BOOST_AUTO_TEST_SUITE(GenericLayerTests) @@ -12,6 +16,8 @@ using namespace android::hardware; using namespace driverTestHelpers; using namespace armnn_driver; +using HalPolicy = hal_1_0::HalPolicy; + BOOST_AUTO_TEST_CASE(GetSupportedOperations) { auto driver = std::make_unique(DriverOptions(armnn::Compute::CpuRef)); @@ -25,23 +31,23 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations) supported = _supported; }; - V1_0::Model model0 = {}; + HalPolicy::Model model0 = {}; // Add operands int32_t actValue = 0; float weightValue[] = {2, 4, 1}; float biasValue[] = {4}; - AddInputOperand (model0, hidl_vec{1, 3}); - AddTensorOperand(model0, hidl_vec{1, 3}, weightValue); - AddTensorOperand(model0, hidl_vec{1}, biasValue); - AddIntOperand (model0, actValue); - AddOutputOperand(model0, hidl_vec{1, 1}); + AddInputOperand(model0, hidl_vec{1, 3}); + AddTensorOperand(model0, hidl_vec{1, 3}, weightValue); + AddTensorOperand(model0, hidl_vec{1}, biasValue); + AddIntOperand(model0, actValue); + AddOutputOperand(model0, hidl_vec{1, 1}); model0.operations.resize(1); // Make a correct fully connected operation - model0.operations[0].type = V1_0::OperationType::FULLY_CONNECTED; + model0.operations[0].type = HalPolicy::OperationType::FULLY_CONNECTED; model0.operations[0].inputs = hidl_vec{0, 1, 2, 3}; model0.operations[0].outputs = hidl_vec{4}; @@ -52,23 +58,24 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations) V1_0::Model model1 = {}; - AddInputOperand (model1, hidl_vec{1, 3}); - AddTensorOperand(model1, hidl_vec{1, 3}, weightValue); - AddTensorOperand(model1, hidl_vec{1}, biasValue); - AddIntOperand (model1, actValue); - AddOutputOperand(model1, hidl_vec{1, 1}); + AddInputOperand(model1, hidl_vec{1, 3}); + AddTensorOperand(model1, hidl_vec{1, 3}, weightValue); + AddTensorOperand(model1, hidl_vec{1}, biasValue); + AddIntOperand(model1, actValue); + AddOutputOperand(model1, hidl_vec{1, 1}); model1.operations.resize(2); // Make a correct fully connected operation - model1.operations[0].type = V1_0::OperationType::FULLY_CONNECTED; + model1.operations[0].type = HalPolicy::OperationType::FULLY_CONNECTED; model1.operations[0].inputs = hidl_vec{0, 1, 2, 3}; model1.operations[0].outputs = hidl_vec{4}; // Add an incorrect fully connected operation - AddIntOperand (model1, actValue); - AddOutputOperand(model1, hidl_vec{1, 1}); - 
model1.operations[1].type = V1_0::OperationType::FULLY_CONNECTED; + AddIntOperand(model1, actValue); + AddOutputOperand(model1, hidl_vec{1, 1}); + + model1.operations[1].type = HalPolicy::OperationType::FULLY_CONNECTED; model1.operations[1].inputs = hidl_vec{4}; // Only 1 input operand, expected 4 model1.operations[1].outputs = hidl_vec{5}; @@ -89,21 +96,21 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations) #endif // Test Broadcast on add/mul operators - V1_0::Model model2 = {}; + HalPolicy::Model model2 = {}; - AddInputOperand (model2, hidl_vec{1, 1, 3, 4}); - AddInputOperand (model2, hidl_vec{4}); - AddIntOperand (model2, actValue); - AddOutputOperand(model2, hidl_vec{1, 1, 3, 4}); - AddOutputOperand(model2, hidl_vec{1, 1, 3, 4}); + AddInputOperand(model2, hidl_vec{1, 1, 3, 4}); + AddInputOperand(model2, hidl_vec{4}); + AddIntOperand(model2, actValue); + AddOutputOperand(model2, hidl_vec{1, 1, 3, 4}); + AddOutputOperand(model2, hidl_vec{1, 1, 3, 4}); model2.operations.resize(2); - model2.operations[0].type = V1_0::OperationType::ADD; + model2.operations[0].type = HalPolicy::OperationType::ADD; model2.operations[0].inputs = hidl_vec{0, 1, 2}; model2.operations[0].outputs = hidl_vec{3}; - model2.operations[1].type = V1_0::OperationType::MUL; + model2.operations[1].type = HalPolicy::OperationType::MUL; model2.operations[1].inputs = hidl_vec{0, 1, 2}; model2.operations[1].outputs = hidl_vec{4}; @@ -115,14 +122,14 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations) V1_0::Model model3 = {}; - AddInputOperand (model3, hidl_vec{1, 1, 1, 8}); - AddIntOperand (model3, 2); - AddOutputOperand(model3, hidl_vec{1, 2, 2, 2}); + AddInputOperand(model3, hidl_vec{1, 1, 1, 8}); + AddIntOperand(model3, 2); + AddOutputOperand(model3, hidl_vec{1, 2, 2, 2}); model3.operations.resize(1); // Add unsupported operation, should return no error but we don't support it - model3.operations[0].type = V1_0::OperationType::DEPTH_TO_SPACE; + model3.operations[0].type = HalPolicy::OperationType::DEPTH_TO_SPACE; model3.operations[0].inputs = hidl_vec{0, 1}; model3.operations[0].outputs = hidl_vec{2}; @@ -131,14 +138,14 @@ BOOST_AUTO_TEST_CASE(GetSupportedOperations) BOOST_TEST(supported.size() == (size_t)1); BOOST_TEST(supported[0] == false); - V1_0::Model model4 = {}; + HalPolicy::Model model4 = {}; - AddIntOperand(model4, 0); + AddIntOperand(model4, 0); model4.operations.resize(1); // Add invalid operation - model4.operations[0].type = static_cast(100); + model4.operations[0].type = static_cast(100); model4.operations[0].outputs = hidl_vec{0}; driver->getSupportedOperations(model4, cb); @@ -162,7 +169,7 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure) supported = _supported; }; - V1_0::Model model = {}; + HalPolicy::Model model = {}; // Operands int32_t actValue = 0; @@ -170,36 +177,46 @@ BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure) float biasValue[] = {4}; // HASHTABLE_LOOKUP is unsupported at the time of writing this test, but any unsupported layer will do - AddInputOperand (model, hidl_vec{1, 1, 3, 4}, V1_0::OperandType::TENSOR_INT32); - AddInputOperand (model, hidl_vec{4}, V1_0::OperandType::TENSOR_INT32); - AddInputOperand (model, hidl_vec{1, 1, 3, 4}); - AddOutputOperand(model, hidl_vec{1, 1, 3, 4}); - AddOutputOperand(model, hidl_vec{1, 1, 3, 4}, V1_0::OperandType::TENSOR_QUANT8_ASYMM); + AddInputOperand(model, + hidl_vec{1, 1, 3, 4}, + HalPolicy::OperandType::TENSOR_INT32); + AddInputOperand(model, + hidl_vec{4}, + HalPolicy::OperandType::TENSOR_INT32); + AddInputOperand(model, hidl_vec{1, 1, 3, 4}); 
+ + AddOutputOperand(model, hidl_vec{1, 1, 3, 4}); + AddOutputOperand(model, + hidl_vec{1, 1, 3, 4}, + HalPolicy::OperandType::TENSOR_QUANT8_ASYMM); // Fully connected is supported - AddInputOperand (model, hidl_vec{1, 3}); - AddTensorOperand(model, hidl_vec{1, 3}, weightValue); - AddTensorOperand(model, hidl_vec{1}, biasValue); - AddIntOperand (model, actValue); - AddOutputOperand(model, hidl_vec{1, 1}); + AddInputOperand(model, hidl_vec{1, 3}); + + AddTensorOperand(model, hidl_vec{1, 3}, weightValue); + AddTensorOperand(model, hidl_vec{1}, biasValue); + + AddIntOperand(model, actValue); + + AddOutputOperand(model, hidl_vec{1, 1}); // EMBEDDING_LOOKUP is unsupported - AddOutputOperand(model, hidl_vec{1, 1, 3, 4}); + AddOutputOperand(model, hidl_vec{1, 1, 3, 4}); model.operations.resize(3); // Unsupported - model.operations[0].type = V1_0::OperationType::HASHTABLE_LOOKUP; + model.operations[0].type = HalPolicy::OperationType::HASHTABLE_LOOKUP; model.operations[0].inputs = hidl_vec{0, 1, 2}; model.operations[0].outputs = hidl_vec{3, 4}; // Supported - model.operations[1].type = V1_0::OperationType::FULLY_CONNECTED; + model.operations[1].type = HalPolicy::OperationType::FULLY_CONNECTED; model.operations[1].inputs = hidl_vec{5, 6, 7, 8}; model.operations[1].outputs = hidl_vec{9}; // Unsupported - model.operations[2].type = V1_0::OperationType::EMBEDDING_LOOKUP; + model.operations[2].type = HalPolicy::OperationType::EMBEDDING_LOOKUP; model.operations[2].inputs = hidl_vec{1, 2}; model.operations[2].outputs = hidl_vec{10}; @@ -227,7 +244,7 @@ BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail) supported = _supported; }; - V1_0::Model model = {}; + HalPolicy::Model model = {}; model.pools = hidl_vec{hidl_memory("Unsuported hidl memory type", nullptr, 0)}; diff --git a/test/Lstm.cpp b/test/Lstm.cpp index 56812326..579524ca 100644 --- a/test/Lstm.cpp +++ b/test/Lstm.cpp @@ -5,6 +5,8 @@ #include "DriverTestHelpers.hpp" #include "OperationsUtils.h" +#include "../1.0/HalPolicy.hpp" + #include #include #include @@ -15,8 +17,10 @@ BOOST_AUTO_TEST_SUITE(LstmTests) -using ArmnnDriver = armnn_driver::ArmnnDriver; +using ArmnnDriver = armnn_driver::ArmnnDriver; using DriverOptions = armnn_driver::DriverOptions; +using HalPolicy = armnn_driver::hal_1_0::HalPolicy; + using namespace driverTestHelpers; using namespace android::hardware; @@ -128,99 +132,130 @@ void LstmTestImpl(const hidl_vec& inputDimensions, armnn::Compute compute) { auto driver = std::make_unique(DriverOptions(compute)); - V1_0::Model model = {}; + HalPolicy::Model model = {}; // Inputs: // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input. - AddInputOperand(model, inputDimensions); + AddInputOperand(model, inputDimensions); // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape // [num_units, input_size], where “num_units” corresponds to the number of cell units. - AddTensorOperand(model, inputToInputWeightsDimensions, inputToInputWeightsValue, V1_0::OperandType::TENSOR_FLOAT32, - CreateNoValueLifeTime(inputToInputWeightsDimensions)); + AddTensorOperand(model, + inputToInputWeightsDimensions, + inputToInputWeightsValue, + HalPolicy::OperandType::TENSOR_FLOAT32, + CreateNoValueLifeTime(inputToInputWeightsDimensions)); // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape // [num_units, input_size]. 
-    AddTensorOperand(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
-    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
-    AddTensorOperand(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
+    AddTensorOperand<HalPolicy>(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
+    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    // [num_units, input_size].
+    AddTensorOperand<HalPolicy>(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
     // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
     // [num_units, input_size].
-    AddTensorOperand(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
+    AddTensorOperand<HalPolicy>(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
     // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
     // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
     // “num_units”), or the second dimension of the “projection_weights”, if defined.
-    AddTensorOperand(model, recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
-                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
+    AddTensorOperand<HalPolicy>(model,
+                                recurrentToInputWeightsDimensions,
+                                recurrentToInputWeightsValue,
+                                HalPolicy::OperandType::TENSOR_FLOAT32,
+                                CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
     // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
     // [num_units, output_size].
-    AddTensorOperand(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
+    AddTensorOperand<HalPolicy>(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
     // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
     // [num_units, output_size].
-    AddTensorOperand(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
+    AddTensorOperand<HalPolicy>(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
     // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
     // [num_units, output_size].
-    AddTensorOperand(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
+    AddTensorOperand<HalPolicy>(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
     // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
-    AddTensorOperand(model, cellToInputWeightsDimensions, cellToInputWeightsValue,
-                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToInputWeightsDimensions));
+    AddTensorOperand<HalPolicy>(model,
+                                cellToInputWeightsDimensions,
+                                cellToInputWeightsValue,
+                                HalPolicy::OperandType::TENSOR_FLOAT32,
+                                CreateNoValueLifeTime(cellToInputWeightsDimensions));
     // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
-    AddTensorOperand(model, cellToForgetWeightsDimensions, cellToForgetWeightsValue,
-                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToForgetWeightsDimensions));
+    AddTensorOperand<HalPolicy>(model,
+                                cellToForgetWeightsDimensions,
+                                cellToForgetWeightsValue,
+                                HalPolicy::OperandType::TENSOR_FLOAT32,
+                                CreateNoValueLifeTime(cellToForgetWeightsDimensions));
     // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
-    AddTensorOperand(model, cellToOutputWeightsDimensions, cellToOutputWeightsValue,
-                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToOutputWeightsDimensions));
+    AddTensorOperand<HalPolicy>(model,
+                                cellToOutputWeightsDimensions,
+                                cellToOutputWeightsValue,
+                                HalPolicy::OperandType::TENSOR_FLOAT32,
+                                CreateNoValueLifeTime(cellToOutputWeightsDimensions));
     // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
-    AddTensorOperand(model, inputGateBiasDimensions, inputGateBiasValue,
-                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(inputGateBiasDimensions));
+    AddTensorOperand<HalPolicy>(model,
+                                inputGateBiasDimensions,
+                                inputGateBiasValue,
+                                HalPolicy::OperandType::TENSOR_FLOAT32,
+                                CreateNoValueLifeTime(inputGateBiasDimensions));
     // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
-    AddTensorOperand(model, forgetGateBiasDimensions, forgetGateBiasValue);
+    AddTensorOperand<HalPolicy>(model, forgetGateBiasDimensions, forgetGateBiasValue);
     // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
-    AddTensorOperand(model, cellBiasDimensions, cellBiasValue);
+    AddTensorOperand<HalPolicy>(model, cellBiasDimensions, cellBiasValue);
     // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
-    AddTensorOperand(model, outputGateBiasDimensions, outputGateBiasValue);
+    AddTensorOperand<HalPolicy>(model, outputGateBiasDimensions, outputGateBiasValue);
     // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
     // [output_size, num_units].
-    AddTensorOperand(model, projectionWeightsDimensions, projectionWeightsValue,
-                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(projectionWeightsDimensions));
+    AddTensorOperand<HalPolicy>(model,
+                                projectionWeightsDimensions,
+                                projectionWeightsValue,
+                                HalPolicy::OperandType::TENSOR_FLOAT32,
+                                CreateNoValueLifeTime(projectionWeightsDimensions));
     // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
-    AddTensorOperand(model, projectionBiasDimensions, projectionBiasValue,
-                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(projectionBiasDimensions));
+    AddTensorOperand<HalPolicy>(model,
+                                projectionBiasDimensions,
+                                projectionBiasValue,
+                                HalPolicy::OperandType::TENSOR_FLOAT32,
+                                CreateNoValueLifeTime(projectionBiasDimensions));
     // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
-    AddInputOperand(model, outputStateInDimensions);
+    AddInputOperand<HalPolicy>(model, outputStateInDimensions);
     // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
-    AddInputOperand(model, cellStateInDimensions);
+    AddInputOperand<HalPolicy>(model, cellStateInDimensions);
     // Constant scalar values (the VTS test adds these as tensors of dim {})
     // 20: The activation function: A value indicating the activation function:
     //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
-    AddTensorOperand(model, activationFunctionDimensions,
-                     activationFunctionValue, V1_0::OperandType::INT32);
+    AddTensorOperand<HalPolicy>(model,
+                                activationFunctionDimensions,
+                                activationFunctionValue,
+                                HalPolicy::OperandType::INT32);
     // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
     //     If set to 0.0 then clipping is disabled.
-    AddTensorOperand(model, cellClippingThresholdDimensions,
-                     cellClippingThresholdValue, V1_0::OperandType::FLOAT32);
+    AddTensorOperand<HalPolicy>(model,
+                                cellClippingThresholdDimensions,
+                                cellClippingThresholdValue,
+                                HalPolicy::OperandType::FLOAT32);
     // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
     //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
-    AddTensorOperand(model, projectionClippingThresholdDimensions,
-                     projectionClippingThresholdValue, V1_0::OperandType::FLOAT32);
+    AddTensorOperand<HalPolicy>(model,
+                                projectionClippingThresholdDimensions,
+                                projectionClippingThresholdValue,
+                                HalPolicy::OperandType::FLOAT32);

     // Outputs:
     // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
     //    CIFG, or [batch_size, num_units * 3] without CIFG.
-    AddOutputOperand(model, scratchBufferDimensions);
+    AddOutputOperand<HalPolicy>(model, scratchBufferDimensions);
     // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
-    AddOutputOperand(model, outputStateOutDimensions);
+    AddOutputOperand<HalPolicy>(model, outputStateOutDimensions);
     // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
-    AddOutputOperand(model, cellStateOutDimensions);
+    AddOutputOperand<HalPolicy>(model, cellStateOutDimensions);
     // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
     //    effectively the same as the current “output state (out)” value.
-    AddOutputOperand(model, outputDimensions);
+    AddOutputOperand<HalPolicy>(model, outputDimensions);

     // make the lstm operation
     model.operations.resize(1);
-    model.operations[0].type = V1_0::OperationType::LSTM;
+    model.operations[0].type = HalPolicy::OperationType::LSTM;
     model.operations[0].inputs =
         hidl_vec<uint32_t> {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
     model.operations[0].outputs = hidl_vec<uint32_t> {23, 24, 25, 26};
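
The following usage sketches are illustrative and not part of the commit. The diff above converts the helpers in DriverTestHelpers.hpp from hard-coded V1_0 types to templates parameterised on a HalPolicy, so the same test body can target hal_1_0, hal_1_1 or hal_1_2. A minimal sketch of a model built with these helpers for HAL 1.0; the helper names, the HalPolicy alias and the header paths are taken from the diff, while MakeAddModel itself is a made-up example:

#include "DriverTestHelpers.hpp"

#include "../1.0/HalPolicy.hpp"

using namespace android::hardware;
using namespace driverTestHelpers;

using HalPolicy = armnn_driver::hal_1_0::HalPolicy;

// Builds a one-operation ADD model; operand indices follow the order in
// which the helpers append operands to the model.
HalPolicy::Model MakeAddModel()
{
    HalPolicy::Model model = {};

    float constantValue[] = { 1.f, 2.f, 3.f, 4.f };

    AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 4});                 // 0: input
    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 4}, constantValue); // 1: constant addend
    AddIntOperand<HalPolicy>(model, 0);                                          // 2: no activation
    AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 4});                // 3: output

    model.operations.resize(1);
    model.operations[0].type    = HalPolicy::OperationType::ADD;
    model.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2};
    model.operations[0].outputs = hidl_vec<uint32_t>{3};

    return model;
}

Because the policy is an explicit template argument rather than a deduced parameter, the 1.2/Dilation.cpp test cases can instantiate the shared DilationTestImpl from Dilation.hpp against the hal_1_2 policy without duplicating the model-building code.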
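
Note also that the dilation tests verify the converted network with ArmNN's visitor API rather than by running inference: DilationTestVisitor overrides the relevant Visit*Layer callbacks and asserts on the descriptor that conversion produced. The same technique extends to any descriptor field. A hypothetical sketch (StrideTestVisitor does not exist in this patch) that checks strides instead of dilation:

#include <armnn/LayerVisitorBase.hpp>

#include <boost/test/unit_test.hpp>

class StrideTestVisitor : public armnn::LayerVisitorBase<armnn::VisitorThrowingPolicy>
{
public:
    StrideTestVisitor(uint32_t expectedStrideX, uint32_t expectedStrideY) :
        m_ExpectedStrideX{expectedStrideX},
        m_ExpectedStrideY{expectedStrideY}
    {}

    void VisitConvolution2dLayer(const armnn::IConnectableLayer* /*layer*/,
                                 const armnn::Convolution2dDescriptor& descriptor,
                                 const armnn::ConstTensor& /*weights*/,
                                 const armnn::Optional<armnn::ConstTensor>& /*biases*/,
                                 const char* /*name*/ = nullptr) override
    {
        // Assert on the descriptor captured during conversion.
        BOOST_CHECK_EQUAL(descriptor.m_StrideX, m_ExpectedStrideX);
        BOOST_CHECK_EQUAL(descriptor.m_StrideY, m_ExpectedStrideY);
    }

private:
    uint32_t m_ExpectedStrideX;
    uint32_t m_ExpectedStrideY;
};

// Usage mirrors DilationTestImpl: convert the model, then walk the network:
//     StrideTestVisitor visitor(2, 2);
//     data.m_Network->Accept(visitor);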