From 2ff0009ca9245304c48889c8ba8d3a39d42febed Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Mon, 30 Sep 2019 16:50:08 +0100
Subject: COMPMID-2661: Implement complex function dynamic tensor support.

Change-Id: I80772cb25514009b030e5ade28cbb71ed352da67
Signed-off-by: Georgios Pinitas
Reviewed-on: https://review.mlplatform.org/c/2019
Tested-by: Arm Jenkins
Reviewed-by: Michalis Spyrou
Comments-Addressed: Arm Jenkins
---
 .../fixtures/UNIT/DynamicTensorFixture.h | 157 +++++++++++++++++++--
 1 file changed, 148 insertions(+), 9 deletions(-)

diff --git a/tests/validation/fixtures/UNIT/DynamicTensorFixture.h b/tests/validation/fixtures/UNIT/DynamicTensorFixture.h
index 66ef6c4aff..b2600f13f0 100644
--- a/tests/validation/fixtures/UNIT/DynamicTensorFixture.h
+++ b/tests/validation/fixtures/UNIT/DynamicTensorFixture.h
@@ -32,6 +32,7 @@
 #include "tests/framework/Asserts.h"
 #include "tests/framework/Fixture.h"
 #include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ConvolutionLayer.h"
 #include "tests/validation/reference/NormalizationLayer.h"
 
 namespace arm_compute
 {
@@ -48,6 +49,9 @@
 template
 struct MemoryManagementService
 {
+public:
+    using LftMgrType = LifetimeMgrType;
+
 public:
     MemoryManagementService()
         : allocator(), lifetime_mgr(nullptr), pool_mgr(nullptr), mm(nullptr), mg(), num_pools(0)
@@ -118,15 +122,11 @@ private:
  */
 template
 class DynamicTensorType3SingleFunction : public framework::Fixture
 {
-    using T                           = float;
-    using MemoryManagementServiceType = MemoryManagementService;
+    using T = float;
 
 public:
     template
@@ -234,9 +234,148 @@ protected:
     }
 
 public:
-    TensorShape                          input_l0{}, input_l1{};
-    typename LifetimeMgrType::info_type  internal_l0{}, internal_l1{};
-    typename LifetimeMgrType::info_type  cross_l0{}, cross_l1{};
+    TensorShape                                                  input_l0{}, input_l1{};
+    typename MemoryManagementServiceType::LftMgrType::info_type internal_l0{}, internal_l1{};
+    typename MemoryManagementServiceType::LftMgrType::info_type cross_l0{}, cross_l1{};
+};
+
+/** Simple test case to run a single function with different shapes twice.
+ *
+ * Runs a specified function twice, where the second time the size of the input/output is different
+ * Internal memory of the function and input/output are managed by different services
+ */
+template
+class DynamicTensorType3ComplexFunction : public framework::Fixture
+{
+    using T = float;
+
+public:
+    template
+    void setup(std::vector<TensorShape> input_shapes, TensorShape weights_shape, TensorShape bias_shape, std::vector<TensorShape> output_shapes, PadStrideInfo info)
+    {
+        num_iterations = input_shapes.size();
+        _data_type     = DataType::F32;
+        _data_layout   = DataLayout::NHWC;
+        _input_shapes  = input_shapes;
+        _output_shapes = output_shapes;
+        _weights_shape = weights_shape;
+        _bias_shape    = bias_shape;
+        _info          = info;
+
+        // Create function
+        _f_target = support::cpp14::make_unique(_ms.mm);
+    }
+
+    void run_iteration(unsigned int idx)
+    {
+        auto input_shape  = _input_shapes[idx];
+        auto output_shape = _output_shapes[idx];
+
+        dst_ref    = run_reference(input_shape, _weights_shape, _bias_shape, output_shape, _info);
+        dst_target = run_target(input_shape, _weights_shape, _bias_shape, output_shape, _info, WeightsInfo());
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor, int i)
+    {
+        switch(tensor.data_type())
+        {
+            case DataType::F32:
+            {
+                std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+                library->fill(tensor, distribution, i);
+                break;
+            }
+            default:
+                library->fill_tensor_uniform(tensor, i);
+        }
+    }
+
+    TensorType run_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape,
+                          PadStrideInfo info, WeightsInfo weights_info)
+    {
+        if(_data_layout == DataLayout::NHWC)
+        {
+            permute(input_shape, PermutationVector(2U, 0U, 1U));
+            permute(weights_shape, PermutationVector(2U, 0U, 1U));
+            permute(output_shape, PermutationVector(2U, 0U, 1U));
+        }
+
+        _weights_target = create_tensor<TensorType>(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+        _bias_target    = create_tensor<TensorType>(bias_shape, _data_type, 1);
+
+        // Create tensors
+        TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+        TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+
+        // Create and configure function
+        _f_target->configure(&src, &_weights_target, &_bias_target, &dst, info, weights_info);
+
+        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Allocate tensors
+        src.allocator()->allocate();
+        dst.allocator()->allocate();
+        _weights_target.allocator()->allocate();
+        _bias_target.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Fill tensors
+        fill(AccessorType(src), 0);
+        fill(AccessorType(_weights_target), 1);
+        fill(AccessorType(_bias_target), 2);
+
+        // Populate and validate memory manager
+        _ms.clear();
+        _ms.populate(1);
+        _ms.mg.acquire();
+
+        // Compute NEConvolutionLayer function
+        _f_target->run();
+        _ms.mg.release();
+
+        return dst;
+    }
+
+    SimpleTensor<T> run_reference(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info)
+    {
+        // Create reference
+        SimpleTensor<T> src{ input_shape, _data_type, 1 };
+        SimpleTensor<T> weights{ weights_shape, _data_type, 1 };
+        SimpleTensor<T> bias{ bias_shape, _data_type, 1 };
+
+        // Fill reference
+        fill(src, 0);
+        fill(weights, 1);
+        fill(bias, 2);
+
+        return reference::convolution_layer(src, weights, bias, output_shape, info);
+    }
+
+public:
+    unsigned int    num_iterations{ 0 };
+    SimpleTensor<T> dst_ref{};
+    TensorType      dst_target{};
+
+private:
+    DataType                    _data_type{ DataType::UNKNOWN };
+    DataLayout                  _data_layout{ DataLayout::UNKNOWN };
+    PadStrideInfo               _info{};
+    std::vector<TensorShape>    _input_shapes{};
+    std::vector<TensorShape>    _output_shapes{};
+    TensorShape                 _weights_shape{};
+    TensorShape                 _bias_shape{};
+    MemoryManagementServiceType _ms{};
+    TensorType                  _weights_target{};
+    TensorType                  _bias_target{};
+    std::unique_ptr             _f_target{};
+};
 } // namespace validation
 } // namespace test
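
Note: the patch above only adds the fixture; the backend test suites (for example tests/validation/CL/UNIT/DynamicTensor.cpp) are expected to instantiate it. The snippet below is a minimal sketch of how such a test might drive the new fixture and is not part of the commit: the template-parameter order of DynamicTensorType3ComplexFunction and MemoryManagementService, the alias names, the test-case name and the shape values are illustrative assumptions only, and the usual CL test includes are assumed to be in place.

    // Sketch only: the parameter orders <TensorType, AccessorType, MemoryManagementServiceType, FunctionType>
    // and <AllocatorType, LifetimeMgrType, PoolMgrType, MemoryMgrType> are assumed, not taken from the patch.
    using CLMemoryService = MemoryManagementService<CLBufferAllocator, BlobLifetimeManager, PoolManager, MemoryManagerOnDemand>;
    using CLDynamicTensorComplexFixture =
        DynamicTensorType3ComplexFunction<CLTensor, CLAccessor, CLMemoryService, CLGEMMConvolutionLayer>;

    TEST_CASE(DynamicTensorType3ComplexSketch, framework::DatasetMode::ALL)
    {
        // Two iterations with different input/output shapes; the second run forces the
        // function's internal and I/O tensors to be re-sized and re-allocated.
        const std::vector<TensorShape> input_shapes  = { TensorShape(17U, 31U, 3U), TensorShape(23U, 27U, 3U) };
        const std::vector<TensorShape> output_shapes = { TensorShape(15U, 29U, 5U), TensorShape(21U, 25U, 5U) };
        const TensorShape              weights_shape{ 3U, 3U, 3U, 5U };
        const TensorShape              bias_shape{ 5U };
        const PadStrideInfo            info(1, 1, 0, 0); // stride 1, no padding -> spatial dims shrink by 2

        CLDynamicTensorComplexFixture fixture;
        fixture.setup(input_shapes, weights_shape, bias_shape, output_shapes, info);

        for(unsigned int i = 0; i < fixture.num_iterations; ++i)
        {
            fixture.run_iteration(i);
            validate(CLAccessor(fixture.dst_target), fixture.dst_ref, RelativeTolerance<float>(0.05f));
        }
    }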