From a09de0c8b2ed0f1481502d3b023375609362d9e3 Mon Sep 17 00:00:00 2001
From: Moritz Pflanzer
Date: Fri, 1 Sep 2017 20:41:12 +0100
Subject: COMPMID-415: Rename and move tests

The boost validation is now "standalone" in validation_old and builds as
arm_compute_validation_old. The new validation now builds as
arm_compute_validation.

Change-Id: Ib93ba848a25680ac60afb92b461d574a0757150d
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/86187
Tested-by: Kaizen
Reviewed-by: Anthony Barbier
---
 .../fixtures/FullyConnectedLayerFixture.h | 250 +++++++++++++++++++++
 1 file changed, 250 insertions(+)
 create mode 100644 tests/validation/fixtures/FullyConnectedLayerFixture.h

(limited to 'tests/validation/fixtures/FullyConnectedLayerFixture.h')

diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
new file mode 100644
index 0000000000..d4d68f1af8
--- /dev/null
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/RawTensor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/CPP/FullyConnectedLayer.h"
+#include "tests/validation/Helpers.h"
+
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+RawTensor transpose(const RawTensor &src, int interleave = 1)
+{
+    // Create reference
+    TensorShape dst_shape(src.shape());
+    dst_shape.set(0, src.shape().y() * interleave);
+    dst_shape.set(1, std::ceil(src.shape().x() / static_cast<float>(interleave)));
+
+    RawTensor dst{ dst_shape, src.data_type() };
+
+    // Compute reference
+    uint8_t *out_ptr = dst.data();
+
+    for(int i = 0; i < dst.num_elements(); i += interleave)
+    {
+        Coordinates coord   = index2coord(dst.shape(), i);
+        size_t      coord_x = coord.x();
+        coord.set(0, coord.y() * interleave);
+        coord.set(1, coord_x / interleave);
+
+        const int num_elements = std::min<int>(interleave, src.shape().x() - coord.x());
+
+        std::copy_n(static_cast<const uint8_t *>(src(coord)), num_elements * src.element_size(), out_ptr);
+
+        out_ptr += interleave * dst.element_size();
+    }
+
+    return dst;
+}
+} // namespace
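+
+// Illustration (not part of the computation): with the default interleave = 1
+// the helper above is a plain element-wise transpose. With interleave = N,
+// runs of up to N consecutive elements of each src row stay contiguous in dst.
+// E.g. a src of shape (x = 8, y = 2) with interleave = 4 yields a dst of shape
+// (x = 2 * 4 = 8, y = ceil(8 / 4) = 2): blocks of four elements taken
+// alternately from the two src rows, which is the transpose1xW layout used
+// below for batched runs (with interleave = 16 / element_size).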
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool run_interleave>
+class FullyConnectedLayerValidationFixedPointFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type, int fractional_bits)
+    {
+        ARM_COMPUTE_UNUSED(weights_shape);
+        ARM_COMPUTE_UNUSED(bias_shape);
+
+        _fractional_bits = fractional_bits;
+        _data_type       = data_type;
+
+        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights, data_type, fractional_bits);
+        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights, data_type, fractional_bits);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor, int i)
+    {
+        if(is_data_type_float(_data_type))
+        {
+            std::uniform_real_distribution<> distribution(0.5f, 1.f);
+            library->fill(tensor, distribution, i);
+        }
+        else
+        {
+            library->fill_tensor_uniform(tensor, i);
+        }
+    }
+
+    TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, bool transpose_weights,
+                              bool reshape_weights, DataType data_type, int fixed_point_position)
+    {
+        TensorShape reshaped_weights_shape(weights_shape);
+
+        // Test actions depending on the target settings
+        //
+        //            | reshape   | !reshape
+        // -----------+-----------+---------------------------
+        //  transpose |           | ***
+        // -----------+-----------+---------------------------
+        // !transpose | transpose | transpose &
+        //            |           | transpose1xW (if required)
+        //
+        // ***: That combination is invalid. But we can ignore the transpose
+        //      flag and handle all !reshape cases the same way.
+        if(!reshape_weights || !transpose_weights)
+        {
+            const size_t shape_x = reshaped_weights_shape.x();
+            reshaped_weights_shape.set(0, reshaped_weights_shape.y());
+            reshaped_weights_shape.set(1, shape_x);
+
+            // Weights have to be passed reshaped
+            // Transpose 1xW for batched version
+            if(!reshape_weights && output_shape.y() > 1 && run_interleave)
+            {
+                const int   transpose_width = 16 / data_size_from_type(data_type);
+                const float shape_x         = reshaped_weights_shape.x();
+                reshaped_weights_shape.set(0, reshaped_weights_shape.y() * transpose_width);
+                reshaped_weights_shape.set(1, static_cast<unsigned int>(std::ceil(shape_x / transpose_width)));
+            }
+        }
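+
+        // Concrete numbers (illustrative only): for F32 weights of shape
+        // (x = 160, y = 120), transpose_width = 16 / 4 = 4. The swap above
+        // gives (120, 160); the transpose1xW step then turns that into
+        // (x = 160 * 4 = 640, y = ceil(120 / 4) = 30), preserving the element
+        // count (160 * 120 = 640 * 30 = 19200).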
+
+        // Create tensors
+        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position);
+        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, data_type, 1, fixed_point_position);
+        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1, fixed_point_position);
+        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position);
+
+        // Create and configure function.
+        FunctionType fc;
+        fc.configure(&src, &weights, &bias, &dst, transpose_weights, !reshape_weights);
+
+        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Allocate tensors
+        src.allocator()->allocate();
+        weights.allocator()->allocate();
+        bias.allocator()->allocate();
+        dst.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Fill tensors
+        fill(AccessorType(src), 0);
+        fill(AccessorType(bias), 2);
+
+        if(!reshape_weights || !transpose_weights)
+        {
+            TensorShape tmp_shape(weights_shape);
+            RawTensor   tmp(tmp_shape, data_type, 1, fixed_point_position);
+
+            // Fill with original shape
+            fill(tmp, 1);
+
+            // Transpose elementwise
+            tmp = transpose(tmp);
+
+            // Reshape weights for batched runs
+            if(!reshape_weights && output_shape.y() > 1 && run_interleave)
+            {
+                // Transpose with interleave
+                const int interleave_size = 16 / tmp.element_size();
+                tmp                       = transpose(tmp, interleave_size);
+            }
+
+            AccessorType weights_accessor(weights);
+
+            for(int i = 0; i < tmp.num_elements(); ++i)
+            {
+                Coordinates coord = index2coord(tmp.shape(), i);
+                std::copy_n(static_cast<const uint8_t *>(tmp(coord)),
+                            tmp.element_size(),
+                            static_cast<uint8_t *>(weights_accessor(coord)));
+            }
+        }
+        else
+        {
+            fill(AccessorType(weights), 1);
+        }
+
+        // Compute NEFullyConnectedLayer function
+        fc.run();
+
+        return dst;
+    }
+
+    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, bool transpose_weights,
+                                      bool reshape_weights, DataType data_type, int fixed_point_position = 0)
+    {
+        // Create reference
+        SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position };
+        SimpleTensor<T> weights{ weights_shape, data_type, 1, fixed_point_position };
+        SimpleTensor<T> bias{ bias_shape, data_type, 1, fixed_point_position };
+
+        // Fill reference
+        fill(src, 0);
+        fill(weights, 1);
+        fill(bias, 2);
+
+        return reference::fully_connected_layer<T>(src, weights, bias, output_shape);
+    }
+
+    TensorType      _target{};
+    SimpleTensor<T> _reference{};
+    int             _fractional_bits{};
+    DataType        _data_type{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool run_interleave>
+class FullyConnectedLayerValidationFixture : public FullyConnectedLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T, run_interleave>
+{
+public:
+    template <typename...>
+    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type)
+    {
+        FullyConnectedLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T, run_interleave>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
+                                                                                                                          reshape_weights, data_type,
+                                                                                                                          0);
+    }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE */
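
For context, a fixture like this is not run directly; a backend test file binds the template parameters to concrete types and lets the test framework drive setup(). The following is a minimal sketch of such a driver for the NEON backend. It assumes names from the surrounding test suite (Tensor, Accessor, NEFullyConnectedLayer, FIXTURE_DATA_TEST_CASE, datasets::SmallFullyConnectedLayerDataset, validate and tolerance_f32), none of which are defined in this patch:

// Bind the fixture to the NEON backend. run_interleave is set to true since
// the NEON function relies on the transpose1xW layout for batched runs.
template <typename T>
using NEFullyConnectedLayerFixture = FullyConnectedLayerValidationFixture<Tensor, Accessor, NEFullyConnectedLayer, T, true>;

// The framework calls setup() once per dataset entry; setup() computes both
// _target and _reference, and the test body compares the two. The dataset
// must supply every setup() argument: the four shapes come from the shape
// dataset, the remaining values from the make(...) entries.
FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallFullyConnectedLayerDataset(),
                                               framework::dataset::make("TransposeWeights", true)),
                                       framework::dataset::make("ReshapeWeights", true)),
                               framework::dataset::make("DataType", DataType::F32)))
{
    // Validate the target output against the CPP reference within a tolerance
    validate(Accessor(_target), _reference, tolerance_f32);
}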