author     Giuseppe Rossini <giuseppe.rossini@arm.com>  2020-02-18 10:59:58 +0000
committer  Giuseppe Rossini <giuseppe.rossini@arm.com>  2020-02-18 11:02:08 +0000
commit     0d719442cc4e822821cdd6192a04153329f2657e (patch)
tree       65f9f84dabc84dbd788a77ec0a492e5a64790253
parent     15687801ee61046101dca5a32f2b33d5bd296c24 (diff)
download   ComputeLibrary-0d719442cc4e822821cdd6192a04153329f2657e.tar.gz
Revert "Remove tests/validate_examples and the corresponding build options"
This reverts commit 35d56ec743ee04cc07e36e9a3c62089f88de5245.

Change-Id: Ib370e6129f98258504db2aefcbe3495898867240
Signed-off-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
-rw-r--r--  tests/validate_examples/RunExample.cpp                   212
-rw-r--r--  tests/validate_examples/ValidateExample.h                 87
-rw-r--r--  tests/validate_examples/cl_gemm.cpp                      431
-rw-r--r--  tests/validate_examples/graph_convolution.cpp            398
-rw-r--r--  tests/validate_examples/graph_depthwiseconvolution.cpp   394
-rw-r--r--  tests/validate_examples/graph_fully_connected.cpp        315
-rw-r--r--  tests/validate_examples/graph_validate_utils.h           696
7 files changed, 2533 insertions, 0 deletions
diff --git a/tests/validate_examples/RunExample.cpp b/tests/validate_examples/RunExample.cpp
new file mode 100644
index 0000000000..5d5291abfb
--- /dev/null
+++ b/tests/validate_examples/RunExample.cpp
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2018-2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "utils/Utils.h"
+
+#define BENCHMARK_EXAMPLES
+#include "utils/Utils.cpp"
+
+#include "ValidateExample.h"
+#include "arm_compute/runtime/Scheduler.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/framework/Framework.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/command_line/CommonOptions.h"
+#include "tests/framework/instruments/Instruments.h"
+#include "utils/command_line/CommandLineParser.h"
+
+#ifdef ARM_COMPUTE_CL
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#endif /* ARM_COMPUTE_CL */
+#ifdef ARM_COMPUTE_GC
+#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
+#endif /* ARM_COMPUTE_GC */
+
+#include <libgen.h>
+
+using namespace arm_compute;
+using namespace arm_compute::test;
+
+namespace arm_compute
+{
+namespace test
+{
+std::unique_ptr<AssetsLibrary> library;
+} // namespace test
+namespace utils
+{
+static std::unique_ptr<ValidateExample> g_example = nullptr;
+static std::vector<char *> g_example_argv = {};
+
+namespace
+{
+std::string command_line(int argc, char **argv)
+{
+ std::stringstream ss;
+ for(int i = 0; i < argc; i++)
+ {
+ ss << argv[i] << " ";
+ }
+ return ss.str();
+}
+
+template <bool validate>
+class ExampleTest : public arm_compute::test::framework::TestCase
+{
+public:
+ ExampleTest() = default;
+ void do_setup() override
+ {
+ ARM_COMPUTE_ERROR_ON_NULLPTR(g_example.get());
+ _is_setup = g_example->do_setup(g_example_argv.size(), &g_example_argv[0]);
+ }
+ void do_run() override
+ {
+ if(_is_setup)
+ {
+ g_example->do_run();
+ }
+ }
+ void do_teardown() override
+ {
+ if(_is_setup)
+ {
+ if(validate)
+ {
+ g_example->do_validate();
+ }
+ g_example->do_teardown();
+ }
+ g_example = nullptr;
+ }
+
+private:
+ bool _is_setup{ false };
+};
+
+} // namespace
+int run_example(int argc, char **argv, std::unique_ptr<ValidateExample> example)
+{
+ utils::CommandLineParser parser;
+ framework::CommonOptions options(parser);
+ auto example_args = parser.add_option<utils::ListOption<std::string>>("example_args");
+ example_args->set_help("Arguments to pass to the example separated by commas (e.g: arg0,arg1,arg2)");
+ auto seed = parser.add_option<utils::SimpleOption<std::random_device::result_type>>("seed", std::random_device()());
+ seed->set_help("Global seed for random number generation");
+ auto validate = parser.add_option<utils::SimpleOption<int>>("validate", 1);
+ validate->set_help("Enable / disable output validation (0/1)");
+
+ framework::Framework &framework = framework::Framework::get();
+
+ parser.parse(argc, argv);
+
+ if(options.help->is_set() && options.help->value())
+ {
+ parser.print_help(argv[0]);
+ return 0;
+ }
+
+ std::vector<std::unique_ptr<framework::Printer>> printers = options.create_printers();
+ g_example = std::move(example);
+ g_example_argv.clear();
+ g_example_argv.emplace_back(argv[0]);
+ for(auto &arg : example_args->value())
+ {
+ g_example_argv.emplace_back(const_cast<char *>(arg.c_str())); // NOLINT
+ }
+
+ library = support::cpp14::make_unique<AssetsLibrary>("." /* Only using random values */, seed->value());
+
+ if(options.log_level->value() > framework::LogLevel::NONE)
+ {
+ for(auto &p : printers)
+ {
+ p->print_global_header();
+ }
+ }
+
+ if(options.log_level->value() >= framework::LogLevel::CONFIG)
+ {
+ for(auto &p : printers)
+ {
+ p->print_entry("Version", build_information());
+ p->print_entry("CommandLine", command_line(argc, argv));
+ p->print_entry("Seed", support::cpp11::to_string(seed->value()));
+#ifdef ARM_COMPUTE_CL
+ if(opencl_is_available())
+ {
+ if(!CLScheduler::get().is_initialised())
+ {
+ CLScheduler::get().default_init();
+ }
+ p->print_entry("CL_DEVICE_VERSION", CLKernelLibrary::get().get_device_version());
+ }
+ else
+ {
+ p->print_entry("CL_DEVICE_VERSION", "Unavailable");
+ }
+#endif /* ARM_COMPUTE_CL */
+ p->print_entry("Iterations", support::cpp11::to_string(options.iterations->value()));
+ g_example->print_parameters(*p);
+ }
+ }
+
+ // Initialize framework
+ framework::FrameworkConfig fconfig;
+ fconfig.instruments = options.instruments->value();
+ fconfig.num_iterations = options.iterations->value();
+ fconfig.log_level = options.log_level->value();
+ framework.init(fconfig);
+
+ for(auto &p : printers)
+ {
+ framework.add_printer(p.get());
+ }
+
+ framework.set_throw_errors(options.throw_errors->value());
+ arm_compute::test::framework::detail::TestSuiteRegistrar suite{ "Examples" };
+ if(validate->value() != 0)
+ {
+ framework.add_test_case<ExampleTest<true>>(basename(argv[0]), framework::DatasetMode::ALL, arm_compute::test::framework::TestCaseFactory::Status::ACTIVE);
+ }
+ else
+ {
+ framework.add_test_case<ExampleTest<false>>(basename(argv[0]), framework::DatasetMode::ALL, arm_compute::test::framework::TestCaseFactory::Status::ACTIVE);
+ }
+
+ //func(argc, argv);
+ bool success = framework.run();
+ if(options.log_level->value() > framework::LogLevel::NONE)
+ {
+ for(auto &p : printers)
+ {
+ p->print_global_footer();
+ }
+ }
+
+ return (success ? 0 : 1);
+}
+
+} // namespace utils
+} // namespace arm_compute
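
Note on the harness above: run_example keeps the binary's own name as argv[0] and appends every comma-separated token of --example_args before handing the pointers to ValidateExample::do_setup. A minimal sketch of that forwarding, using a hypothetical helper name (the harness itself does this inline with g_example_argv):

    #include <string>
    #include <vector>

    // Hypothetical helper mirroring the g_example_argv handling above: keep the program name,
    // then append each parsed --example_args token. The strings must outlive the returned vector.
    std::vector<char *> build_example_argv(char *program_name, std::vector<std::string> &example_args)
    {
        std::vector<char *> argv{ program_name };
        for(auto &arg : example_args)
        {
            argv.emplace_back(const_cast<char *>(arg.c_str())); // NOLINT
        }
        return argv; // later passed as (argv.size(), argv.data()) to do_setup()
    }
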
diff --git a/tests/validate_examples/ValidateExample.h b/tests/validate_examples/ValidateExample.h
new file mode 100644
index 0000000000..ce67d7c583
--- /dev/null
+++ b/tests/validate_examples/ValidateExample.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2016-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef VALIDATE_EXAMPLE_H
+#define VALIDATE_EXAMPLE_H
+
+#include "utils/Utils.h"
+namespace arm_compute
+{
+namespace test
+{
+namespace framework
+{
+class Printer;
+} // namespace framework
+} // namespace test
+namespace utils
+{
+/** Abstract ValidateExample class.
+ *
+ * All examples with a validation stage have to inherit from this class.
+ */
+class ValidateExample
+{
+public:
+ /** Setup the example.
+ *
+ * @param[in] argc Argument count.
+ * @param[in] argv Argument values.
+ */
+ virtual bool do_setup(int argc, char **argv)
+ {
+ ARM_COMPUTE_UNUSED(argc, argv);
+ return true;
+ };
+ /** Run the example. */
+ virtual void do_run() {};
+ /** Run reference implementation and validate against the target output
+ */
+ virtual void do_validate()
+ {
+ }
+ /** Teardown the example. */
+ virtual void do_teardown() {};
+ /** Print the example parameters
+ *
+ * @param[in,out] printer Printer to use to print the parameters
+ */
+ virtual void print_parameters(test::framework::Printer &printer)
+ {
+ ARM_COMPUTE_UNUSED(printer);
+ }
+
+ /** Default destructor */
+ virtual ~ValidateExample() = default;
+};
+/** Run an example and handle the potential exceptions it throws
+ *
+ * @param[in] argc Number of command line arguments
+ * @param[in] argv Command line arguments
+ * @param[in] example Example to run
+ */
+int run_example(int argc, char **argv, std::unique_ptr<ValidateExample> example);
+
+} // namespace utils
+} // namespace arm_compute
+#endif /* VALIDATE_EXAMPLE_H */
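
To make the interface above concrete, here is a minimal sketch of a validate example built on it. MyValidateExample is a hypothetical name; real examples such as cl_gemm.cpp below do their argument parsing, configuration and tensor allocation in do_setup:

    #include "ValidateExample.h"
    #include "utils/Utils.h"

    class MyValidateExample : public arm_compute::utils::ValidateExample
    {
    public:
        bool do_setup(int argc, char **argv) override
        {
            // Parse example-specific arguments, configure functions and allocate tensors here.
            ARM_COMPUTE_UNUSED(argc, argv);
            return true; // returning false skips do_run(), do_validate() and do_teardown()
        }
        void do_run() override
        {
            // Execute the configured functions.
        }
        void do_validate() override
        {
            // Run the reference implementation and compare it against the target output.
        }
    };

    int main(int argc, char **argv)
    {
        // run_example (see RunExample.cpp above) registers the example with the test framework.
        return arm_compute::utils::run_example<MyValidateExample>(argc, argv);
    }
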
diff --git a/tests/validate_examples/cl_gemm.cpp b/tests/validate_examples/cl_gemm.cpp
new file mode 100644
index 0000000000..cdf60cd65b
--- /dev/null
+++ b/tests/validate_examples/cl_gemm.cpp
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2017-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL /* Needed by Utils.cpp to handle OpenCL exceptions properly */
+#error "This example needs to be built with -DARM_COMPUTE_CL"
+#endif /* ARM_COMPUTE_CL */
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+#include "arm_compute/runtime/CL/CLFunctions.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+
+#include "tests/AssetsLibrary.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/reference/GEMM.h"
+#include "tests/validation/reference/GEMMLowp.h"
+
+#include "utils/TypePrinter.h"
+#include "utils/Utils.h"
+#include "utils/command_line/CommandLineOptions.h"
+#include "utils/command_line/CommandLineParser.h"
+
+#include "ValidateExample.h"
+
+#include <cstdlib>
+
+using namespace arm_compute;
+using namespace utils;
+using namespace arm_compute::test;
+using namespace arm_compute::test::validation;
+
+constexpr float abs_tolerance_f32(0.0001f); /**< F32 Absolute tolerance value for comparing reference's output against implementation's output for
+ * floating point data types in case using relative tolerance fails because of small values */
+RelativeTolerance<float> tolerance_f32(0.001f); /**< F32 Tolerance value for comparing reference's output against implementation's output for floating point data types */
+RelativeTolerance<half_float::half> tolerance_f16(half(0.2)); /**< F16 Tolerance value for comparing reference's output against implementation's output for floating point data types */
+constexpr float tolerance_num_f16 = 0.02f; /**< F16 Tolerance number */
+
+namespace arm_compute
+{
+DataType data_type_from_name(const std::string &name)
+{
+ static const std::map<std::string, DataType> data_types =
+ {
+ { "f16", DataType::F16 },
+ { "f32", DataType::F32 },
+ { "qasymm8", DataType::QASYMM8 },
+ };
+
+#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
+ try
+ {
+#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
+ return data_types.at(utility::tolower(name));
+
+#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
+ }
+ catch(const std::out_of_range &)
+ {
+ throw std::invalid_argument(name);
+ }
+#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
+}
+
+inline ::std::istream &operator>>(::std::istream &stream, DataType &data_type)
+{
+ std::string value;
+ stream >> value;
+ data_type = data_type_from_name(value);
+ return stream;
+}
+} // namespace arm_compute
+namespace
+{
+class GEMMCommandLineOptions final
+{
+public:
+ explicit GEMMCommandLineOptions(CommandLineParser &parser) noexcept
+ : help(parser.add_option<ToggleOption>("help")),
+ add_bias(parser.add_option<ToggleOption>("add_bias")),
+ M(parser.add_option<SimpleOption<int>>("m", 7)),
+ N(parser.add_option<SimpleOption<int>>("n", 3)),
+ K(parser.add_option<SimpleOption<int>>("k", 5)),
+ B(parser.add_option<SimpleOption<int>>("b", 1)),
+ alpha(parser.add_option<SimpleOption<float>>("alpha", 1.f)),
+ beta(parser.add_option<SimpleOption<float>>("beta", 0.f)),
+ offset_src0(parser.add_option<SimpleOption<int>>("offset_i0", 10)),
+ offset_src1(parser.add_option<SimpleOption<int>>("offset_i1", 10)),
+ offset_dst(parser.add_option<SimpleOption<int>>("offset_o", 10)),
+ scale_src0(parser.add_option<SimpleOption<float>>("scale_i0", 1.f / 255)),
+ scale_src1(parser.add_option<SimpleOption<float>>("scale_i1", 1.f / 255)),
+ scale_dst(parser.add_option<SimpleOption<float>>("scale_o", 1.f / 255)),
+ data_type()
+ {
+ // Setup data type
+ const std::set<arm_compute::DataType> supported_data_types
+ {
+ DataType::F16,
+ DataType::F32,
+ DataType::QASYMM8,
+ };
+ data_type = parser.add_option<EnumOption<DataType>>("type", supported_data_types, DataType::F32);
+
+ // Setup help strings
+ help->set_help("Show this help message");
+ add_bias->set_help("Add bias to the GEMM. Used when running in QASYMM8");
+ M->set_help("M value");
+ N->set_help("N value");
+ K->set_help("K value");
+ B->set_help("B value - number of batches");
+ alpha->set_help("Alpha value");
+ beta->set_help("Beta value");
+ offset_src0->set_help("Offset of first input. Used when running in QASYMM8");
+ offset_src1->set_help("Offset of second input. Used when running in QASYMM8");
+ offset_dst->set_help("Offset of output. Used when running in QASYMM8");
+ scale_src0->set_help("Scale of first input. Used when running in QASYMM8");
+ scale_src1->set_help("Scale of second input. Used when running in QASYMM8");
+ scale_dst->set_help("Scale of output. Used when running in QASYMM8");
+ data_type->set_help("Data type to use");
+ }
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ GEMMCommandLineOptions(const GEMMCommandLineOptions &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ GEMMCommandLineOptions &operator=(const GEMMCommandLineOptions &) = delete;
+ /** Allow instances of this class to be moved */
+ GEMMCommandLineOptions(GEMMCommandLineOptions &&) noexcept(true) = default;
+ /** Allow instances of this class to be moved */
+ GEMMCommandLineOptions &operator=(GEMMCommandLineOptions &&) noexcept(true) = default;
+ /** Default destructor */
+ ~GEMMCommandLineOptions() = default;
+
+public:
+ ToggleOption *help;
+ ToggleOption *add_bias;
+ SimpleOption<int> *M;
+ SimpleOption<int> *N;
+ SimpleOption<int> *K;
+ SimpleOption<int> *B;
+ SimpleOption<float> *alpha;
+ SimpleOption<float> *beta;
+ SimpleOption<int> *offset_src0;
+ SimpleOption<int> *offset_src1;
+ SimpleOption<int> *offset_dst;
+ SimpleOption<float> *scale_src0;
+ SimpleOption<float> *scale_src1;
+ SimpleOption<float> *scale_dst;
+ EnumOption<arm_compute::DataType> *data_type;
+};
+} // namespace
+
+class CLGEMMValidateExample : public ValidateExample
+{
+public:
+ bool do_setup(int argc, char **argv) override
+ {
+ CLScheduler::get().default_init();
+
+ // Parse options
+ CommandLineParser parser;
+ GEMMCommandLineOptions gemm_options(parser);
+ parser.parse(argc, argv);
+
+ // Print help
+ const bool print_help = gemm_options.help->is_set() ? gemm_options.help->value() : false;
+ if(print_help)
+ {
+ parser.print_help(argv[0]);
+ return false;
+ }
+
+ // Consume parameters
+ consume_params(gemm_options);
+ print_parameters_internal();
+
+ const bool is_quantized = is_data_type_quantized(data_type);
+
+ // Calculate re-quantization parameters
+ if(is_quantized)
+ {
+ float multiplier = scale_src0 * scale_src1 / scale_dst;
+ quantization::calculate_quantized_multiplier(multiplier, &dst_multiplier, &dst_shift);
+ }
+
+ // Initialize GEMM inputs/outputs
+ src0.allocator()->init(TensorInfo(TensorShape(K, M, B), 1, data_type));
+ src1.allocator()->init(TensorInfo(TensorShape(N, K, B), 1, data_type));
+ src2.allocator()->init(TensorInfo(TensorShape(N, M, B), 1, data_type));
+ init_sgemm_output(dst, src0, src1, data_type);
+
+ // Configure function
+ if(is_quantized)
+ {
+ src0.info()->set_quantization_info(QuantizationInfo(scale_src0, offset_src0));
+ src1.info()->set_quantization_info(QuantizationInfo(scale_src1, offset_src1));
+ dst.info()->set_quantization_info(QuantizationInfo(scale_dst, offset_dst));
+ biases.allocator()->init(TensorInfo(TensorShape(N), 1, DataType::S32));
+ init_sgemm_output(tmp_dst, src0, src1, DataType::S32);
+
+ // Configure GEMMlowp matrix multiply function
+ mm_gemmlowp.configure(&src0, &src1, nullptr, &tmp_dst);
+
+ // Configure GEMMlowp output stage
+ mm_gemmlowp_output_stage.configure(&tmp_dst, add_bias ? &biases : nullptr, &dst, dst_multiplier, dst_shift, offset_dst);
+ tmp_dst.allocator()->allocate();
+ biases.allocator()->allocate();
+ fill(CLAccessor(biases), 3);
+ }
+ else
+ {
+ // Configure matrix multiply function
+ mm_gemm.configure(&src0, &src1, &src2, &dst, alpha, beta);
+ }
+
+ // Allocate all the tensors
+ src0.allocator()->allocate();
+ src1.allocator()->allocate();
+ dst.allocator()->allocate();
+ src2.allocator()->allocate();
+
+ fill(CLAccessor(src0), 0);
+ fill(CLAccessor(src1), 1);
+ fill(CLAccessor(src2), 2);
+
+ return true;
+ }
+
+ void print_parameters_internal()
+ {
+ std::cout << "Datatype : " << string_from_data_type(data_type) << "\n";
+ std::cout << "M : " << support::cpp11::to_string(M) << "\n";
+ std::cout << "N : " << support::cpp11::to_string(N) << "\n";
+ std::cout << "K : " << support::cpp11::to_string(K) << "\n";
+ std::cout << "B : " << support::cpp11::to_string(B) << "\n";
+ if(data_type == DataType::QASYMM8)
+ {
+ std::cout << "Scale_Src0 : " << support::cpp11::to_string(scale_src0) << "\n";
+ std::cout << "Offset_Src0 : " << support::cpp11::to_string(offset_src0) << "\n";
+            std::cout << "Scale_Src1 : " << support::cpp11::to_string(scale_src1) << "\n";
+ std::cout << "Offset_Src1 : " << support::cpp11::to_string(offset_src1) << "\n";
+ std::cout << "Scale_Dst : " << support::cpp11::to_string(scale_dst) << "\n";
+ std::cout << "Offset_Dst : " << support::cpp11::to_string(offset_dst) << "\n";
+ std::cout << "Bias : " << support::cpp11::to_string(add_bias) << "\n";
+ }
+ else
+ {
+ std::cout << "Alpha : " << support::cpp11::to_string(alpha) << "\n";
+ std::cout << "Beta : " << support::cpp11::to_string(beta) << "\n";
+ }
+ }
+
+ void do_validate() override
+ {
+ switch(data_type)
+ {
+ case DataType::F16:
+ {
+ SimpleTensor<half> ref_src0 = { TensorShape(K, M, B), data_type, 1 };
+ SimpleTensor<half> ref_src1 = { TensorShape(N, K, B), data_type, 1 };
+ SimpleTensor<half> ref_src2 = { TensorShape(N, M, B), data_type, 1 };
+
+ fill(ref_src0, 0);
+ fill(ref_src1, 1);
+ fill(ref_src2, 2);
+
+ SimpleTensor<half> ref_dst = reference::gemm<half>(ref_src0, ref_src1, ref_src2, alpha, beta);
+ validate(CLAccessor(dst), ref_dst, tolerance_f16, tolerance_num_f16);
+ break;
+ }
+ case DataType::F32:
+ {
+ SimpleTensor<float> ref_src0 = { TensorShape(K, M, B), data_type, 1 };
+ SimpleTensor<float> ref_src1 = { TensorShape(N, K, B), data_type, 1 };
+ SimpleTensor<float> ref_src2 = { TensorShape(N, M, B), data_type, 1 };
+
+ fill(ref_src0, 0);
+ fill(ref_src1, 1);
+ fill(ref_src2, 2);
+
+ SimpleTensor<float> ref_dst = reference::gemm<float>(ref_src0, ref_src1, ref_src2, alpha, beta);
+ validate(CLAccessor(dst), ref_dst, tolerance_f32, 0.f, abs_tolerance_f32);
+ break;
+ }
+ case DataType::QASYMM8:
+ {
+ SimpleTensor<uint8_t> ref_src0{ TensorShape(K, M, B), data_type, 1 };
+ SimpleTensor<uint8_t> ref_src1{ TensorShape(N, K, B), data_type, 1 };
+ SimpleTensor<uint8_t> ref_dst;
+
+ // Fill reference
+ fill(ref_src0, 0);
+ fill(ref_src1, 1);
+
+ SimpleTensor<int32_t> ref_tmp_dst = reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(ref_src0, ref_src1, TensorShape(N, M, B), offset_src0, offset_src1);
+
+ const std::vector<int32_t> dst_multiplier_vec = { dst_multiplier };
+ const std::vector<int32_t> dst_shift_vec = { dst_shift };
+
+ if(add_bias)
+ {
+ SimpleTensor<int32_t> biases{ TensorShape(N), DataType::S32, 1 };
+ // Fill bias
+ fill(biases, 3);
+ ref_dst = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(ref_tmp_dst, biases, dst_multiplier_vec, dst_shift_vec, offset_dst);
+ }
+ else
+ {
+ ref_dst = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(ref_tmp_dst, dst_multiplier_vec, dst_shift_vec, offset_dst);
+ }
+ validate(CLAccessor(dst), ref_dst);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ void do_run() override
+ {
+ // Execute the function
+ if(data_type == DataType::QASYMM8)
+ {
+ // Run gemmlowp
+ mm_gemmlowp.run();
+ // Run output stage
+ mm_gemmlowp_output_stage.run();
+ }
+ else
+ {
+ // Run gemm
+ mm_gemm.run();
+ }
+
+ // Make sure all the OpenCL jobs are done executing:
+ CLScheduler::get().sync();
+ }
+
+private:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch(tensor.data_type())
+ {
+ case DataType::F16:
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::S32:
+ case DataType::QASYMM8:
+ {
+ std::uniform_int_distribution<> distribution(-6000, 6000);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+ void consume_params(const GEMMCommandLineOptions &opts)
+ {
+ ARM_COMPUTE_ERROR_ON(opts.M->value() <= 0);
+ ARM_COMPUTE_ERROR_ON(opts.N->value() <= 0);
+ ARM_COMPUTE_ERROR_ON(opts.K->value() <= 0);
+ ARM_COMPUTE_ERROR_ON(opts.B->value() <= 0);
+ M = opts.M->value();
+ N = opts.N->value();
+ K = opts.K->value();
+ B = opts.B->value();
+ alpha = opts.alpha->value();
+ beta = opts.beta->value();
+ offset_src0 = opts.offset_src0->value();
+ offset_src1 = opts.offset_src1->value();
+ offset_dst = opts.offset_dst->value();
+ scale_src0 = opts.scale_src0->value();
+ scale_src1 = opts.scale_src1->value();
+ scale_dst = opts.scale_dst->value();
+ add_bias = opts.add_bias->is_set() ? opts.add_bias->value() : true;
+ data_type = opts.data_type->value();
+ }
+
+ CLTensor src0{}, src1{}, src2{}, dst{};
+ CLTensor tmp_dst{}, biases{};
+
+ CLGEMM mm_gemm{};
+ CLGEMMLowpMatrixMultiplyCore mm_gemmlowp{};
+ CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint mm_gemmlowp_output_stage{};
+
+ size_t M{ 7 }, N{ 3 }, K{ 5 }, B{ 1 };
+ DataType data_type{ DataType::F32 };
+ float alpha{ 1.0 }, beta{ 0.0 };
+ int offset_src0{ 10 }, offset_src1{ 10 }, offset_dst{ 10 };
+ float scale_src0{ 1.0f / 255 }, scale_src1{ 1.0f / 255 }, scale_dst{ 1.0f / 255 };
+ int32_t dst_multiplier{ 0 }, dst_shift{ 0 };
+ bool add_bias{ true };
+};
+
+/** Main program for gemm test
+ *
+ * @param[in] argc Number of arguments
+ * @param[in] argv Arguments
+ *
+ */
+int main(int argc, char **argv)
+{
+ return utils::run_example<CLGEMMValidateExample>(argc, argv);
+}
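
A note on the QASYMM8 path in this example: the int32 accumulators produced by CLGEMMLowpMatrixMultiplyCore are requantized to uint8 by the output stage using a fixed-point multiplier and shift derived from scale_src0 * scale_src1 / scale_dst. A simplified sketch of that decomposition, showing the general idea only (the library's quantization::calculate_quantized_multiplier adds saturation handling and its shift convention may differ):

    #include <cmath>
    #include <cstdint>

    // Decompose a real rescale factor into a Q0.31 integer multiplier and a power-of-two
    // exponent such that multiplier ≈ quant_mult * 2^exp / 2^31.
    void decompose_rescale(float multiplier, int32_t &quant_mult, int &exp)
    {
        const double mantissa = std::frexp(static_cast<double>(multiplier), &exp); // mantissa in [0.5, 1)
        int64_t      q        = std::llround(mantissa * 2147483648.0);             // scale to 2^31
        if(q == (int64_t(1) << 31))                                                // mantissa rounded up to 1.0
        {
            q /= 2;
            ++exp;
        }
        quant_mult = static_cast<int32_t>(q);
    }

Conceptually, quant_mult and exp play the role of dst_multiplier and dst_shift handed, together with offset_dst, to the GEMMLowp output stage above.
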
diff --git a/tests/validate_examples/graph_convolution.cpp b/tests/validate_examples/graph_convolution.cpp
new file mode 100644
index 0000000000..1ab6691e57
--- /dev/null
+++ b/tests/validate_examples/graph_convolution.cpp
@@ -0,0 +1,398 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph.h"
+
+#include "support/ToolchainSupport.h"
+
+#include "tests/NEON/Accessor.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/reference/ConvolutionLayer.h"
+#include "tests/validation/reference/Permute.h"
+
+#include "utils/CommonGraphOptions.h"
+#include "utils/GraphUtils.h"
+#include "utils/Utils.h"
+
+#include "ValidateExample.h"
+#include "graph_validate_utils.h"
+
+#include <utility>
+
+using namespace arm_compute::utils;
+using namespace arm_compute::graph::frontend;
+using namespace arm_compute::graph_utils;
+using namespace arm_compute::graph;
+using namespace arm_compute;
+using namespace arm_compute::test;
+using namespace arm_compute::test::validation;
+
+namespace
+{
+/** Convolution command line options used to configure the graph examples
+ *
+ * (Similar to common options)
+ * The options in this object get populated when "parse()" is called on the parser used to construct it.
+ * The expected workflow is:
+ *
+ * CommandLineParser parser;
+ * CommonOptions options( parser );
+ * parser.parse(argc, argv);
+ */
+class ConvolutionOptions final : public CommonGraphValidateOptions
+{
+public:
+ explicit ConvolutionOptions(CommandLineParser &parser) noexcept
+ : CommonGraphValidateOptions(parser),
+ width(parser.add_option<SimpleOption<int>>("width", 9)),
+ height(parser.add_option<SimpleOption<int>>("height", 9)),
+ channels(parser.add_option<SimpleOption<int>>("channels", 1)),
+ batch(parser.add_option<SimpleOption<int>>("batch", 1)),
+ weights_width(parser.add_option<SimpleOption<int>>("weights_width", 3)),
+ weights_height(parser.add_option<SimpleOption<int>>("weights_height", 3)),
+ OFM(parser.add_option<SimpleOption<int>>("OFM", 1)),
+ padding_top(parser.add_option<SimpleOption<int>>("padding_top", 0)),
+ padding_left(parser.add_option<SimpleOption<int>>("padding_left", 0)),
+ padding_bottom(parser.add_option<SimpleOption<int>>("padding_bottom", 0)),
+ padding_right(parser.add_option<SimpleOption<int>>("padding_right", 0)),
+ stride_x(parser.add_option<SimpleOption<int>>("stride_x", 1)),
+ stride_y(parser.add_option<SimpleOption<int>>("stride_y", 1)),
+ padding_mode(),
+ conv_mode(),
+ data_layout(),
+ scale(parser.add_option<SimpleOption<float>>("scale", 1.0f)),
+ offset(parser.add_option<SimpleOption<int>>("offset", 0)),
+ weights_scale(parser.add_option<SimpleOption<float>>("weights_scale", 1.0f)),
+ weights_offset(parser.add_option<SimpleOption<int>>("weights_offset", 0)),
+ output_scale(parser.add_option<SimpleOption<float>>("output_scale", 1.0f)),
+ output_offset(parser.add_option<SimpleOption<int>>("output_offset", 0)),
+ input_range_low(parser.add_option<SimpleOption<uint64_t>>("input_range_low")),
+ input_range_high(parser.add_option<SimpleOption<uint64_t>>("input_range_high")),
+ weights_range_low(parser.add_option<SimpleOption<uint64_t>>("weights_range_low")),
+ weights_range_high(parser.add_option<SimpleOption<uint64_t>>("weights_range_high")),
+ input_npy(parser.add_option<SimpleOption<std::string>>("input_image")),
+ output_npy(parser.add_option<SimpleOption<std::string>>("reference_image")),
+ weights_npy(parser.add_option<SimpleOption<std::string>>("weights_npy")),
+ bias_npy(parser.add_option<SimpleOption<std::string>>("bias_image"))
+ {
+ const std::set<ConvolutionPaddingMode> available_padding_modes
+ {
+ ConvolutionPaddingMode::Valid,
+ ConvolutionPaddingMode::Same
+ };
+
+ const std::set<arm_compute::graph::ConvolutionMethod> supported_convolution_methods
+ {
+ arm_compute::graph::ConvolutionMethod::Default,
+ arm_compute::graph::ConvolutionMethod::GEMM,
+ arm_compute::graph::ConvolutionMethod::Winograd,
+ arm_compute::graph::ConvolutionMethod::Direct
+ };
+
+ const std::set<DataLayout> supported_data_layouts
+ {
+ DataLayout::NHWC,
+ DataLayout::NCHW,
+ };
+
+ padding_mode = parser.add_option<EnumOption<ConvolutionPaddingMode>>("padding_mode", available_padding_modes, ConvolutionPaddingMode::Valid);
+ conv_mode = parser.add_option<EnumOption<arm_compute::graph::ConvolutionMethod>>("convolution_method", supported_convolution_methods, arm_compute::graph::ConvolutionMethod::Default);
+ data_layout = parser.add_option<EnumOption<DataLayout>>("layout", supported_data_layouts, DataLayout::NHWC);
+
+ padding_mode->set_help("Set padding mode");
+ help->set_help("Show this help message");
+ width->set_help("Set Input dimension width");
+ height->set_help("Set Input dimension height");
+ channels->set_help("Set Input dimension channels");
+ batch->set_help("Set Input dimension batch");
+ weights_width->set_help("Set weights_dimensions width");
+ weights_height->set_help("Set weights_dimensions height");
+ OFM->set_help("Set OFM");
+ padding_top->set_help("Set padding top");
+ padding_bottom->set_help("Set padding bottom");
+ padding_left->set_help("Set padding left");
+ padding_right->set_help("Set padding right");
+ stride_x->set_help("Set padding stride x");
+ stride_y->set_help("Set padding stride y");
+ conv_mode->set_help("Set convolution method");
+ scale->set_help("Quantization scale from QASYMM8");
+ offset->set_help("Quantization offset from QASYMM8");
+ weights_scale->set_help("Quantization scale from QASYMM8");
+ weights_offset->set_help("Quantization offset from QASYMM8");
+ output_scale->set_help("Quantization scale from QASYMM8");
+ output_offset->set_help("Quantization offset from QASYMM8");
+ input_npy->set_help("Use input .npy instead");
+ output_npy->set_help("Use .npy as a reference");
+ input_range_low->set_help("Lower bound for input randomization range");
+        input_range_high->set_help("Upper bound for input randomization range");
+        weights_range_low->set_help("Lower bound for weights randomization range");
+        weights_range_high->set_help("Upper bound for weights randomization range");
+ }
+
+    /** Fill out the supplied parameters with the user-supplied command line values
+     *
+     * @param[out] common_params Example parameters to fill
+     */
+ void consume_parameters(ExampleParams &common_params)
+ {
+ common_params.input.width = width->value();
+ common_params.input.height = height->value();
+ common_params.input.fm = channels->value();
+ common_params.input.batch = batch->value();
+ common_params.input.quant_info = QuantizationInfo(scale->value(), offset->value());
+ common_params.input.npy = input_npy->value();
+ common_params.input.range_low = input_range_low->value();
+ common_params.input.range_high = input_range_high->value();
+
+ common_params.weights.width = weights_width->value();
+ common_params.weights.height = weights_height->value();
+ common_params.weights.fm = OFM->value();
+ common_params.weights.npy = weights_npy->value();
+ common_params.weights.quant_info = QuantizationInfo(weights_scale->value(), weights_offset->value());
+ common_params.weights.range_low = weights_range_low->value();
+ common_params.weights.range_high = weights_range_high->value();
+
+ common_params.bias.npy = bias_npy->value();
+
+ common_params.output.quant_info = QuantizationInfo(output_scale->value(), output_offset->value());
+ common_params.output.npy = output_npy->value();
+
+ common_params.convolution.padding_mode = padding_mode->value();
+ common_params.convolution.padding_top = padding_top->value();
+ common_params.convolution.padding_bottom = padding_bottom->value();
+ common_params.convolution.padding_left = padding_left->value();
+ common_params.convolution.padding_right = padding_right->value();
+ common_params.convolution.padding_stride_x = stride_x->value();
+ common_params.convolution.padding_stride_y = stride_y->value();
+
+ common_params.data_type = data_type->value();
+ common_params.data_layout = data_layout->value();
+ common_params.convolution_method = conv_mode->value();
+ }
+
+ void print_parameters(::std::ostream &os, const ExampleParams &common_params) override
+ {
+ os << "Threads : " << common_params.common_params.threads << std::endl;
+ os << "Target : " << common_params.common_params.target << std::endl;
+ os << "Data type : " << common_params.data_type << std::endl;
+ os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")"
+ << std::endl;
+ os << "Weight dimensions(X,Y, Channels(same as input), OFM) : (" << common_params.weights.width << "," << common_params.weights.height << "," << common_params.input.fm << "," <<
+ common_params.weights.fm << ")" << std::endl;
+ os << "Padding(top, bottom, left, right) (stride x, stride y) : (" << common_params.convolution.padding_top << "," << common_params.convolution.padding_bottom << "," <<
+ common_params.convolution.padding_left << "," << common_params.convolution.padding_right << ") (" << common_params.convolution.padding_stride_x << "," << common_params.convolution.padding_stride_y <<
+ ")" << std::endl;
+ os << "Padding Mode: " << common_params.convolution.padding_mode << std::endl;
+ os << "Convolution Method: " << common_params.convolution_method << std::endl;
+ }
+
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ ConvolutionOptions(const ConvolutionOptions &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ ConvolutionOptions &operator=(const ConvolutionOptions &) = delete;
+ /** Allow instances of this class to be moved */
+ ConvolutionOptions(ConvolutionOptions &&) noexcept(true) = default;
+ /** Allow instances of this class to be moved */
+ ConvolutionOptions &operator=(ConvolutionOptions &&) noexcept(true) = default;
+ /** Default destructor */
+ ~ConvolutionOptions() override = default;
+
+private:
+ SimpleOption<int> *width; /**< Input width */
+ SimpleOption<int> *height; /**< Input height */
+ SimpleOption<int> *channels; /**< Input channels */
+ SimpleOption<int> *batch; /**< Input batch */
+ SimpleOption<int> *weights_width; /**< weights width */
+ SimpleOption<int> *weights_height; /**< weights height */
+ SimpleOption<int> *OFM; /**< Output Feature Map */
+ SimpleOption<int> *padding_top; /**< Padding top */
+ SimpleOption<int> *padding_left; /**< Padding left */
+ SimpleOption<int> *padding_bottom; /**< Padding bottom */
+ SimpleOption<int> *padding_right; /**< Padding right */
+ SimpleOption<int> *stride_x; /**< Padding stride x */
+ SimpleOption<int> *stride_y; /**< Padding stride y */
+ EnumOption<ConvolutionPaddingMode> *padding_mode; /**< Padding mode */
+ EnumOption<arm_compute::graph::ConvolutionMethod> *conv_mode; /**< Convolution method */
+ EnumOption<arm_compute::DataLayout> *data_layout; /**< Graph data layout */
+ SimpleOption<float> *scale; /**< Input Quantization scale from QASYMM8 */
+ SimpleOption<int> *offset; /**< Input Quantization offset from QASYMM8 */
+ SimpleOption<float> *weights_scale; /**< Weights Quantization scale from QASYMM8 */
+ SimpleOption<int> *weights_offset; /**< Weights Quantization offset from QASYMM8 */
+ SimpleOption<float> *output_scale; /**< Output Quantization scale from QASYMM8 */
+ SimpleOption<int> *output_offset; /**< Output Quantization offset from QASYMM8 */
+ SimpleOption<uint64_t> *input_range_low; /**< Lower bound for input randomization range */
+ SimpleOption<uint64_t> *input_range_high; /**< Upper bound for input randomization range */
+ SimpleOption<uint64_t> *weights_range_low; /**< Lower bound for weights randomization range */
+ SimpleOption<uint64_t> *weights_range_high; /**< Upper bound for weights randomization range */
+
+ SimpleOption<std::string> *input_npy; /**< Use input .npy image */
+ SimpleOption<std::string> *output_npy; /**< Use output .npy image to verify*/
+ SimpleOption<std::string> *weights_npy; /**< Use weights .npy image */
+ SimpleOption<std::string> *bias_npy; /**< Use bias .npy image */
+};
+
+/** ConvolutionLayer Graph example validation accessor class */
+template <typename D>
+class ConvolutionVerifyAccessor final : public VerifyAccessor<D>
+{
+ using BaseClassType = VerifyAccessor<D>;
+ using BaseClassType::BaseClassType;
+ using BaseClassType::_params;
+ using TBias = typename std::conditional<std::is_same<typename std::decay<D>::type, uint8_t>::value, int32_t, D>::type;
+
+ SimpleTensor<D> reference(SimpleTensor<D> &src, SimpleTensor<D> &weights, SimpleTensor<TBias> &bias, const TensorShape &output_shape) override
+ {
+ // Calculate padding information
+ const PadStrideInfo padding_info = calculate_convolution_padding(_params);
+
+ //Calculate reference
+ return reference::convolution_layer<D>(src, weights, bias, output_shape, padding_info, Size2D(1, 1),
+ 1, _params.output.quant_info);
+ }
+
+ float relative_tolerance() override
+ {
+ const std::map<arm_compute::graph::Target, const std::map<DataType, float>> relative_tolerance
+ {
+ {
+ arm_compute::graph::Target::CL,
+ { { DataType::F16, 0.2f },
+ { DataType::F32, 0.5f },
+ { DataType::QASYMM8, 1.0f }
+ }
+ },
+ {
+ arm_compute::graph::Target::NEON,
+ { { DataType::F16, 0.2f },
+ { DataType::F32, 0.01f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ }
+ };
+
+ if(_params.convolution_method == arm_compute::graph::ConvolutionMethod::Winograd
+ && _params.data_type == DataType::F32
+ && _params.common_params.target == arm_compute::graph::Target::NEON)
+ {
+ return 0.05f;
+ }
+ else
+ {
+ return relative_tolerance.at(_params.common_params.target).at(_params.data_type);
+ }
+ }
+
+ float absolute_tolerance() override
+ {
+ const std::map<Target, const std::map<DataType, float>> absolute_tolerance
+ {
+ {
+ Target::CL,
+ { { DataType::F16, 0.0f },
+ { DataType::F32, 0.0001f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ },
+ {
+ Target::NEON,
+ { { DataType::F16, 0.2f },
+ { DataType::F32, 0.002f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ }
+ };
+
+ return absolute_tolerance.at(_params.common_params.target).at(_params.data_type);
+ }
+
+ float tolerance_number() override
+ {
+ const std::map<Target, const std::map<DataType, float>> absolute_tolerance
+ {
+ {
+ Target::CL,
+ { { DataType::F16, 0.07f },
+ { DataType::F32, 0.07f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ },
+ {
+ Target::NEON,
+ { { DataType::F16, 0.07f },
+ { DataType::F32, 0.0f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ }
+ };
+
+ return absolute_tolerance.at(_params.common_params.target).at(_params.data_type);
+ }
+};
+
+} // namespace
+
+class GraphConvolutionValidateExample final : public GraphValidateExample<ConvolutionLayer, ConvolutionOptions, ConvolutionVerifyAccessor>
+{
+ using GraphValidateExample::graph;
+
+public:
+ GraphConvolutionValidateExample()
+ : GraphValidateExample("Convolution Graph example")
+ {
+ }
+
+ ConvolutionLayer GraphFunctionLayer(ExampleParams &params) override
+ {
+ const PixelValue lower = PixelValue(params.input.range_low, params.data_type, params.input.quant_info);
+ const PixelValue upper = PixelValue(params.input.range_high, params.data_type, params.input.quant_info);
+
+ const PixelValue weights_lower = PixelValue(params.weights.range_low, params.data_type, params.weights.quant_info);
+ const PixelValue weights_upper = PixelValue(params.weights.range_high, params.data_type, params.weights.quant_info);
+
+ // Calculate padding information
+ const PadStrideInfo padding_info = calculate_convolution_padding(params);
+
+ return ConvolutionLayer(params.weights.width, params.weights.height, params.weights.fm,
+ get_accessor(params.weights, weights_lower, weights_upper, 1),
+ get_accessor(params.bias, lower, upper, 2),
+ padding_info, 1, params.weights.quant_info, params.output.quant_info);
+ }
+};
+
+/** Main program for Graph Convolution test
+ *
+ * @param[in] argc Number of arguments
+ * @param[in] argv Arguments ( Input dimensions [width, height, channels, batch]
+ * Weights dimensions [width, height, OFM]
+ * Padding [top,bottom,left,right, Stride x, Stride y, mode [Valid / Same / Manual] )
+ * Convolution Method[ Auto/GEMM/Winograd/Direct]
+ * Verification[tolerance_number,absolute_tolerance,relative_tolerance] )
+ *
+ */
+int main(int argc, char **argv)
+{
+ return arm_compute::utils::run_example<GraphConvolutionValidateExample>(argc, argv);
+}
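
The option-handling flow described in the ConvolutionOptions comment above is the same in every graph validate example: build a parser, attach the options object, parse, then copy the parsed values into the parameter struct that drives the graph. A condensed sketch under those assumptions (setup_convolution_example is a hypothetical wrapper placed alongside the ConvolutionOptions definition above; ExampleParams comes from graph_validate_utils.h):

    #include <iostream>

    // Hypothetical wrapper showing the documented workflow end to end.
    bool setup_convolution_example(int argc, char **argv, ExampleParams &params)
    {
        CommandLineParser  parser;
        ConvolutionOptions options(parser);          // registers --width, --height, --padding_mode, ...
        parser.parse(argc, argv);

        options.consume_parameters(params);          // fill the parameter struct used to build the graph
        options.print_parameters(std::cout, params); // echo the configuration
        return true;
    }
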
diff --git a/tests/validate_examples/graph_depthwiseconvolution.cpp b/tests/validate_examples/graph_depthwiseconvolution.cpp
new file mode 100644
index 0000000000..3ea33e1deb
--- /dev/null
+++ b/tests/validate_examples/graph_depthwiseconvolution.cpp
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph.h"
+
+#include "support/ToolchainSupport.h"
+
+#include "tests/NEON/Accessor.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/reference/DepthwiseConvolutionLayer.h"
+#include "tests/validation/reference/Permute.h"
+
+#include "utils/CommonGraphOptions.h"
+#include "utils/GraphUtils.h"
+#include "utils/Utils.h"
+
+#include "ValidateExample.h"
+#include "graph_validate_utils.h"
+
+#include <utility>
+
+using namespace arm_compute::utils;
+using namespace arm_compute::graph::frontend;
+using namespace arm_compute::graph_utils;
+using namespace arm_compute::graph;
+using namespace arm_compute;
+using namespace arm_compute::test;
+using namespace arm_compute::test::validation;
+
+namespace
+{
+/** Depthwise Convolution command line options used to configure the graph examples
+ *
+ * (Similar to common options)
+ * The options in this object get populated when "parse()" is called on the parser used to construct it.
+ * The expected workflow is:
+ *
+ * CommandLineParser parser;
+ * CommonOptions options( parser );
+ * parser.parse(argc, argv);
+ */
+class DepthConvolutionOptions final : public CommonGraphValidateOptions
+{
+public:
+ explicit DepthConvolutionOptions(CommandLineParser &parser) noexcept
+ : CommonGraphValidateOptions(parser),
+ width(parser.add_option<SimpleOption<int>>("width", 9)),
+ height(parser.add_option<SimpleOption<int>>("height", 9)),
+ channels(parser.add_option<SimpleOption<int>>("channels", 1)),
+ batch(parser.add_option<SimpleOption<int>>("batch", 1)),
+ weights_width(parser.add_option<SimpleOption<int>>("weights_width", 3)),
+ weights_height(parser.add_option<SimpleOption<int>>("weights_height", 3)),
+ padding_top(parser.add_option<SimpleOption<int>>("padding_top", 0)),
+ padding_left(parser.add_option<SimpleOption<int>>("padding_left", 0)),
+ padding_bottom(parser.add_option<SimpleOption<int>>("padding_bottom", 0)),
+ padding_right(parser.add_option<SimpleOption<int>>("padding_right", 0)),
+ stride_x(parser.add_option<SimpleOption<int>>("stride_x", 1)),
+ stride_y(parser.add_option<SimpleOption<int>>("stride_y", 1)),
+ padding_mode(),
+ conv_mode(),
+ depth_multiplier(parser.add_option<SimpleOption<int>>("depth_multiplier", 1)),
+ data_layout(),
+ scale(parser.add_option<SimpleOption<float>>("scale", 1.0f)),
+ offset(parser.add_option<SimpleOption<int>>("offset", 0)),
+ weights_scale(parser.add_option<SimpleOption<float>>("weights_scale", 1.0f)),
+ weights_offset(parser.add_option<SimpleOption<int>>("weights_offset", 0)),
+ output_scale(parser.add_option<SimpleOption<float>>("output_scale", 1.0f)),
+ output_offset(parser.add_option<SimpleOption<int>>("output_offset", 0)),
+ input_range_low(parser.add_option<SimpleOption<uint64_t>>("input_range_low")),
+ input_range_high(parser.add_option<SimpleOption<uint64_t>>("input_range_high")),
+ weights_range_low(parser.add_option<SimpleOption<uint64_t>>("weights_range_low")),
+ weights_range_high(parser.add_option<SimpleOption<uint64_t>>("weights_range_high")),
+ input_npy(parser.add_option<SimpleOption<std::string>>("input_image")),
+ output_npy(parser.add_option<SimpleOption<std::string>>("reference_image")),
+ weights_npy(parser.add_option<SimpleOption<std::string>>("weights_npy")),
+ bias_npy(parser.add_option<SimpleOption<std::string>>("bias_image"))
+ {
+ const std::set<ConvolutionPaddingMode> available_padding_modes
+ {
+ ConvolutionPaddingMode::Valid,
+ ConvolutionPaddingMode::Same
+ };
+
+ const std::set<arm_compute::graph::DepthwiseConvolutionMethod> supported_convolution_methods
+ {
+ arm_compute::graph::DepthwiseConvolutionMethod::Default,
+ arm_compute::graph::DepthwiseConvolutionMethod::GEMV,
+ arm_compute::graph::DepthwiseConvolutionMethod::Optimized3x3,
+ };
+
+ const std::set<DataLayout> supported_data_layouts
+ {
+ DataLayout::NHWC,
+ DataLayout::NCHW,
+ };
+
+ padding_mode = parser.add_option<EnumOption<ConvolutionPaddingMode>>("padding_mode", available_padding_modes, ConvolutionPaddingMode::Valid);
+ conv_mode = parser.add_option<EnumOption<arm_compute::graph::DepthwiseConvolutionMethod>>("convolution_method", supported_convolution_methods,
+ arm_compute::graph::DepthwiseConvolutionMethod::Default);
+ data_layout = parser.add_option<EnumOption<DataLayout>>("layout", supported_data_layouts, DataLayout::NHWC);
+
+ padding_mode->set_help("Set padding mode");
+ width->set_help("Set Input dimension width");
+ height->set_help("Set Input dimension height");
+ channels->set_help("Set Input dimension channels");
+ batch->set_help("Set Input dimension batch");
+ weights_width->set_help("Set weights_dimensions width");
+ weights_height->set_help("Set weights_dimensions height");
+ padding_top->set_help("Set padding top");
+ padding_bottom->set_help("Set padding bottom");
+ padding_left->set_help("Set padding left");
+ padding_right->set_help("Set padding right");
+ stride_x->set_help("Set padding stride x");
+ stride_y->set_help("Set padding stride y");
+ conv_mode->set_help("Set convolution method");
+ data_layout->set_help("Data layout to use");
+ scale->set_help("Quantization scale from QASYMM8");
+ offset->set_help("Quantization offset from QASYMM8");
+ output_scale->set_help("Quantization scale from QASYMM8");
+ output_offset->set_help("Quantization offset from QASYMM8");
+ input_npy->set_help("Use input .npy instead");
+ output_npy->set_help("Use .npy as a reference");
+ input_range_low->set_help("Lower bound for input randomization range");
+        input_range_high->set_help("Upper bound for input randomization range");
+        weights_scale->set_help("Quantization scale from QASYMM8");
+        weights_offset->set_help("Quantization offset from QASYMM8");
+        weights_range_low->set_help("Lower bound for weights randomization range");
+        weights_range_high->set_help("Upper bound for weights randomization range");
+ depth_multiplier->set_help("Depth multiplier");
+ }
+
+    /** Fill out the supplied parameters with the user-supplied command line values
+     *
+     * @param[out] common_params Example parameters to fill
+     */
+ void consume_parameters(ExampleParams &common_params)
+ {
+ common_params.input.width = width->value();
+ common_params.input.height = height->value();
+ common_params.input.fm = channels->value();
+ common_params.input.batch = batch->value();
+ common_params.input.quant_info = QuantizationInfo(scale->value(), offset->value());
+ common_params.input.npy = input_npy->value();
+ common_params.input.range_low = input_range_low->value();
+ common_params.input.range_high = input_range_high->value();
+
+ common_params.weights.width = weights_width->value();
+ common_params.weights.height = weights_height->value();
+ common_params.weights.npy = weights_npy->value();
+ common_params.weights.range_low = weights_range_low->value();
+ common_params.weights.range_high = weights_range_high->value();
+ common_params.weights.quant_info = QuantizationInfo(weights_scale->value(), weights_offset->value());
+
+ common_params.bias.npy = bias_npy->value();
+
+ common_params.output.quant_info = QuantizationInfo(output_scale->value(), output_offset->value());
+ common_params.output.npy = output_npy->value();
+
+ common_params.convolution.padding_mode = padding_mode->value();
+ common_params.convolution.padding_top = padding_top->value();
+ common_params.convolution.padding_bottom = padding_bottom->value();
+ common_params.convolution.padding_left = padding_left->value();
+ common_params.convolution.padding_right = padding_right->value();
+ common_params.convolution.padding_stride_x = stride_x->value();
+ common_params.convolution.padding_stride_y = stride_y->value();
+ common_params.convolution.depth_multiplier = depth_multiplier->value();
+
+ common_params.data_type = data_type->value();
+ common_params.data_layout = data_layout->value();
+ common_params.depth_convolution_method = conv_mode->value();
+ }
+
+ void print_parameters(::std::ostream &os, const ExampleParams &common_params) override
+ {
+ os << "Threads : " << common_params.common_params.threads << std::endl;
+ os << "Target : " << common_params.common_params.target << std::endl;
+ os << "Data type : " << common_params.data_type << std::endl;
+ os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")"
+ << std::endl;
+ os << "Weight dimensions(X,Y, Channels(same as input)) : (" << common_params.weights.width << "," << common_params.weights.height << "," << common_params.input.fm << ","
+ << ")" << std::endl;
+ os << "Padding(top, bottom, left, right) (stride x, stride y) : (" << common_params.convolution.padding_top << "," << common_params.convolution.padding_bottom << "," <<
+ common_params.convolution.padding_left << "," << common_params.convolution.padding_right << ") (" << common_params.convolution.padding_stride_x << "," << common_params.convolution.padding_stride_y <<
+ ")" << std::endl;
+ os << "Padding Mode: " << common_params.convolution.padding_mode << std::endl;
+ os << "Convolution Method: " << common_params.depth_convolution_method << std::endl;
+ os << "Depth multiplier: " << common_params.convolution.depth_multiplier;
+ }
+
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ DepthConvolutionOptions(const DepthConvolutionOptions &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ DepthConvolutionOptions &operator=(const DepthConvolutionOptions &) = delete;
+ /** Allow instances of this class to be moved */
+ DepthConvolutionOptions(DepthConvolutionOptions &&) noexcept(true) = default;
+ /** Allow instances of this class to be moved */
+ DepthConvolutionOptions &operator=(DepthConvolutionOptions &&) noexcept(true) = default;
+ /** Default destructor */
+ ~DepthConvolutionOptions() override = default;
+
+private:
+ SimpleOption<int> *width; /**< Input width */
+ SimpleOption<int> *height; /**< Input height */
+ SimpleOption<int> *channels; /**< Input channels */
+ SimpleOption<int> *batch; /**< Input batch */
+ SimpleOption<int> *weights_width; /**< weights width */
+ SimpleOption<int> *weights_height; /**< weights height */
+ SimpleOption<int> *padding_top; /**< Padding top */
+ SimpleOption<int> *padding_left; /**< Padding left */
+ SimpleOption<int> *padding_bottom; /**< Padding bottom */
+ SimpleOption<int> *padding_right; /**< Padding right */
+ SimpleOption<int> *stride_x; /**< Padding stride x */
+ SimpleOption<int> *stride_y; /**< Padding stride y */
+ EnumOption<ConvolutionPaddingMode> *padding_mode; /**< Padding mode */
+ EnumOption<arm_compute::graph::DepthwiseConvolutionMethod> *conv_mode; /**< Convolution method */
+ SimpleOption<int> *depth_multiplier; /**< Depth multiplier */
+ EnumOption<arm_compute::DataLayout> *data_layout; /**< Graph data layout */
+ SimpleOption<float> *scale; /**< Input Quantization scale from QASYMM8 */
+ SimpleOption<int> *offset; /**< Input Quantization offset from QASYMM8 */
+ SimpleOption<float> *weights_scale; /**< Weights Quantization scale from QASYMM8 */
+ SimpleOption<int> *weights_offset; /**< Weights Quantization offset from QASYMM8 */
+ SimpleOption<float> *output_scale; /**< Output Quantization scale from QASYMM8 */
+ SimpleOption<int> *output_offset; /**< Output Quantization offset from QASYMM8 */
+ SimpleOption<uint64_t> *input_range_low; /**< Lower bound for input randomization range */
+ SimpleOption<uint64_t> *input_range_high; /**< Upper bound for input randomization range */
+ SimpleOption<uint64_t> *weights_range_low; /**< Lower bound for weights randomization range */
+ SimpleOption<uint64_t> *weights_range_high; /**< Upper bound for weights randomization range */
+
+ SimpleOption<std::string> *input_npy; /**< Use input .npy image */
+ SimpleOption<std::string> *output_npy; /**< Use output .npy image to verify*/
+ SimpleOption<std::string> *weights_npy; /**< Use weights .npy image */
+ SimpleOption<std::string> *bias_npy; /**< Use bias .npy image */
+};
+
+/** DepthwiseConvolutionLayer Graph example validation accessor class */
+template <typename D>
+class DepthConvolutionVerifyAccessor final : public VerifyAccessor<D>
+{
+public:
+ using BaseClassType = VerifyAccessor<D>;
+ using BaseClassType::BaseClassType;
+ using BaseClassType::_params;
+ using TBias = typename std::conditional<std::is_same<typename std::decay<D>::type, uint8_t>::value, int32_t, D>::type;
+
+public:
+ SimpleTensor<D> reference(SimpleTensor<D> &src, SimpleTensor<D> &weights, SimpleTensor<TBias> &bias, const TensorShape &output_shape) override
+ {
+ // Calculate padding information
+ const PadStrideInfo padding_info = calculate_convolution_padding(_params);
+
+ //Calculate reference
+ return reference::depthwise_convolution<D>(src, weights, bias, output_shape, padding_info,
+ _params.convolution.depth_multiplier,
+ Size2D(1U, 1U),
+ _params.output.quant_info);
+ }
+
+ float relative_tolerance() override
+ {
+ const std::map<arm_compute::graph::Target, const std::map<DataType, float>> relative_tolerance
+ {
+ {
+ arm_compute::graph::Target::CL,
+ { { DataType::F16, 0.01f },
+ { DataType::F32, 0.01f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ },
+ {
+ arm_compute::graph::Target::NEON,
+ { { DataType::F16, 0.01f },
+ { DataType::F32, 0.01f },
+ { DataType::QASYMM8, 1.0f }
+ }
+ }
+ };
+
+ return relative_tolerance.at(_params.common_params.target).at(_params.data_type);
+ }
+
+ float absolute_tolerance() override
+ {
+ const std::map<Target, const std::map<DataType, float>> absolute_tolerance
+ {
+ {
+ Target::CL,
+ { { DataType::F16, 0.0f },
+ { DataType::F32, 0.0000f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ },
+ {
+ Target::NEON,
+ { { DataType::F16, 0.2f },
+ { DataType::F32, 0.002f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ }
+ };
+
+ return absolute_tolerance.at(_params.common_params.target).at(_params.data_type);
+ }
+
+ float tolerance_number() override
+ {
+        const std::map<Target, const std::map<DataType, float>> tolerance_num
+ {
+ {
+ Target::CL,
+ { { DataType::F16, 0.05f },
+ { DataType::F32, 0.00f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ },
+ {
+ Target::NEON,
+ { { DataType::F16, 0.05f },
+ { DataType::F32, 0.0f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ }
+ };
+
+        return tolerance_num.at(_params.common_params.target).at(_params.data_type);
+ }
+};
+
+} // namespace
+
+class GraphDepthwiseConvolutionValidateExample final : public GraphValidateExample<DepthwiseConvolutionLayer, DepthConvolutionOptions, DepthConvolutionVerifyAccessor>
+{
+ using GraphValidateExample::graph;
+
+public:
+ GraphDepthwiseConvolutionValidateExample()
+ : GraphValidateExample("DepthWiseConvolution Graph example")
+ {
+ }
+
+ DepthwiseConvolutionLayer GraphFunctionLayer(ExampleParams &params) override
+ {
+ const PixelValue lower = PixelValue(params.input.range_low, params.data_type, params.input.quant_info);
+ const PixelValue upper = PixelValue(params.input.range_high, params.data_type, params.input.quant_info);
+
+ const PixelValue weights_lower = PixelValue(params.weights.range_low, params.data_type, params.weights.quant_info);
+ const PixelValue weights_upper = PixelValue(params.weights.range_high, params.data_type, params.weights.quant_info);
+
+ // Calculate padding information
+ const PadStrideInfo padding_info = calculate_convolution_padding(params);
+
+ return DepthwiseConvolutionLayer(params.weights.width, params.weights.height,
+ get_accessor(params.weights, weights_lower, weights_upper, 1),
+ get_accessor(params.bias, lower, upper, 2),
+ padding_info, params.convolution.depth_multiplier, params.weights.quant_info, params.output.quant_info);
+ }
+};
+
+/** Main program for Graph Depthwise Convolution test
+ *
+ * @param[in] argc Number of arguments
+ * @param[in] argv Arguments ( Input dimensions [width, height, channels, batch]
+ *                             Weights dimensions [width, height, channels]
+ *                             Padding [top, bottom, left, right, stride x, stride y, mode (Valid / Same / Manual)]
+ *                             Convolution method [Default / GEMV / Optimized3x3]
+ *                             Verification [tolerance_number, absolute_tolerance, relative_tolerance] )
+ *
+ */
+int main(int argc, char **argv)
+{
+ return arm_compute::utils::run_example<GraphDepthwiseConvolutionValidateExample>(argc, argv);
+}
diff --git a/tests/validate_examples/graph_fully_connected.cpp b/tests/validate_examples/graph_fully_connected.cpp
new file mode 100644
index 0000000000..645fa8b124
--- /dev/null
+++ b/tests/validate_examples/graph_fully_connected.cpp
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph.h"
+
+#include "support/ToolchainSupport.h"
+
+#include "tests/NEON/Accessor.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/reference/FullyConnectedLayer.h"
+#include "tests/validation/reference/Permute.h"
+
+#include "utils/CommonGraphOptions.h"
+#include "utils/GraphUtils.h"
+#include "utils/Utils.h"
+
+#include "ValidateExample.h"
+#include "graph_validate_utils.h"
+
+#include <utility>
+
+using namespace arm_compute::utils;
+using namespace arm_compute::graph::frontend;
+using namespace arm_compute::graph_utils;
+using namespace arm_compute::graph;
+using namespace arm_compute;
+using namespace arm_compute::test;
+using namespace arm_compute::test::validation;
+
+namespace
+{
+/** Fully connected command line options used to configure the graph examples
+ *
+ * (Similar to common options)
+ * The options in this object get populated when "parse()" is called on the parser used to construct it.
+ * The expected workflow is:
+ *
+ * CommandLineParser parser;
+ * CommonOptions options( parser );
+ * parser.parse(argc, argv);
+ */
+class FullyConnectedOptions final : public CommonGraphValidateOptions
+{
+public:
+ explicit FullyConnectedOptions(CommandLineParser &parser) noexcept
+ : CommonGraphValidateOptions(parser),
+ width(parser.add_option<SimpleOption<int>>("width", 3)),
+ batch(parser.add_option<SimpleOption<int>>("batch", 1)),
+ input_scale(parser.add_option<SimpleOption<float>>("input_scale", 1.0f)),
+ input_offset(parser.add_option<SimpleOption<int>>("input_offset", 0)),
+ weights_scale(parser.add_option<SimpleOption<float>>("weights_scale", 1.0f)),
+ weights_offset(parser.add_option<SimpleOption<int>>("weights_offset", 0)),
+ output_scale(parser.add_option<SimpleOption<float>>("output_scale", 1.0f)),
+ output_offset(parser.add_option<SimpleOption<int>>("output_offset", 0)),
+ num_outputs(parser.add_option<SimpleOption<int>>("num_outputs", 1)),
+ input_range_low(parser.add_option<SimpleOption<uint64_t>>("input_range_low")),
+ input_range_high(parser.add_option<SimpleOption<uint64_t>>("input_range_high")),
+ weights_range_low(parser.add_option<SimpleOption<uint64_t>>("weights_range_low")),
+ weights_range_high(parser.add_option<SimpleOption<uint64_t>>("weights_range_high"))
+ {
+ width->set_help("Set Input dimension width");
+ batch->set_help("Set Input dimension batch");
+ input_scale->set_help("Quantization scale from QASYMM8");
+ input_offset->set_help("Quantization offset from QASYMM8");
+ weights_scale->set_help("Quantization scale from QASYMM8");
+ weights_offset->set_help("Quantization offset from QASYMM8");
+ output_scale->set_help("Quantization scale from QASYMM8");
+ output_offset->set_help("Quantization offset from QASYMM8");
+ num_outputs->set_help("Number of outputs.");
+ input_range_low->set_help("Lower bound for input randomization range");
+        input_range_high->set_help("Upper bound for input randomization range");
+        weights_range_low->set_help("Lower bound for weights randomization range");
+        weights_range_high->set_help("Upper bound for weights randomization range");
+ }
+
+    /** Fill out the supplied parameters with the user supplied parameters
+     *
+     * @param[out] common_params Example parameters to fill
+     *
+     * @return None.
+     */
+ void consume_parameters(ExampleParams &common_params)
+ {
+ common_params.input.width = width->value();
+ common_params.input.batch = batch->value();
+ common_params.input.quant_info = QuantizationInfo(input_scale->value(), input_offset->value());
+ common_params.input.range_low = input_range_low->value();
+ common_params.input.range_high = input_range_high->value();
+
+ common_params.weights.quant_info = QuantizationInfo(weights_scale->value(), weights_offset->value());
+ common_params.weights.range_low = weights_range_low->value();
+ common_params.weights.range_high = weights_range_high->value();
+
+ common_params.output.quant_info = QuantizationInfo(output_scale->value(), output_offset->value());
+
+ common_params.data_type = data_type->value();
+ common_params.fully_connected.num_outputs = num_outputs->value();
+ }
+
+ void print_parameters(::std::ostream &os, const ExampleParams &common_params) override
+ {
+ os << "Threads : " << common_params.common_params.threads << std::endl;
+ os << "Target : " << common_params.common_params.target << std::endl;
+ os << "Data type : " << common_params.data_type << std::endl;
+ os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")"
+ << std::endl;
+ os << "Number of outputs : " << common_params.fully_connected.num_outputs << std::endl;
+ }
+
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ FullyConnectedOptions(const FullyConnectedOptions &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ FullyConnectedOptions &operator=(const FullyConnectedOptions &) = delete;
+ /** Allow instances of this class to be moved */
+ FullyConnectedOptions(FullyConnectedOptions &&) noexcept(true) = default;
+ /** Allow instances of this class to be moved */
+ FullyConnectedOptions &operator=(FullyConnectedOptions &&) noexcept(true) = default;
+ /** Default destructor */
+ ~FullyConnectedOptions() override = default;
+
+private:
+    SimpleOption<int>      *width;              /**< Input width */
+    SimpleOption<int>      *batch;              /**< Input batch */
+    SimpleOption<float>    *input_scale;        /**< Input Quantization scale from QASYMM8 */
+    SimpleOption<int>      *input_offset;       /**< Input Quantization offset from QASYMM8 */
+    SimpleOption<float>    *weights_scale;      /**< Weights Quantization scale from QASYMM8 */
+    SimpleOption<int>      *weights_offset;     /**< Weights Quantization offset from QASYMM8 */
+    SimpleOption<float>    *output_scale;       /**< Output Quantization scale from QASYMM8 */
+    SimpleOption<int>      *output_offset;      /**< Output Quantization offset from QASYMM8 */
+    SimpleOption<int>      *num_outputs;        /**< Number of outputs. */
+    SimpleOption<uint64_t> *input_range_low;    /**< Lower bound for input randomization range */
+    SimpleOption<uint64_t> *input_range_high;   /**< Upper bound for input randomization range */
+    SimpleOption<uint64_t> *weights_range_low;  /**< Lower bound for weights randomization range */
+    SimpleOption<uint64_t> *weights_range_high; /**< Upper bound for weights randomization range */
+};
+
+/** Fully Connected Layer Graph example validation accessor class */
+template <typename D>
+class FullyConnectedVerifyAccessor final : public VerifyAccessor<D>
+{
+ using BaseClassType = VerifyAccessor<D>;
+ using BaseClassType::BaseClassType;
+ using BaseClassType::_params;
+ using TBias = typename std::conditional<std::is_same<typename std::decay<D>::type, uint8_t>::value, int32_t, D>::type;
+
+    // Inherited methods overridden:
+ void create_tensors(arm_compute::test::SimpleTensor<D> &src,
+ arm_compute::test::SimpleTensor<D> &weights,
+ arm_compute::test::SimpleTensor<TBias> &bias,
+ ITensor &tensor) override
+ {
+ // Calculate Tensor shapes for verification
+ const TensorShape input_shape = TensorShape(_params.input.width, _params.input.height, _params.input.fm, _params.input.batch);
+ const TensorDescriptor input_descriptor = TensorDescriptor(input_shape, _params.data_type, _params.input.quant_info);
+ const TensorDescriptor weights_descriptor = FullyConnectedLayerNode::compute_weights_descriptor(input_descriptor,
+ _params.fully_connected.num_outputs,
+ _params.fully_connected.info,
+ _params.weights.quant_info);
+        const TensorDescriptor output_descriptor  = FullyConnectedLayerNode::compute_output_descriptor(input_descriptor, _params.fully_connected.num_outputs, _params.output.quant_info);
+
+ //Create Input tensors
+ src = SimpleTensor<D> { input_descriptor.shape, _params.data_type, 1, input_descriptor.quant_info };
+ weights = SimpleTensor<D> { weights_descriptor.shape, _params.data_type, 1, weights_descriptor.quant_info };
+ bias = SimpleTensor<TBias> { TensorShape(tensor.info()->tensor_shape().x()), _params.data_type, 1, _params.input.quant_info };
+ }
+
+ TensorShape output_shape(ITensor &tensor) override
+ {
+ ARM_COMPUTE_UNUSED(tensor);
+
+        const TensorShape      input_shape       = TensorShape(_params.input.width, _params.input.height, _params.input.fm, _params.input.batch);
+        const TensorDescriptor input_descriptor  = TensorDescriptor(input_shape, _params.data_type, _params.input.quant_info);
+        const TensorDescriptor output_descriptor = FullyConnectedLayerNode::compute_output_descriptor(input_descriptor, _params.fully_connected.num_outputs, _params.output.quant_info);
+
+        return output_descriptor.shape;
+ }
+
+ arm_compute::test::SimpleTensor<D> reference(arm_compute::test::SimpleTensor<D> &src,
+ arm_compute::test::SimpleTensor<D> &weights,
+ arm_compute::test::SimpleTensor<TBias> &bias,
+ const arm_compute::TensorShape &output_shape) override
+ {
+ return reference::fully_connected_layer<D>(src, weights, bias, output_shape, _params.output.quant_info);
+ }
+
+ float relative_tolerance() override
+ {
+ const std::map<arm_compute::graph::Target, const std::map<DataType, float>> relative_tolerance
+ {
+ {
+ arm_compute::graph::Target::CL,
+ { { DataType::F16, 0.2f },
+ { DataType::F32, 0.05f },
+ { DataType::QASYMM8, 1.0f }
+ }
+ },
+ {
+ arm_compute::graph::Target::NEON,
+ { { DataType::F16, 0.2f },
+ { DataType::F32, 0.01f },
+ { DataType::QASYMM8, 1.0f }
+ }
+ }
+ };
+
+ return relative_tolerance.at(_params.common_params.target).at(_params.data_type);
+ }
+
+ float absolute_tolerance() override
+ {
+ const std::map<Target, const std::map<DataType, float>> absolute_tolerance
+ {
+ {
+ Target::CL,
+ { { DataType::F16, 0.0f },
+ { DataType::F32, 0.0001f },
+ { DataType::QASYMM8, 1.0f }
+ }
+ },
+ {
+ Target::NEON,
+ { { DataType::F16, 0.3f },
+ { DataType::F32, 0.1f },
+ { DataType::QASYMM8, 1.0f }
+ }
+ }
+ };
+
+ return absolute_tolerance.at(_params.common_params.target).at(_params.data_type);
+ }
+
+ float tolerance_number() override
+ {
+        const std::map<Target, const std::map<DataType, float>> tolerance_num
+ {
+ {
+ Target::CL,
+ { { DataType::F16, 0.07f },
+ { DataType::F32, 0.07f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ },
+ {
+ Target::NEON,
+ { { DataType::F16, 0.07f },
+ { DataType::F32, 0.0f },
+ { DataType::QASYMM8, 0.0f }
+ }
+ }
+ };
+
+        return tolerance_num.at(_params.common_params.target).at(_params.data_type);
+ }
+};
+
+} // namespace
+
+class GraphFullyConnectedValidateExample final : public GraphValidateExample<FullyConnectedLayer, FullyConnectedOptions, FullyConnectedVerifyAccessor>
+{
+ using GraphValidateExample::graph;
+
+public:
+ GraphFullyConnectedValidateExample()
+ : GraphValidateExample("Fully_connected Graph example")
+ {
+ }
+
+ FullyConnectedLayer GraphFunctionLayer(ExampleParams &params) override
+ {
+ const PixelValue lower = PixelValue(params.input.range_low, params.data_type, params.input.quant_info);
+ const PixelValue upper = PixelValue(params.input.range_high, params.data_type, params.input.quant_info);
+
+ const PixelValue weights_lower = PixelValue(params.weights.range_low, params.data_type, params.weights.quant_info);
+ const PixelValue weights_upper = PixelValue(params.weights.range_high, params.data_type, params.weights.quant_info);
+
+ return FullyConnectedLayer(params.fully_connected.num_outputs,
+ get_random_accessor(weights_lower, weights_upper, 1),
+ get_random_accessor(lower, upper, 2),
+ params.fully_connected.info, params.weights.quant_info, params.output.quant_info);
+ }
+};
+
+/** Main program for Graph fully_connected test
+ *
+ * @param[in] argc Number of arguments
+ * @param[in] argv Arguments ( Input dimensions [width, batch]
+ *                             Fully connected [num_outputs, type]
+ *                             Verification [tolerance_number, absolute_tolerance, relative_tolerance] )
+ *
+ */
+int main(int argc, char **argv)
+{
+ return arm_compute::utils::run_example<GraphFullyConnectedValidateExample>(argc, argv);
+}
diff --git a/tests/validate_examples/graph_validate_utils.h b/tests/validate_examples/graph_validate_utils.h
new file mode 100644
index 0000000000..edc3a6514a
--- /dev/null
+++ b/tests/validate_examples/graph_validate_utils.h
@@ -0,0 +1,696 @@
+/*
+ * Copyright (c) 2019-2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef GRAPH_VALIDATE_UTILS_H
+#define GRAPH_VALIDATE_UTILS_H
+
+#include "arm_compute/graph.h"
+
+#include "ValidateExample.h"
+#include "utils/command_line/CommandLineParser.h"
+
+namespace arm_compute
+{
+namespace utils
+{
+/** Available padding modes */
+enum class ConvolutionPaddingMode
+{
+ Valid,
+ Same,
+ Manual
+};
+
+/** Stream Input operator for the ConvolutionPaddingMode type
+ *
+ * @param[in] stream Input stream.
+ * @param[out] Mode Convolution padding mode to read the value into
+ *
+ * @return Modified input stream.
+ */
+inline ::std::istream &operator>>(::std::istream &stream, ConvolutionPaddingMode &Mode)
+{
+ static const std::map<std::string, ConvolutionPaddingMode> modes =
+ {
+ { "valid", ConvolutionPaddingMode::Valid },
+ { "same", ConvolutionPaddingMode::Same },
+ { "manual", ConvolutionPaddingMode::Manual }
+ };
+ std::string value;
+ stream >> value;
+#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
+ try
+ {
+#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
+ Mode = modes.at(arm_compute::utility::tolower(value));
+#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
+ }
+ catch(const std::out_of_range &)
+ {
+ throw std::invalid_argument(value);
+ }
+#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
+
+ return stream;
+}
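+/* A minimal usage sketch for the extraction operator above; the token and
+ * variable names are illustrative only:
+ *
+ *   std::istringstream ss("same");
+ *   ConvolutionPaddingMode mode;
+ *   ss >> mode; // mode is now ConvolutionPaddingMode::Same
+ *
+ * An unrecognised token makes the operator throw std::invalid_argument
+ * (when exceptions are enabled).
+ */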
+
+/** Formatted output of the ConvolutionPaddingMode type
+ *
+ * @param[out] os Output stream.
+ * @param[in] Mode ConvolutionPaddingMode to output
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, ConvolutionPaddingMode Mode)
+{
+ switch(Mode)
+ {
+ case ConvolutionPaddingMode::Valid:
+ os << "Valid";
+ break;
+ case ConvolutionPaddingMode::Same:
+ os << "Same";
+ break;
+ case ConvolutionPaddingMode::Manual:
+ os << "Manual";
+ break;
+ default:
+ throw std::invalid_argument("Unsupported padding mode format");
+ }
+
+ return os;
+}
+
+/** Structure holding all the input tensor graph parameters */
+struct TensorParams
+{
+ int width{ 1 };
+ int height{ 1 };
+ int fm{ 1 };
+ int batch{ 1 };
+ QuantizationInfo quant_info{ 1.0f, 0 };
+ std::string npy{};
+ uint64_t range_low{ 0 };
+ uint64_t range_high{ 16 };
+};
+
+/** Structure holding all the verification graph parameters */
+struct VerificationParams
+{
+ float absolute_tolerance{ -1.f };
+ float relative_tolerance{ -1.f };
+ float tolerance_number{ -1.f };
+};
+
+/** Structure holding all the common graph parameters */
+struct FrameworkParams
+{
+ bool help{ false };
+ int threads{ 0 };
+ arm_compute::graph::Target target{ arm_compute::graph::Target::NEON };
+};
+
+/** Structure holding the parameters common to all graph examples */
+struct CommonParams
+{
+ FrameworkParams common_params{};
+ TensorParams input{};
+ TensorParams weights{};
+ TensorParams bias{};
+ TensorParams output{};
+ VerificationParams verification{};
+ arm_compute::DataType data_type{ DataType::F32 };
+};
+
+/** Structure holding all the Convolution layer graph parameters */
+struct ConvolutionParams
+{
+ int depth_multiplier{ 1 };
+ /** Padding graph parameters */
+ int padding_top{ 0 };
+ int padding_bottom{ 0 };
+ int padding_left{ 0 };
+ int padding_right{ 0 };
+ int padding_stride_x{ 0 };
+ int padding_stride_y{ 0 };
+ ConvolutionPaddingMode padding_mode{ ConvolutionPaddingMode::Valid };
+ struct
+ {
+ struct
+ {
+ int X{ 0 };
+ int Y{ 0 };
+ } stride{};
+ ConvolutionPaddingMode mode{ ConvolutionPaddingMode::Valid };
+ } padding{};
+};
+
+/** Structure holding all the fully_connected layer graph parameters */
+struct FullyConnectedParams
+{
+ FullyConnectedLayerInfo info{};
+ int num_outputs{ 1 };
+};
+
+/** Structure holding all the graph Example parameters */
+struct ExampleParams : public CommonParams
+{
+ FullyConnectedParams fully_connected{};
+ ConvolutionParams convolution{};
+ arm_compute::graph::DepthwiseConvolutionMethod depth_convolution_method{ arm_compute::graph::DepthwiseConvolutionMethod::Default };
+ arm_compute::graph::ConvolutionMethod convolution_method{ arm_compute::graph::ConvolutionMethod::Default };
+ arm_compute::DataLayout data_layout{ DataLayout::NCHW };
+};
+
+/** Calculate stride information.
+ *
+ * Depending on the selected padding mode create the desired PadStrideInfo
+ *
+ * @param[in] params Convolution parameters supplied by the user.
+ *
+ * @return PadStrideInfo with the correct padding mode.
+ */
+inline PadStrideInfo calculate_convolution_padding(ExampleParams params)
+{
+ switch(params.convolution.padding_mode)
+ {
+ case ConvolutionPaddingMode::Manual:
+ {
+ return PadStrideInfo(params.convolution.padding_stride_x, params.convolution.padding_stride_y, params.convolution.padding_left, params.convolution.padding_right, params.convolution.padding_top,
+ params.convolution.padding_bottom, DimensionRoundingType::FLOOR);
+ }
+ case ConvolutionPaddingMode::Valid:
+ {
+ return PadStrideInfo();
+ }
+ case ConvolutionPaddingMode::Same:
+ {
+ return arm_compute::calculate_same_pad(TensorShape(params.input.width, params.input.height), TensorShape(params.weights.width, params.weights.height),
+ PadStrideInfo(params.convolution.padding_stride_x,
+ params.convolution.padding_stride_y));
+ }
+ default:
+ ARM_COMPUTE_ERROR("NOT SUPPORTED!");
+ }
+}
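+/* Usage sketch for calculate_convolution_padding(); the dimensions and strides
+ * below are made-up example values, not framework defaults:
+ *
+ *   ExampleParams p;
+ *   p.input.width                  = 32;
+ *   p.input.height                 = 32;
+ *   p.weights.width                = 3;
+ *   p.weights.height               = 3;
+ *   p.convolution.padding_stride_x = 1;
+ *   p.convolution.padding_stride_y = 1;
+ *   p.convolution.padding_mode     = ConvolutionPaddingMode::Same;
+ *   const PadStrideInfo info = calculate_convolution_padding(p); // SAME padding keeps the spatial size for stride 1
+ */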
+/** CommonGraphValidateOptions command line options used to configure the graph examples
+ *
+ * (Similar to common options)
+ * The options in this object get populated when "parse()" is called on the parser used to construct it.
+ * The expected workflow is:
+ *
+ * CommandLineParser parser;
+ * CommonOptions options( parser );
+ * parser.parse(argc, argv);
+ */
+class CommonGraphValidateOptions
+{
+public:
+ explicit CommonGraphValidateOptions(CommandLineParser &parser) noexcept
+ : help(parser.add_option<ToggleOption>("help")),
+ threads(parser.add_option<SimpleOption<int>>("threads")),
+ target(),
+ data_type(),
+ absolute_tolerance(parser.add_option<SimpleOption<float>>("abs_tolerance", -1.0f)),
+ relative_tolerance(parser.add_option<SimpleOption<float>>("rel_tolerance", -1.0f)),
+ tolerance_number(parser.add_option<SimpleOption<float>>("tolerance_num", -1.0f))
+ {
+ const std::set<arm_compute::graph::Target> supported_targets
+ {
+ arm_compute::graph::Target::NEON,
+ arm_compute::graph::Target::CL,
+ arm_compute::graph::Target::GC,
+ };
+
+ const std::set<arm_compute::DataType> supported_data_types
+ {
+ DataType::F16,
+ DataType::F32,
+ DataType::QASYMM8,
+ };
+
+ target = parser.add_option<EnumOption<arm_compute::graph::Target>>("target", supported_targets, arm_compute::graph::Target::NEON);
+ data_type = parser.add_option<EnumOption<DataType>>("type", supported_data_types, DataType::F32);
+
+ target->set_help("Target to execute on");
+ data_type->set_help("Data type to use");
+ help->set_help("Show this help message");
+ absolute_tolerance->set_help("Absolute tolerance used for verification");
+        relative_tolerance->set_help("Relative tolerance used for verification");
+        tolerance_number->set_help("Tolerance number used for verification");
+ }
+
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CommonGraphValidateOptions(const CommonGraphValidateOptions &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CommonGraphValidateOptions &operator=(const CommonGraphValidateOptions &) = delete;
+ /** Allow instances of this class to be moved */
+ CommonGraphValidateOptions(CommonGraphValidateOptions &&) noexcept(true) = default;
+ /** Allow instances of this class to be moved */
+ CommonGraphValidateOptions &operator=(CommonGraphValidateOptions &&) noexcept(true) = default;
+ /** Default destructor */
+ virtual ~CommonGraphValidateOptions() = default;
+
+ void consume_common_parameters(CommonParams &common_params)
+ {
+ common_params.common_params.help = help->is_set() ? help->value() : false;
+ common_params.common_params.threads = threads->value();
+ common_params.common_params.target = target->value();
+
+ common_params.verification.absolute_tolerance = absolute_tolerance->value();
+ common_params.verification.relative_tolerance = relative_tolerance->value();
+ common_params.verification.tolerance_number = tolerance_number->value();
+ }
+
+ /** Formatted output of the ExampleParams type
+ *
+ * @param[out] os Output stream.
+ * @param[in] common_params Example parameters to output
+ *
+ * @return None.
+ */
+ virtual void print_parameters(::std::ostream &os, const ExampleParams &common_params)
+ {
+ os << "Threads : " << common_params.common_params.threads << std::endl;
+ os << "Target : " << common_params.common_params.target << std::endl;
+ os << "Data type : " << common_params.data_type << std::endl;
+ }
+
+ ToggleOption *help; /**< show help message */
+ SimpleOption<int> *threads; /**< Number of threads option */
+ EnumOption<arm_compute::graph::Target> *target; /**< Graph execution target */
+ EnumOption<arm_compute::DataType> *data_type; /**< Graph data type */
+ SimpleOption<float> *absolute_tolerance; /**< Absolute tolerance used in verification */
+ SimpleOption<float> *relative_tolerance; /**< Relative tolerance used in verification */
+ SimpleOption<float> *tolerance_number; /**< Tolerance number used in verification */
+};
+
+/** Consumes the common graph options and fills the supplied parameters structure
+ *
+ * @param[in]  options       Options to consume
+ * @param[out] common_params Parameters structure to fill
+ *
+ * @return None.
+ */
+void consume_common_graph_parameters(CommonGraphValidateOptions &options, CommonParams &common_params)
+{
+ common_params.common_params.help = options.help->is_set() ? options.help->value() : false;
+ common_params.common_params.threads = options.threads->value();
+ common_params.common_params.target = options.target->value();
+
+ common_params.verification.absolute_tolerance = options.absolute_tolerance->value();
+ common_params.verification.relative_tolerance = options.relative_tolerance->value();
+ common_params.verification.tolerance_number = options.tolerance_number->value();
+}
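+/* Expected call pattern (a sketch mirroring the workflow comments above; argc/argv
+ * come from main and the resulting values depend on the user supplied options):
+ *
+ *   CommandLineParser          parser;
+ *   CommonGraphValidateOptions options(parser);
+ *   parser.parse(argc, argv);
+ *
+ *   CommonParams params;
+ *   consume_common_graph_parameters(options, params);
+ */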
+
+/** Generates appropriate accessor according to the specified graph parameters
+ *
+ * @param[in] tensor Tensor parameters
+ * @param[in] lower Lower random values bound
+ * @param[in] upper Upper random values bound
+ * @param[in] seed Random generator seed
+ *
+ * @return An appropriate tensor accessor
+ */
+inline std::unique_ptr<graph::ITensorAccessor> get_accessor(const TensorParams &tensor, PixelValue lower, PixelValue upper, const std::random_device::result_type seed = 0)
+{
+ if(!tensor.npy.empty())
+ {
+ return arm_compute::support::cpp14::make_unique<arm_compute::graph_utils::NumPyBinLoader>(tensor.npy);
+ }
+ else
+ {
+ return arm_compute::support::cpp14::make_unique<arm_compute::graph_utils::RandomAccessor>(lower, upper, seed);
+ }
+}
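+/* Sketch of how get_accessor() is used when building the example graphs; it
+ * mirrors the calls made in GraphValidateExample::do_setup() below:
+ *
+ *   const PixelValue lower = PixelValue(params.input.range_low, params.data_type, params.input.quant_info);
+ *   const PixelValue upper = PixelValue(params.input.range_high, params.data_type, params.input.quant_info);
+ *   auto accessor = get_accessor(params.input, lower, upper, 0); // seed 0; loads params.input.npy instead if set
+ */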
+
+/** Graph example validation accessor class */
+template <typename D>
+class VerifyAccessor : public graph::ITensorAccessor
+{
+public:
+ using TBias = typename std::conditional<std::is_same<typename std::decay<D>::type, uint8_t>::value, int32_t, D>::type;
+ /** Constructor
+ *
+ * @param[in] params Convolution parameters
+ */
+ explicit VerifyAccessor(ExampleParams &params)
+ : _params(std::move(params))
+ {
+ }
+    // Inherited methods overridden:
+ bool access_tensor(ITensor &tensor) override
+ {
+ if(_params.output.npy.empty())
+ {
+ arm_compute::test::SimpleTensor<D> src;
+ arm_compute::test::SimpleTensor<D> weights;
+ arm_compute::test::SimpleTensor<TBias> bias;
+
+ //Create Input tensors
+ create_tensors(src, weights, bias, tensor);
+
+ //Fill the tensors with random values
+ fill_tensor(src, 0, static_cast<D>(_params.input.range_low), static_cast<D>(_params.input.range_high));
+ fill_tensor(weights, 1, static_cast<D>(_params.weights.range_low), static_cast<D>(_params.weights.range_high));
+ fill_tensor(bias, 2, static_cast<TBias>(_params.input.range_low), static_cast<TBias>(_params.input.range_high));
+
+ arm_compute::test::SimpleTensor<D> output = reference(src, weights, bias, output_shape(tensor));
+
+ validate(tensor, output);
+ }
+ else
+ {
+ //The user provided a reference file use an npy accessor to validate
+ arm_compute::graph_utils::NumPyAccessor(_params.output.npy, tensor.info()->tensor_shape(), tensor.info()->data_type()).access_tensor(tensor);
+ }
+ return false;
+ }
+
+    /** Create reference tensors.
+     *
+     * The created tensors are used as inputs to the reference implementation.
+     *
+     * @param[out] src     The tensor with the source data.
+     * @param[out] weights The tensor with the weights data.
+     * @param[out] bias    The tensor with the bias data.
+     * @param[in]  tensor  Tensor result of the actual operation passed into the Accessor.
+     *
+     * @return None.
+     */
+ virtual void create_tensors(arm_compute::test::SimpleTensor<D> &src,
+ arm_compute::test::SimpleTensor<D> &weights,
+ arm_compute::test::SimpleTensor<TBias> &bias,
+ ITensor &tensor)
+ {
+ ARM_COMPUTE_UNUSED(tensor);
+ //Create Input tensors
+ src = arm_compute::test::SimpleTensor<D> { TensorShape(_params.input.width, _params.input.height, _params.input.fm, _params.input.batch), _params.data_type, 1, _params.input.quant_info };
+ weights = arm_compute::test::SimpleTensor<D> { TensorShape(_params.weights.width, _params.weights.height, _params.weights.fm), _params.data_type, 1, _params.weights.quant_info };
+ bias = arm_compute::test::SimpleTensor<TBias> { TensorShape(_params.input.height), _params.data_type, 1, _params.input.quant_info };
+ }
+
+ /** Calculate reference output tensor shape.
+ *
+ * @param[in] tensor Tensor result of the actual operation passed into the Accessor.
+ *
+ * @return output tensor shape.
+ */
+ virtual TensorShape output_shape(ITensor &tensor)
+ {
+ return arm_compute::graph_utils::permute_shape(tensor.info()->tensor_shape(), _params.data_layout, DataLayout::NCHW);
+ }
+
+    /** Calculate the reference tensor.
+     *
+     * The result is later compared against the tensor produced by the graph.
+     *
+     * @param[in] src          The tensor with the source data.
+     * @param[in] weights      The tensor with the weights data.
+     * @param[in] bias         The tensor with the bias data.
+     * @param[in] output_shape Shape of the output tensor.
+     *
+     * @return Tensor with the reference output.
+     */
+ virtual arm_compute::test::SimpleTensor<D> reference(arm_compute::test::SimpleTensor<D> &src,
+ arm_compute::test::SimpleTensor<D> &weights,
+ arm_compute::test::SimpleTensor<TBias> &bias,
+ const arm_compute::TensorShape &output_shape) = 0;
+
+    /** Fill a QASYMM8 tensor with random values.
+     *
+     * @param[out] tensor The tensor we want to fill
+     * @param[in]  seed   Seed for the randomization function
+     * @param[in]  low    Lower bound for random values
+     * @param[in]  high   Upper bound for random values
+     *
+     * @return None.
+     */
+ void fill_tensor(arm_compute::test::SimpleTensor<uint8_t> &tensor, std::random_device::result_type seed, uint8_t low, uint8_t high)
+ {
+ ARM_COMPUTE_ERROR_ON(tensor.data_type() != arm_compute::DataType::QASYMM8);
+
+ const UniformQuantizationInfo qinfo = tensor.quantization_info().uniform();
+
+ uint8_t qasymm8_low = quantize_qasymm8(low, qinfo);
+ uint8_t qasymm8_high = quantize_qasymm8(high, qinfo);
+
+ std::mt19937 gen(seed);
+ std::uniform_int_distribution<uint8_t> distribution(qasymm8_low, qasymm8_high);
+
+ for(int i = 0; i < tensor.num_elements(); ++i)
+ {
+ tensor[i] = quantize_qasymm8(distribution(gen), qinfo);
+ }
+ }
+    /** Fill an S32 tensor with random values.
+     *
+     * @param[out] tensor The tensor we want to fill
+     * @param[in]  seed   Seed for the randomization function
+     * @param[in]  low    Lower bound for random values
+     * @param[in]  high   Upper bound for random values
+     *
+     * @return None.
+     */
+ void fill_tensor(arm_compute::test::SimpleTensor<int32_t> &tensor, std::random_device::result_type seed, int32_t low, int32_t high)
+ {
+ std::mt19937 gen(seed);
+        std::uniform_int_distribution<int32_t> distribution(static_cast<int32_t>(low), static_cast<int32_t>(high));
+
+ for(int i = 0; i < tensor.num_elements(); ++i)
+ {
+ tensor[i] = distribution(gen);
+ }
+ }
+    /** Fill an F32 tensor with random values.
+     *
+     * @param[out] tensor The tensor we want to fill
+     * @param[in]  seed   Seed for the randomization function
+     * @param[in]  low    Lower bound for random values
+     * @param[in]  high   Upper bound for random values
+     *
+     * @return None.
+     */
+ void fill_tensor(arm_compute::test::SimpleTensor<float> &tensor, std::random_device::result_type seed, float low, float high)
+ {
+ ARM_COMPUTE_ERROR_ON(tensor.data_type() != arm_compute::DataType::F32);
+ std::mt19937 gen(seed);
+ std::uniform_real_distribution<float> distribution(low, high);
+
+ for(int i = 0; i < tensor.num_elements(); ++i)
+ {
+ tensor[i] = distribution(gen);
+ }
+ }
+    /** Fill an F16 tensor with random values.
+     *
+     * @param[out] tensor The tensor we want to fill
+     * @param[in]  seed   Seed for the randomization function
+     * @param[in]  low    Lower bound for random values
+     * @param[in]  high   Upper bound for random values
+     *
+     * @return None.
+     */
+ void fill_tensor(arm_compute::test::SimpleTensor<half> &tensor, std::random_device::result_type seed, half low, half high)
+ {
+ ARM_COMPUTE_ERROR_ON(tensor.data_type() != arm_compute::DataType::F16);
+ std::mt19937 gen(seed);
+        std::uniform_real_distribution<float> distribution(static_cast<float>(low), static_cast<float>(high));
+
+ for(int i = 0; i < tensor.num_elements(); ++i)
+ {
+ tensor[i] = static_cast<half>(distribution(gen));
+ }
+ }
+
+ /** Select relative tolerance.
+ *
+ * Select relative tolerance if not supplied by user.
+ *
+ * @return Appropriate relative tolerance.
+ */
+ virtual float relative_tolerance() = 0;
+
+ /** Select absolute tolerance.
+ *
+ * Select absolute tolerance if not supplied by user.
+ *
+ * @return Appropriate absolute tolerance.
+ */
+ virtual float absolute_tolerance() = 0;
+
+ /** Select tolerance number.
+ *
+ * Select tolerance number if not supplied by user.
+ *
+ * @return Appropriate tolerance number.
+ */
+ virtual float tolerance_number() = 0;
+
+ /** Validate the output versus the reference.
+ *
+ * @param[in] tensor Tensor result of the actual operation passed into the Accessor.
+ * @param[in] output Tensor result of the reference implementation.
+ *
+ * @return None.
+ */
+ void validate(ITensor &tensor, arm_compute::test::SimpleTensor<D> output)
+ {
+ float user_relative_tolerance = _params.verification.relative_tolerance;
+ float user_absolute_tolerance = _params.verification.absolute_tolerance;
+ float user_tolerance_num = _params.verification.tolerance_number;
+ /* If no user input was provided override with defaults. */
+ if(user_relative_tolerance == -1)
+ {
+ user_relative_tolerance = relative_tolerance();
+ }
+
+ if(user_absolute_tolerance == -1)
+ {
+ user_absolute_tolerance = absolute_tolerance();
+ }
+
+ if(user_tolerance_num == -1)
+ {
+ user_tolerance_num = tolerance_number();
+ }
+
+ const arm_compute::test::validation::RelativeTolerance<float> rel_tolerance(user_relative_tolerance); /**< Relative tolerance */
+ const arm_compute::test::validation::AbsoluteTolerance<float> abs_tolerance(user_absolute_tolerance); /**< Absolute tolerance */
+ const float tolerance_num(user_tolerance_num); /**< Tolerance number */
+
+ arm_compute::test::validation::validate(arm_compute::test::Accessor(tensor), output, rel_tolerance, tolerance_num, abs_tolerance);
+ }
+
+ ExampleParams _params;
+};
+
+/** Generates an appropriate verify accessor for the requested data type
+ *
+ * @param[in] params User supplied parameters.
+ *
+ * @return A verify accessor for the requested data type.
+ */
+template <template <typename D> class VerifyAccessorT>
+inline std::unique_ptr<graph::ITensorAccessor> get_verify_accessor(ExampleParams params)
+{
+ switch(params.data_type)
+ {
+ case DataType::QASYMM8:
+ {
+ return arm_compute::support::cpp14::make_unique<VerifyAccessorT<uint8_t>>(
+ params);
+ }
+ case DataType::F16:
+ {
+ return arm_compute::support::cpp14::make_unique<VerifyAccessorT<half>>(
+ params);
+ }
+ case DataType::F32:
+ {
+ return arm_compute::support::cpp14::make_unique<VerifyAccessorT<float>>(
+ params);
+ }
+ default:
+ ARM_COMPUTE_ERROR("NOT SUPPORTED!");
+ }
+}
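+/* Sketch of attaching a verify accessor to the output layer, as done in
+ * GraphValidateExample::do_setup() below; VerifyAccessorT stands for one of the
+ * per-example accessors (e.g. FullyConnectedVerifyAccessor):
+ *
+ *   graph << arm_compute::graph::frontend::OutputLayer(get_verify_accessor<VerifyAccessorT>(params));
+ */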
+
+template <typename LayerT, typename OptionsT, template <typename D> class VerifyAccessorT>
+class GraphValidateExample : public ValidateExample
+{
+public:
+ GraphValidateExample(std::string name)
+ : graph(0, name)
+ {
+ }
+
+ virtual LayerT GraphFunctionLayer(ExampleParams &params) = 0;
+
+ bool do_setup(int argc, char **argv) override
+ {
+ CommandLineParser parser;
+
+ OptionsT Options(parser);
+
+ parser.parse(argc, argv);
+
+ ExampleParams params;
+
+ Options.consume_common_parameters(params);
+ Options.consume_parameters(params);
+
+ if(params.common_params.help)
+ {
+ parser.print_help(argv[0]);
+ return false;
+ }
+
+ Options.print_parameters(std::cout, params);
+ // Create input descriptor
+ const TensorShape input_shape = arm_compute::graph_utils::permute_shape(TensorShape(params.input.width, params.input.height, params.input.fm, params.input.batch),
+ DataLayout::NCHW, params.data_layout);
+ arm_compute::graph::TensorDescriptor input_descriptor = arm_compute::graph::TensorDescriptor(input_shape, params.data_type, params.input.quant_info, params.data_layout);
+
+ const PixelValue lower = PixelValue(params.input.range_low, params.data_type, params.input.quant_info);
+ const PixelValue upper = PixelValue(params.input.range_high, params.data_type, params.input.quant_info);
+
+ graph << params.common_params.target
+ << params.convolution_method
+ << params.depth_convolution_method
+ << arm_compute::graph::frontend::InputLayer(input_descriptor, get_accessor(params.input, lower, upper, 0))
+ << GraphFunctionLayer(params)
+ << arm_compute::graph::frontend::OutputLayer(get_verify_accessor<VerifyAccessorT>(params));
+
+ arm_compute::graph::GraphConfig config;
+ config.num_threads = params.common_params.threads;
+
+ graph.finalize(params.common_params.target, config);
+
+ return true;
+ }
+
+ void do_run() override
+ {
+ graph.run();
+ }
+
+ void do_teardown() override
+ {
+ }
+
+ arm_compute::graph::frontend::Stream graph;
+};
+
+} // namespace utils
+} // namespace arm_compute
+#endif //GRAPH_VALIDATE_UTILS_H