aboutsummaryrefslogtreecommitdiff
path: root/tests
diff options
context:
space:
mode:
Diffstat (limited to 'tests')
-rw-r--r--tests/Globals.h2
-rw-r--r--tests/Utils.h10
-rw-r--r--tests/framework/Framework.cpp28
-rw-r--r--tests/framework/ParametersLibrary.cpp (renamed from tests/ParametersLibrary.cpp)15
-rw-r--r--tests/framework/ParametersLibrary.h (renamed from tests/ParametersLibrary.h)29
-rw-r--r--tests/main.cpp33
-rw-r--r--tests/validation/CL/ActivationLayer.cpp9
-rw-r--r--tests/validation/fixtures/ActivationLayerFixture.h14
8 files changed, 104 insertions, 36 deletions
diff --git a/tests/Globals.h b/tests/Globals.h
index 569b1a31c6..989fdfdcd4 100644
--- a/tests/Globals.h
+++ b/tests/Globals.h
@@ -25,7 +25,7 @@
#define __ARM_COMPUTE_TEST_GLOBALS_H__
#include "tests/AssetsLibrary.h"
-#include "tests/ParametersLibrary.h"
+#include "tests/framework/ParametersLibrary.h"
#include <memory>
diff --git a/tests/Utils.h b/tests/Utils.h
index ea70fffe3a..3bb6060951 100644
--- a/tests/Utils.h
+++ b/tests/Utils.h
@@ -520,14 +520,15 @@ inline bool is_in_valid_region(const ValidRegion &valid_region, Coordinates coor
* @param[in] num_channels (Optional) Number of channels.
* @param[in] quantization_info (Optional) Quantization info for asymmetric quantized types.
* @param[in] data_layout (Optional) Data layout. Default is NCHW.
+ * @param[in] ctx (Optional) Pointer to the runtime context.
*
* @return Initialized tensor of given type.
*/
template <typename T>
inline T create_tensor(const TensorShape &shape, DataType data_type, int num_channels = 1,
- QuantizationInfo quantization_info = QuantizationInfo(), DataLayout data_layout = DataLayout::NCHW)
+ QuantizationInfo quantization_info = QuantizationInfo(), DataLayout data_layout = DataLayout::NCHW, IRuntimeContext *ctx = nullptr)
{
- T tensor;
+ T tensor(ctx);
TensorInfo info(shape, num_channels, data_type);
info.set_quantization_info(quantization_info);
info.set_data_layout(data_layout);
@@ -540,15 +541,16 @@ inline T create_tensor(const TensorShape &shape, DataType data_type, int num_cha
*
* @param[in] shape Tensor shape.
* @param[in] format Format type.
+ * @param[in] ctx (Optional) Pointer to the runtime context.
*
* @return Initialized tensor of given type.
*/
template <typename T>
-inline T create_tensor(const TensorShape &shape, Format format)
+inline T create_tensor(const TensorShape &shape, Format format, IRuntimeContext *ctx = nullptr)
{
TensorInfo info(shape, format);
- T tensor;
+ T tensor(ctx);
tensor.allocator()->init(info);
return tensor;
diff --git a/tests/framework/Framework.cpp b/tests/framework/Framework.cpp
index fbc2456047..5d1600e083 100644
--- a/tests/framework/Framework.cpp
+++ b/tests/framework/Framework.cpp
@@ -25,8 +25,12 @@
#include "arm_compute/runtime/Scheduler.h"
#include "support/ToolchainSupport.h"
+#include "tests/framework/ParametersLibrary.h"
+
#ifdef ARM_COMPUTE_CL
+#include "arm_compute/runtime/CL/CLRuntimeContext.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
+
#endif /* ARM_COMPUTE_CL */
#include <chrono>
@@ -38,6 +42,8 @@ namespace arm_compute
{
namespace test
{
+std::unique_ptr<ParametersLibrary> parameters;
+
namespace framework
{
std::unique_ptr<InstrumentsInfo> instruments_info;
@@ -558,17 +564,23 @@ bool Framework::run()
// Every 100 tests, reset the OpenCL context to release the allocated memory
if(opencl_is_available() && (id_run_test % 100) == 0)
{
- auto ctx_properties = CLScheduler::get().context().getInfo<CL_CONTEXT_PROPERTIES>(nullptr);
- auto queue_properties = CLScheduler::get().queue().getInfo<CL_QUEUE_PROPERTIES>(nullptr);
-
- cl::Context new_ctx = cl::Context(CL_DEVICE_TYPE_DEFAULT, ctx_properties.data());
- cl::CommandQueue new_queue = cl::CommandQueue(new_ctx, CLKernelLibrary::get().get_device(), queue_properties);
-
CLKernelLibrary::get().clear_programs_cache();
- CLScheduler::get().set_context(new_ctx);
- CLScheduler::get().set_queue(new_queue);
+ auto cl_ctx = support::cpp14::make_unique<CLRuntimeContext>();
+ assert(cl_ctx != nullptr);
+ CLScheduler *gpu_scheduler = cl_ctx->gpu_scheduler();
+ assert(gpu_scheduler != nullptr);
+ {
+ // Legacy singletons API: This has been deprecated and the singletons will be removed
+ // Setup singleton for backward compatibility
+ CLScheduler::get().init(gpu_scheduler->context(), gpu_scheduler->queue(), cl_ctx->kernel_library().get_device());
+ }
+ if(parameters)
+ {
+ parameters->set_gpu_ctx(std::move(cl_ctx));
+ }
}
#endif // ARM_COMPUTE_CL
+
run_test(test_info, *test_factory);
++id_run_test;
diff --git a/tests/ParametersLibrary.cpp b/tests/framework/ParametersLibrary.cpp
index 16152c8482..65a09eeb64 100644
--- a/tests/ParametersLibrary.cpp
+++ b/tests/framework/ParametersLibrary.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "tests/ParametersLibrary.h"
+#include "tests/framework/ParametersLibrary.h"
namespace arm_compute
{
@@ -32,10 +32,23 @@ void ParametersLibrary::set_cpu_ctx(std::unique_ptr<IRuntimeContext> cpu_ctx)
_cpu_ctx = std::move(cpu_ctx);
}
+void ParametersLibrary::set_gpu_ctx(std::unique_ptr<IRuntimeContext> gpu_ctx)
+{
+ _gpu_ctx = std::move(gpu_ctx);
+}
+
template <>
typename ContextType<Tensor>::type *ParametersLibrary::get_ctx<Tensor>()
{
return _cpu_ctx.get();
}
+
+#ifdef ARM_COMPUTE_CL
+template <>
+typename ContextType<CLTensor>::type *ParametersLibrary::get_ctx<CLTensor>()
+{
+ return static_cast<typename ContextType<CLTensor>::type *>(_gpu_ctx.get());
+}
+#endif /* ARM_COMPUTE_CL */
} // namespace test
} // namespace arm_compute
diff --git a/tests/ParametersLibrary.h b/tests/framework/ParametersLibrary.h
index a99be46d3f..4079ab25b9 100644
--- a/tests/ParametersLibrary.h
+++ b/tests/framework/ParametersLibrary.h
@@ -26,6 +26,13 @@
#include "arm_compute/runtime/IRuntimeContext.h"
#include "arm_compute/runtime/Tensor.h"
+#ifdef ARM_COMPUTE_CL
+#include "arm_compute/runtime/CL/CLRuntimeContext.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#endif /* ARM_COMPUTE_CL */
+#ifdef ARM_COMPUTE_GC
+#include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
+#endif /* ARM_COMPUTE_GC */
#include <memory>
@@ -45,6 +52,22 @@ struct ContextType<Tensor>
using type = IRuntimeContext;
};
+#ifdef ARM_COMPUTE_CL
+template <>
+struct ContextType<CLTensor>
+{
+ using type = CLRuntimeContext;
+};
+#endif /* ARM_COMPUTE_CL */
+
+#ifdef ARM_COMPUTE_GC
+template <>
+struct ContextType<GCTensor>
+{
+ using type = IRuntimeContext;
+};
+#endif /* ARM_COMPUTE_GC */
+
/** Class that contains all the global parameters used by the tests */
class ParametersLibrary final
{
@@ -56,6 +79,11 @@ public:
* @param[in] cpu_ctx CPU context to use
*/
void set_cpu_ctx(std::unique_ptr<IRuntimeContext> cpu_ctx);
+ /** Set gpu context to be used by the tests
+ *
+ * @param[in] gpu_ctx GPU context to use
+ */
+ void set_gpu_ctx(std::unique_ptr<IRuntimeContext> gpu_ctx);
/** Get context given a tensor type
*
* @tparam TensorType
@@ -70,6 +98,7 @@ public:
private:
std::unique_ptr<IRuntimeContext> _cpu_ctx{ nullptr };
+ std::unique_ptr<IRuntimeContext> _gpu_ctx{ nullptr };
};
} // namespace test
} // namespace arm_compute
diff --git a/tests/main.cpp b/tests/main.cpp
index 01741939a0..415dba0405 100644
--- a/tests/main.cpp
+++ b/tests/main.cpp
@@ -23,11 +23,11 @@
*/
#include "support/ToolchainSupport.h"
#include "tests/AssetsLibrary.h"
-#include "tests/ParametersLibrary.h"
#include "tests/framework/DatasetModes.h"
#include "tests/framework/Exceptions.h"
#include "tests/framework/Framework.h"
#include "tests/framework/Macros.h"
+#include "tests/framework/ParametersLibrary.h"
#include "tests/framework/Profiler.h"
#include "tests/framework/command_line/CommonOptions.h"
#include "tests/framework/instruments/Instruments.h"
@@ -74,8 +74,8 @@ namespace arm_compute
{
namespace test
{
-std::unique_ptr<AssetsLibrary> library;
-std::unique_ptr<ParametersLibrary> parameters;
+std::unique_ptr<AssetsLibrary> library;
+extern std::unique_ptr<ParametersLibrary> parameters;
} // namespace test
} // namespace arm_compute
@@ -92,17 +92,6 @@ bool file_exists(const std::string &filename)
int main(int argc, char **argv)
{
-#ifdef ARM_COMPUTE_CL
- CLTuner cl_tuner(false);
- if(opencl_is_available())
- {
- auto ctx_dev_err = create_opencl_context_and_device();
- ARM_COMPUTE_ERROR_ON_MSG(std::get<2>(ctx_dev_err) != CL_SUCCESS, "Failed to create OpenCL context");
- CLScheduler::get()
- .default_init_with_context(std::get<1>(ctx_dev_err), std::get<0>(ctx_dev_err), &cl_tuner);
- }
-#endif /* ARM_COMPUTE_CL */
-
#ifdef ARM_COMPUTE_GC
GCScheduler::get().default_init();
#endif /* ARM_COMPUTE_GC */
@@ -185,6 +174,20 @@ int main(int argc, char **argv)
parameters->set_cpu_ctx(std::move(cpu_ctx));
#ifdef ARM_COMPUTE_CL
+ CLTuner cl_tuner(false);
+ // Create GPU context
+ auto cl_ctx = support::cpp14::make_unique<CLRuntimeContext>();
+ assert(cl_ctx != nullptr);
+ CLScheduler *gpu_scheduler = cl_ctx->gpu_scheduler();
+ assert(gpu_scheduler != nullptr);
+ const auto device_version = cl_ctx->kernel_library().get_device_version();
+ {
+ // Legacy singletons API: This has been deprecated and the singletons will be removed
+ // Setup singleton for backward compatibility
+ CLScheduler::get().init(gpu_scheduler->context(), gpu_scheduler->queue(), cl_ctx->kernel_library().get_device(), &cl_tuner);
+ }
+ parameters->set_gpu_ctx(std::move(cl_ctx));
+
if(enable_tuner->is_set())
{
cl_tuner.set_tune_new_kernels(enable_tuner->value());
@@ -222,7 +225,7 @@ int main(int argc, char **argv)
#ifdef ARM_COMPUTE_CL
if(opencl_is_available())
{
- p->print_entry("CL_DEVICE_VERSION", CLKernelLibrary::get().get_device_version());
+ p->print_entry("CL_DEVICE_VERSION", device_version);
}
else
{
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index 250777d541..a17ad9b269 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -95,15 +95,18 @@ TEST_SUITE(ActivationLayer)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), CNNDataTypes), framework::dataset::make("InPlace", { false, true })),
shape, data_type, in_place)
{
+ // Create context
+ auto ctx = parameters->get_ctx<CLTensor>();
+
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, data_type, 1);
- CLTensor dst = create_tensor<CLTensor>(shape, data_type, 1);
+ CLTensor src = create_tensor<CLTensor>(shape, data_type, 1, QuantizationInfo(), DataLayout::NCHW, ctx);
+ CLTensor dst = create_tensor<CLTensor>(shape, data_type, 1, QuantizationInfo(), DataLayout::NCHW, ctx);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
// Create and configure function
- CLActivationLayer act_layer;
+ CLActivationLayer act_layer(ctx);
if(in_place)
{
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index 8fa74979a8..f6d43ddd89 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -29,9 +29,9 @@
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
-#include "tests/ParametersLibrary.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
+#include "tests/framework/ParametersLibrary.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ActivationLayer.h"
@@ -47,6 +47,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ActivationValidationGenericFixture : public framework::Fixture
{
public:
+ ActivationValidationGenericFixture()
+ : _target(parameters->get_ctx<TensorType>())
+ {
+ }
+
template <typename...>
void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
{
@@ -90,12 +95,13 @@ protected:
TensorType compute_target(const TensorShape &shape, ActivationLayerInfo info)
{
+ auto ctx = parameters->get_ctx<TensorType>();
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _input_quantization_info);
- TensorType dst = create_tensor<TensorType>(shape, _data_type, 1, _output_quantization_info);
+ TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _input_quantization_info, DataLayout::NCHW, ctx);
+ TensorType dst = create_tensor<TensorType>(shape, _data_type, 1, _output_quantization_info, DataLayout::NCHW, ctx);
// Create and configure function
- FunctionType act_layer(parameters->get_ctx<TensorType>());
+ FunctionType act_layer(ctx);
TensorType *dst_ptr = _in_place ? nullptr : &dst;