Diffstat (limited to 'src/backends/gpuFsa/test')
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp          | 13
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp      | 19
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp  | 54
3 files changed, 79 insertions(+), 7 deletions(-)
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
index da6431f857..06b2a71dee 100644
--- a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -5,6 +5,7 @@
#include "backendsCommon/test/EndToEndTestImpl.hpp"
+#include "backendsCommon/test/ActivationEndToEndTestImpl.hpp"
#include "backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp"
#include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp"
#include "backendsCommon/test/layerTests/CastTestImpl.hpp"
@@ -21,6 +22,18 @@ TEST_SUITE("GpuFsaEndToEnd")
std::vector<BackendId> gpuFsaDefaultBackends = {"GpuFsa"};
+// Activation
+// TanH
+TEST_CASE("GpuFsaTanHEndToEndTestFloat32")
+{
+ ActivationEndToEndTest<DataType::Float32>(gpuFsaDefaultBackends, ActivationFunction::TanH, 1.f, 0, 1.f, 1.f);
+}
+// Sigmoid
+TEST_CASE("GpuFsaSigmoidEndToEndTestFloat32")
+{
+ ActivationEndToEndTest<DataType::Float32>(gpuFsaDefaultBackends, ActivationFunction::Sigmoid);
+}
+
// BatchMatMul
TEST_CASE("RefBatchMatMulEndToEndFloat32Test")
{
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
index cb1ddd8182..cf465c28ff 100644
--- a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -17,6 +17,24 @@ using namespace armnn;
TEST_SUITE("GpuFsaLayerSupport")
{
+TEST_CASE("IsLayerSupportedGpuFsaActivation")
+{
+ TensorInfo inputInfo ({ 1, 5, 5, 1 }, DataType::Float32);
+ TensorInfo outputInfo({ 1, 5, 5, 1 }, DataType::Float32);
+
+ ActivationDescriptor desc{};
+
+ GpuFsaLayerSupport supportChecker;
+ std::string reasonIfNotSupported;
+ auto supported = supportChecker.IsLayerSupported(LayerType::Activation,
+ {inputInfo, outputInfo},
+ desc,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfNotSupported);
+ CHECK(supported);
+}
+
TEST_CASE("IsLayerSupportedGpuFsaBatchMatMul")
{
TensorInfo input0Info({ 2, 2 }, DataType::Float32);
@@ -82,7 +100,6 @@ TEST_CASE("IsLayerSupportedGpuFsaConv2dUnsupported")
TensorInfo outputInfo({ 1, 3, 3, 1 }, DataType::Float32);
TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);
- // NCHW is unsupported.
Convolution2dDescriptor desc;
desc.m_DataLayout = DataLayout::NCHW;
diff --git a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
index 1e5c976c00..ac341c2476 100644
--- a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
@@ -15,10 +15,56 @@ using namespace armnn;
TEST_SUITE("GpuFsaOptimizedNetwork")
{
-TEST_CASE("BatchMatMulSupportedOptimizedNetwork")
+TEST_CASE("ActivationSupportedOptimizedNetwork")
{
- using namespace armnn;
+ const float qScale = 1.0f;
+ const int32_t qOffset = 0;
+
+ const TensorShape& inputShape = { 2, 2, 2 };
+ const TensorShape& outputShape = { 2, 2, 2 };
+
+ TensorInfo inputTensorInfo(inputShape, DataType::Float32, qScale, qOffset, true);
+ TensorInfo outputTensorInfo(outputShape, DataType::Float32, qScale, qOffset);
+
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(IRuntime::Create(options));
+ INetworkPtr network(INetwork::Create());
+
+ ActivationDescriptor desc;
+ SUBCASE("TanH")
+ {
+ desc.m_Function = ActivationFunction::TanH;
+ desc.m_A = 1.f;
+ desc.m_B = 1.f;
+ }
+ SUBCASE("Sigmoid")
+ {
+ desc.m_Function = ActivationFunction::Sigmoid;
+ }
+
+ IConnectableLayer* input = network->AddInputLayer(0, "input");
+ IConnectableLayer* activationLayer = network->AddActivationLayer(desc, "activation");
+ IConnectableLayer* output = network->AddOutputLayer(1, "output");
+
+ Connect(input, activationLayer, inputTensorInfo, 0, 0);
+ Connect(activationLayer, output, outputTensorInfo, 0, 0);
+ std::vector<BackendId> backends = { "GpuFsa" };
+
+ OptimizerOptionsOpaque optimizedOptions;
+ IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ CHECK(optNet);
+
+ Graph& graph = GetGraphForTesting(optNet.get());
+
+ // Check graph layer sequence to ensure that the network has been replaced with a PreCompiledLayer
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<PreCompiledLayer>,
+ &IsLayerOfType<OutputLayer>));
+}
+TEST_CASE("BatchMatMulSupportedOptimizedNetwork")
+{
const float qScale = 1.0f;
const int32_t qOffset = 0;
@@ -63,8 +109,6 @@ TEST_CASE("BatchMatMulSupportedOptimizedNetwork")
TEST_CASE("CastSupportedOptimizedNetwork")
{
- using namespace armnn;
-
const float qScale = 1.0f;
const int32_t qOffset = 0;
@@ -221,8 +265,6 @@ TEST_CASE("TwoConv2dSupportedOptimizedNetwork")
TEST_CASE("ElementwiseBinarySupportedOptimizedNetwork")
{
- using namespace armnn;
-
const float qScale = 1.0f;
const int32_t qOffset = 0;