about summary refs log tree commit diff
path: root/src/backends/cl
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/cl')
-rw-r--r--  src/backends/cl/test/ClContextSerializerTests.cpp   4
-rw-r--r--  src/backends/cl/test/ClCustomAllocatorTests.cpp     6
-rw-r--r--  src/backends/cl/test/ClFallbackTests.cpp            5
-rw-r--r--  src/backends/cl/test/ClImportTensorHandleTests.cpp  4
-rw-r--r--  src/backends/cl/test/Fp16SupportTest.cpp            6
5 files changed, 19 insertions, 6 deletions
diff --git a/src/backends/cl/test/ClContextSerializerTests.cpp b/src/backends/cl/test/ClContextSerializerTests.cpp
index 495aa69bff..862ed2ecab 100644
--- a/src/backends/cl/test/ClContextSerializerTests.cpp
+++ b/src/backends/cl/test/ClContextSerializerTests.cpp
@@ -44,9 +44,11 @@ void RunInference(armnn::NetworkId& netId, armnn::IRuntimePtr& runtime, std::vec
1, 10, 3, 200, 5 // Some inputs - one of which is sufficiently larger than the others to saturate softmax.
};
+ armnn::TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
armnn::InputTensors inputTensors
{
- {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
+ {0, armnn::ConstTensor(inputTensorInfo, inputData.data())}
};
armnn::OutputTensors outputTensors
diff --git a/src/backends/cl/test/ClCustomAllocatorTests.cpp b/src/backends/cl/test/ClCustomAllocatorTests.cpp
index 60145139ff..c09d0b2bc2 100644
--- a/src/backends/cl/test/ClCustomAllocatorTests.cpp
+++ b/src/backends/cl/test/ClCustomAllocatorTests.cpp
@@ -67,7 +67,7 @@ armnn::INetworkPtr CreateTestNetwork(armnn::TensorInfo& inputTensorInfo)
armnn::FullyConnectedDescriptor fullyConnectedDesc;
float weightsData[] = {1.0f}; // Identity
- TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32);
+ TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32, 0.0f, 0, true);
weightsInfo.SetConstant(true);
armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -145,9 +145,11 @@ TEST_CASE("ClCustomAllocatorTest")
auto* outputPtr = reinterpret_cast<float*>(alignedOutputPtr);
std::fill_n(outputPtr, numElements, -10.0f);
+ armnn::TensorInfo inputTensorInfo2 = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo2.SetConstant(true);
armnn::InputTensors inputTensors
{
- {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), alignedInputPtr)},
+ {0, armnn::ConstTensor(inputTensorInfo2, alignedInputPtr)},
};
armnn::OutputTensors outputTensors
{
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 7721206d3d..7cd05d193b 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -35,6 +35,7 @@ TEST_CASE("ClImportEnabledFallbackToNeon")
sub->GetOutputSlot(0).Connect(output->GetInputSlot(0));
TensorInfo info = TensorInfo({ 1, 2, 4, 2 }, DataType::Float32);
+ info.SetConstant(true);
input0->GetOutputSlot(0).SetTensorInfo(info);
input1->GetOutputSlot(0).SetTensorInfo(info);
@@ -181,6 +182,7 @@ TEST_CASE("ClImportDisabledFallbackToNeon")
sub->GetOutputSlot(0).Connect(output->GetInputSlot(0));
TensorInfo info = TensorInfo({ 1, 2, 3, 2 }, DataType::Float32);
+ info.SetConstant(true);
input0->GetOutputSlot(0).SetTensorInfo(info);
input1->GetOutputSlot(0).SetTensorInfo(info);
@@ -311,6 +313,7 @@ TEST_CASE("ClImportEnabledFallbackSubgraphToNeon")
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
TensorInfo info = TensorInfo({ 1, 2, 4, 2 }, DataType::Float32);
+ info.SetConstant(true);
TensorInfo poolingInfo = TensorInfo({ 1, 2, 2, 1 }, DataType::Float32);
input0->GetOutputSlot(0).SetTensorInfo(info);
@@ -468,6 +471,7 @@ TEST_CASE("ClImportDisableFallbackSubgraphToNeon")
pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
TensorInfo info = TensorInfo({ 1, 2, 3, 2 }, DataType::Float32);
+ info.SetConstant(true);
TensorInfo poolingInfo = TensorInfo({ 1, 2, 1, 1 }, DataType::Float32);
input0->GetOutputSlot(0).SetTensorInfo(info);
@@ -536,6 +540,7 @@ TEST_CASE("ClImportDisableFallbackSubgraphToNeon")
std::vector<float> expectedOutput{ 11.0f, -1.0f };
+
InputTensors inputTensors
{
{ 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 6b1d3521d5..0403d5379e 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -171,9 +171,11 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClImportEndToEnd")
auto* outputPtr = reinterpret_cast<float*>(alignedOutputPtr);
std::fill_n(outputPtr, numElements, -10.0f);
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), alignedInputPtr)},
+ {0,armnn::ConstTensor(inputTensorInfo, alignedInputPtr)},
};
OutputTensors outputTensors
{
diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index 1974d4d856..b30a447f9f 100644
--- a/src/backends/cl/test/Fp16SupportTest.cpp
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -88,10 +88,12 @@ TEST_CASE("Fp16AdditionTest")
100.0_h, 200.0_h, 300.0_h, 400.0_h
};
+ TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0,ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
- {1,ConstTensor(runtime->GetInputTensorInfo(netId, 0), input2Data.data())}
+ {0,ConstTensor(inputTensorInfo, input1Data.data())},
+ {1,ConstTensor(inputTensorInfo, input2Data.data())}
};
std::vector<Half> outputData(input1Data.size());