aboutsummaryrefslogtreecommitdiff
path: root/samples
diff options
context:
space:
mode:
authorCathal Corbett <cathal.corbett@arm.com>2021-10-22 11:12:07 +0100
committerDavid Monahan <david.monahan@arm.com>2021-11-08 19:05:11 +0000
commit5b8093c17044e8eaaaa42d96ba4902dee5791be4 (patch)
tree7f49f91e76f171041fe51c2c078b9271aa220b48 /samples
parentd69cb904415621b066599dc20164bdb71558dc14 (diff)
downloadarmnn-5b8093c17044e8eaaaa42d96ba4902dee5791be4.tar.gz
IVGCVSW-6420: Constant flag in tensor info is not set correctly
!android-nn-driver:6532 !armnn-internal-tests:372451 * Made fix to 2 out of 3 ConstTensor() constructors in Tensor.hpp to throw InvalidArgumentException when TensorInfo isConstant parameter is false. * Added new ConstTensor() constructor in Tensor.cpp to accept vector<>.data() using template<typename MemoryType>. * Fixed runtime->GetOutputTensorInfo()/GetInputTensorInfo() methods and called submethods to return TensorInfo& rather than TensorInfo. * Fixed all failing unit tests for CpuRef/CpuAcc/GpuAcc to ensure any ConstTensor created has it's TensorInfo isConstant set to true. * Added unit tests in TensorTest.cpp to ensure ConstTensor constructors throw InvalidArgumentException when TensorInfo isConstant parameter is false. * Added unit test to ensure an empty ConstTensor constructor will set TensorInfo isConstant to true. * Indentation fixes. * Fix to arm_tensor.i to add isConstant parameter to TensorInfo constructor. Added methods IsConstant() and SetConstant(). * Fix to const_tensor.py to throw ValueError when TensorInfo isConstant is set to false when constructing a ConstTensor. * Fixed PyArmnn unit tests to set TensorInfo isConstant to True when ConstTensor is used. * Added unit tests in test_const_tensor.py to ensure ConstTensor constructors throw ValueError when TensorInfo isConstant parameter is false. Signed-off-by: Cathal Corbett <cathal.corbett@arm.com> Change-Id: I44e440dd0422c366d31bbdbc77ad2b4db0bde148
Diffstat (limited to 'samples')
-rw-r--r--samples/AsyncExecutionSample.cpp9
-rw-r--r--samples/CustomMemoryAllocatorSample.cpp7
-rw-r--r--samples/DynamicSample.cpp6
-rw-r--r--samples/SimpleSample.cpp11
4 files changed, 19 insertions, 14 deletions
diff --git a/samples/AsyncExecutionSample.cpp b/samples/AsyncExecutionSample.cpp
index 6d2fe243dd..a789aade01 100644
--- a/samples/AsyncExecutionSample.cpp
+++ b/samples/AsyncExecutionSample.cpp
@@ -49,7 +49,7 @@ int main()
INetworkPtr myNetwork = INetwork::Create();
float weightsData[] = {1.0f}; // Identity
- TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32);
+ TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32, 0.0f, 0, true);
weightsInfo.SetConstant();
ConstTensor weights(weightsInfo, weightsData);
@@ -104,11 +104,12 @@ int main()
std::vector<std::vector<float>> outputData;
outputData.resize(2, std::vector<float>(1));
-
+ inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
std::vector<InputTensors> inputTensors
{
- {{0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData[0].data())}},
- {{0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData[1].data())}}
+ {{0, armnn::ConstTensor(inputTensorInfo, inputData[0].data())}},
+ {{0, armnn::ConstTensor(inputTensorInfo, inputData[1].data())}}
};
std::vector<OutputTensors> outputTensors
{
diff --git a/samples/CustomMemoryAllocatorSample.cpp b/samples/CustomMemoryAllocatorSample.cpp
index 171d8e2b5d..a1b05d4be0 100644
--- a/samples/CustomMemoryAllocatorSample.cpp
+++ b/samples/CustomMemoryAllocatorSample.cpp
@@ -78,7 +78,7 @@ int main()
INetworkPtr myNetwork = INetwork::Create();
armnn::FullyConnectedDescriptor fullyConnectedDesc;
float weightsData[] = {1.0f}; // Identity
- TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32);
+ TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32, 0.0f, 0, true);
weightsInfo.SetConstant(true);
armnn::ConstTensor weights(weightsInfo, weightsData);
ARMNN_NO_DEPRECATE_WARN_BEGIN
@@ -152,10 +152,11 @@ int main()
auto* outputPtr = reinterpret_cast<float*>(alignedOutputPtr);
std::fill_n(outputPtr, numElements, -10.0f);
-
+ inputTensorInfo = runtime->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
armnn::InputTensors inputTensors
{
- {0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkIdentifier, 0), alignedInputPtr)},
+ {0, armnn::ConstTensor(inputTensorInfo, alignedInputPtr)},
};
armnn::OutputTensors outputTensors
{
diff --git a/samples/DynamicSample.cpp b/samples/DynamicSample.cpp
index ffcc9de083..8a6ff92706 100644
--- a/samples/DynamicSample.cpp
+++ b/samples/DynamicSample.cpp
@@ -62,10 +62,12 @@ int main()
};
std::vector<float> outputData(2);
+ TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
InputTensors inputTensors
{
- {0,armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), input0Data.data())},
- {1,armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), input1Data.data())}
+ {0,armnn::ConstTensor(inputTensorInfo, input0Data.data())},
+ {1,armnn::ConstTensor(inputTensorInfo, input1Data.data())}
};
OutputTensors outputTensors
{
diff --git a/samples/SimpleSample.cpp b/samples/SimpleSample.cpp
index 3f94b53ca1..01f078bd56 100644
--- a/samples/SimpleSample.cpp
+++ b/samples/SimpleSample.cpp
@@ -28,7 +28,7 @@ int main()
INetworkPtr myNetwork = INetwork::Create();
float weightsData[] = {1.0f}; // Identity
- TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32);
+ TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32, 0.0f, 0, true);
weightsInfo.SetConstant();
ConstTensor weights(weightsInfo, weightsData);
@@ -75,11 +75,12 @@ int main()
std::vector<float> inputData{number};
std::vector<float> outputData(1);
-
- InputTensors inputTensors{{0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0),
- inputData.data())}};
+ inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
+ InputTensors inputTensors{{0, armnn::ConstTensor(inputTensorInfo,
+ inputData.data())}};
OutputTensors outputTensors{{0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0),
- outputData.data())}};
+ outputData.data())}};
// Execute network
run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);