| author | Matthew Sloyan <matthew.sloyan@arm.com> | 2021-08-09 12:49:23 +0100 |
|---|---|---|
| committer | Matthew Sloyan <matthew.sloyan@arm.com> | 2021-08-10 10:05:43 +0100 |
| commit | 56c249c1bf961440b6c2f40f1cdef17ee8ee55ef (patch) | |
| tree | e5a5b724d12e665ec2b818939e55ddfbd570f6aa | |
| parent | 29cc961839c3ce4b0b69b684424b417e173b7ddd (diff) | |
| download | android-nn-driver-56c249c1bf961440b6c2f40f1cdef17ee8ee55ef.tar.gz | |
BugFix: Fix skipping VTS tests.
* Some VTS tests on GpuAcc and CpuAcc for P & Q were being skipped because
  the IsConstant flag was not set on the TensorInfo passed to the validation
  function (see the sketch below).
Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: Idd977bf621b71bb9625d8fc6620c29915de4b431
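For context, a minimal sketch of the failure mode the commit message describes. It assumes the armnn::TensorInfo IsConstant()/SetConstant() API present in Arm NN releases of this era; the `validate` lambda is a hypothetical stand-in for the backend support check, not the driver's actual validation function.

```cpp
// Hypothetical repro sketch, not driver code: a TensorInfo copied from a
// ConstTensor carries the IsConstant flag, while one built from the NNAPI
// operand does not (assumes armnn::TensorInfo::SetConstant()/IsConstant()).
#include <armnn/Tensor.hpp>
#include <iostream>

int main()
{
    // Info derived from the model operand: shape and type only, flag unset.
    armnn::TensorInfo operandInfo(armnn::TensorShape({1, 4}), armnn::DataType::Float32);

    // Info attached to a ConstTensor: same shape/type, but marked constant.
    armnn::TensorInfo constantInfo(operandInfo);
    constantInfo.SetConstant();

    // Stand-in for the backend support check: a constant layer whose info is
    // not flagged constant is rejected, the driver reports the operation as
    // unsupported, and the corresponding VTS test is skipped instead of run.
    auto validate = [](const armnn::TensorInfo& info) { return info.IsConstant(); };

    std::cout << "operand info accepted:  " << validate(operandInfo)  << '\n'; // 0
    std::cout << "constant info accepted: " << validate(constantInfo) << '\n'; // 1
    return 0;
}
```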
-rw-r--r-- | ConversionUtils.hpp | 14 |
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 66e02333..ca5c99ec 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1244,9 +1244,10 @@ LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                 armnn::IConnectableLayer* constantLayer =
                                     data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                 armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
-                outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());
+                armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
+                outputSlot.SetTensorInfo(constantTensorInfo);

-                return LayerInputHandle(true, &outputSlot, operandTensorInfo);
+                return LayerInputHandle(true, &outputSlot, constantTensorInfo);
             }
             else
             {
@@ -1371,9 +1372,10 @@ LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetw
                 armnn::IConnectableLayer* constantLayer =
                                     data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                 armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
-                outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());
+                armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
+                outputSlot.SetTensorInfo(constantTensorInfo);

-                return LayerInputHandle(true, &outputSlot, operandTensorInfo);
+                return LayerInputHandle(true, &outputSlot, constantTensorInfo);
             }
             else
             {
@@ -3036,7 +3038,6 @@ bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model,
     {
         return Fail("%s: Could not read weights", __func__);
     }
-    const armnn::TensorInfo& weightsInfo = GetTensorInfoForOperand(*weightsOperand);

     // If weights are constant a separate constant layer will be created to store data.
     // Otherwise handle non const weights as inputs.
@@ -3052,7 +3053,6 @@ bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model,
     {
         return Fail("%s: Could not read bias", __func__);
     }
-    armnn::TensorInfo biasInfo = GetTensorInfoForOperand(*biasOperand);

     // If bias are constant a separate constant layer will be created to store data.
     // Otherwise handle non const bias as inputs.
@@ -3062,6 +3062,7 @@ bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model,
         return Fail("%s: Operation has invalid inputs", __func__);
     }

+    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
     armnn::TensorInfo reshapedInfo = inputInfo;
     try
     {
@@ -3073,6 +3074,7 @@ bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model,
     }

     // Ensuring that the bias value is within 1% of the weights input (small float differences can exist)
+    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
     SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);

     ActivationFn activationFunction;
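The two ConvertToLayerInputHandle hunks reduce to the pattern below: a hedged, self-contained sketch against the Arm NN public API (INetwork, ConstTensor, IOutputSlot). The driver's tensorPin and LayerInputHandle types are replaced by local stand-ins, so those names are illustrative only.

```cpp
// Sketch of the fixed pattern (assumed Arm NN public API; the driver's
// tensorPin/LayerInputHandle are replaced by local stand-ins).
#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>
#include <vector>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Backing data for the constant operand, as the driver would pin it.
    std::vector<float> weights(4, 1.0f);
    TensorInfo weightsInfo(TensorShape({1, 4}), DataType::Float32);
    weightsInfo.SetConstant();                 // the flag the validation check needs
    ConstTensor constTensor(weightsInfo, weights);

    IConnectableLayer* constantLayer = network->AddConstantLayer(constTensor);
    IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);

    // The fix: reuse the ConstTensor's own info (IsConstant() == true) for the
    // output slot and for whatever handle is passed on to validation, instead
    // of the operand info, which lacked the flag.
    TensorInfo constantTensorInfo = constTensor.GetInfo();
    outputSlot.SetTensorInfo(constantTensorInfo);
    return 0;
}
```

Carrying the ConstTensor's own info forward means every downstream consumer, including the backend IsSupported checks, sees the constant flag; the same substitution is what the ConvertFullyConnected hunks apply to the weights and bias inputs.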