Diffstat (limited to 'src/backends/reference')
-rw-r--r--  src/backends/reference/RefBackend.hpp                            |  3
-rw-r--r--  src/backends/reference/test/RefCreateWorkloadTests.cpp           | 18
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp                 | 12
-rw-r--r--  src/backends/reference/workloads/RefFullyConnectedWorkload.cpp   | 42
4 files changed, 42 insertions(+), 33 deletions(-)
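
The change below lets CpuRef consume FullyConnected weights and biases as regular layer inputs (input slots 1 and 2) rather than constants captured by the workload, and advertises this via the new "ConstantTensorsAsInputs" capability. A minimal sketch of a network built that way, assuming the public INetwork API of this ArmNN revision; the shapes mirror the new RefCreateWorkloadTests case and the layer names are illustrative:

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

// Illustrative sketch (not part of this patch): FullyConnected weights and
// bias are supplied as network inputs instead of constants baked into the layer.
armnn::INetworkPtr BuildFullyConnectedWithTensorInputs()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    FullyConnectedDescriptor fcDesc;
    fcDesc.m_BiasEnabled     = true;
    fcDesc.m_ConstantWeights = false;   // weights/bias are not compile-time constants

    IConnectableLayer* input   = network->AddInputLayer(0, "input");
    IConnectableLayer* weights = network->AddInputLayer(1, "weights");
    IConnectableLayer* bias    = network->AddInputLayer(2, "bias");
    IConnectableLayer* fc      = network->AddFullyConnectedLayer(fcDesc, "fc");
    IConnectableLayer* output  = network->AddOutputLayer(0, "output");

    // Shapes taken from the new RefCreateWorkloadTests case below.
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 3, 1, 4, 5 }, DataType::Float32));
    weights->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 7, 20 }, DataType::Float32));
    bias->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 7 }, DataType::Float32));
    fc->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 3, 7 }, DataType::Float32));

    // Data tensor on slot 0, weights on slot 1, bias on slot 2.
    input->GetOutputSlot(0).Connect(fc->GetInputSlot(0));
    weights->GetOutputSlot(0).Connect(fc->GetInputSlot(1));
    bias->GetOutputSlot(0).Connect(fc->GetInputSlot(2));
    fc->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return network;
}
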
diff --git a/src/backends/reference/RefBackend.hpp b/src/backends/reference/RefBackend.hpp
index 441f4ebdf4..2855957e31 100644
--- a/src/backends/reference/RefBackend.hpp
+++ b/src/backends/reference/RefBackend.hpp
@@ -12,7 +12,8 @@ namespace armnn
const BackendCapabilities cpuRefCapabilities("CpuRef",
{
{"NonConstWeights", true},
- {"AsyncExecution", true}
+ {"AsyncExecution", true},
+ {"ConstantTensorsAsInputs", true}
});
const std::set<armnn::BackendCapability> oldCpuRefCapabilities {
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 4293ef54f3..fae8d0cdd4 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -486,6 +486,24 @@ TEST_CASE("RefCreateFullyConnectedWithBlobWorkloadTest")
TensorInfo({ 3, 7 }, armnn::DataType::Float32, outputQScale));
}
+TEST_CASE("CreateFullyConnectedWorkloadWeightsBiasesAsInputsFloat32")
+{
+ Graph graph;
+ RefWorkloadFactory factory = GetFactory();
+
+ auto workload =
+ CreateFullyConnectedWorkloadWeightsBiasesAsInputsTest<RefFullyConnectedWorkload,
+ armnn::DataType::Float32>(factory, graph);
+
+ // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
+ float inputsQScale = 0.0f;
+ float outputQScale = 0.0f;
+ CheckInputsOutput(std::move(workload),
+ TensorInfo({ 3, 1, 4, 5 }, armnn::DataType::Float32, inputsQScale),
+ TensorInfo({ 7, 20 }, armnn::DataType::Float32, inputsQScale),
+ TensorInfo({ 3, 7 }, armnn::DataType::Float32, outputQScale));
+}
+
template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
static void RefCreateFullyConnectedWorkloadTest()
{
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 69a2048078..424df977c8 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -600,11 +600,21 @@ TEST_CASE("RefFillEndToEndTestInt32")
FillEndToEnd<armnn::DataType::Signed32>(defaultBackends);
}
-TEST_CASE("RefFullyConnectedEndToEndTestInt32")
+TEST_CASE("RefFullyConnectedEndToEndTestFloat32")
{
FullyConnectedWithDynamicWeightsEndToEnd<armnn::DataType::Float32>(defaultBackends);
}
+TEST_CASE("RefFullyConnectedEndToEndTestNonConstantWeightsConstantBiasesFloat32")
+{
+ FullyConnectedWithDynamicOrConstantInputsEndToEnd<armnn::DataType::Float32>(defaultBackends, true, true);
+}
+
+TEST_CASE("RefFullyConnectedEndToEndTestConstantWeightsNonConstantBiasesFloat32")
+{
+ FullyConnectedWithDynamicOrConstantInputsEndToEnd<armnn::DataType::Float32>(defaultBackends, true, false);
+}
+
TEST_CASE("RefGatherFloatTest")
{
GatherEndToEnd<armnn::DataType::Float32>(defaultBackends);
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
index 99e3eab075..5a7951ec48 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
@@ -16,20 +16,6 @@ RefFullyConnectedWorkload::RefFullyConnectedWorkload(
const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info)
: BaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
{
- if (descriptor.m_Parameters.m_ConstantWeights)
- {
- m_Weight = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight));
- const TensorInfo& rWeightInfo = m_Weight->GetTensorInfo();
- m_WeightShape = rWeightInfo.GetShape();
- m_WeightDecoder = MakeDecoder<float>(rWeightInfo, m_Weight->Map(true));
-
- if (descriptor.m_Parameters.m_BiasEnabled)
- {
- m_Bias = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias));
- const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
- m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
- }
- }
}
void RefFullyConnectedWorkload::PostAllocationConfigure()
@@ -44,18 +30,15 @@ void RefFullyConnectedWorkload::PostAllocationConfigure(std::vector<ITensorHandl
ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
m_InputShape = inputInfo.GetShape();
- if (!m_Data.m_Parameters.m_ConstantWeights)
+ const TensorInfo& rWeightInfo = GetTensorInfo(inputs[1]);
+ ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
+ m_WeightShape = rWeightInfo.GetShape();
+ m_WeightDecoder = MakeDecoder<float>(rWeightInfo);
+
+ if (m_Data.m_Parameters.m_BiasEnabled)
{
- const TensorInfo& rWeightInfo = GetTensorInfo(inputs[1]);
- ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
- m_WeightShape = rWeightInfo.GetShape();
- m_WeightDecoder = MakeDecoder<float>(rWeightInfo);
-
- if (m_Data.m_Parameters.m_BiasEnabled)
- {
- const TensorInfo& biasInfo = GetTensorInfo(inputs[2]);
- m_BiasDecoder = MakeDecoder<float>(biasInfo);
- }
+ const TensorInfo& biasInfo = GetTensorInfo(inputs[2]);
+ m_BiasDecoder = MakeDecoder<float>(biasInfo);
}
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
@@ -87,13 +70,10 @@ void RefFullyConnectedWorkload::Execute(std::vector<ITensorHandle*> inputs, std:
std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
std::unique_ptr<Encoder<float>> OutputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
- if (!m_Data.m_Parameters.m_ConstantWeights)
+ m_WeightDecoder->Reset(inputs[1]->Map());
+ if (m_Data.m_Parameters.m_BiasEnabled)
{
- m_WeightDecoder->Reset(inputs[1]->Map());
- if (m_Data.m_Parameters.m_BiasEnabled)
- {
- m_BiasDecoder->Reset(inputs[2]->Map());
- }
+ m_BiasDecoder->Reset(inputs[2]->Map());
}
FullyConnected(m_InputShape,