//
// Copyright © 2021, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include <backendsCommon/test/EndToEndTestImpl.hpp>
#include <armnnTestUtils/LayerTestResult.hpp>
#include <ResolveType.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <armnn/backends/WorkloadFactory.hpp>
#include <Half.hpp>
// Generic workload-level Cast test: runs a Cast workload converting a 4-D tensor
// of TInput (inputDataType) to TOutput (outputDataType) and compares against
// the caller-supplied expected output. Defined in the corresponding .cpp.
template<armnn::DataType inputDataType, armnn::DataType outputDataType,
typename TInput=armnn::ResolveType<inputDataType>,
typename TOutput=armnn::ResolveType<outputDataType>>
LayerTestResult<TOutput, 4> CastTest(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
const std::vector<TInput>& inputTensor,
const std::vector<TOutput>& outputTensor);
// Pre-baked 2-D Cast test cases. Each instantiates CastTest with fixed
// input/expected data for one source->destination type combination.
// Integer -> Float32 casts:
LayerTestResult<float, 4> CastInt32ToFloat2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 4> CastInt16ToFloat2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 4> CastInt8ToFloat2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 4> CastInt8AsymmToFloat2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 4> CastUInt8ToFloat2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
// Integer -> integer casts:
LayerTestResult<uint8_t, 4> CastInt8ToUInt82dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> CastInt8AsymmToUInt82dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
// Reduced-precision float -> Float32 casts:
LayerTestResult<float, 4> CastFloat16ToFloat322dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 4> CastBFloat16ToFloat322dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
// Float32 -> narrower type casts:
LayerTestResult<armnn::Half, 4> CastFloat32ToFloat162dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<int8_t , 4> CastFloat32ToInt82dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t , 4> CastFloat32ToUInt82dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
/// End-to-end Cast test helper: builds a minimal network
/// (input -> Cast -> output), feeds it inputValues and checks the result
/// against outputValues on the given backends.
///
/// @param backends      Backends to run the network on.
/// @param shape         Tensor dimensions, shared by input and output
///                      (Cast changes only the element type, never the shape).
/// @param inputValues   Flattened input data of type TInput.
/// @param outputValues  Expected flattened output data of type TOutput.
/// @param qScale        Quantization scale applied to both tensor infos.
/// @param qOffset       Quantization offset applied to both tensor infos.
template<armnn::DataType inputDataType, armnn::DataType outputDataType, typename TInput, typename TOutput>
void CastSimpleTest(const std::vector<armnn::BackendId>& backends,
                    const std::vector<unsigned int>& shape,
                    const std::vector<TInput>& inputValues,
                    const std::vector<TOutput>& outputValues,
                    float qScale = 1.0f,
                    int32_t qOffset = 0)
{
    using namespace armnn;

    // One shape serves both ends of the Cast layer.
    const TensorShape tensorShape(static_cast<unsigned int>(shape.size()), shape.data());
    // The input tensor is marked constant (last argument) so the graph can
    // treat the supplied data as immutable; the output info is not.
    TensorInfo inputTensorInfo(tensorShape, inputDataType, qScale, qOffset, true);
    TensorInfo outputTensorInfo(tensorShape, outputDataType, qScale, qOffset);

    // Build the three-layer network: input -> cast -> output.
    // Note: no IRuntime is created here; EndToEndLayerTestImpl manages its own.
    INetworkPtr network(INetwork::Create());
    IConnectableLayer* input = network->AddInputLayer(0, "input");
    IConnectableLayer* castLayer = network->AddCastLayer("cast");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");
    Connect(input, castLayer, inputTensorInfo, 0, 0);
    Connect(castLayer, output, outputTensorInfo, 0, 0);

    // Single input/output binding, both on slot 0.
    std::map<int, std::vector<TInput>> inputTensorData = {{ 0, inputValues }};
    std::map<int, std::vector<TOutput>> expectedOutputData = {{ 0, outputValues }};

    EndToEndLayerTestImpl<inputDataType, outputDataType>(std::move(network),
                                                         inputTensorData,
                                                         expectedOutputData,
                                                         backends);
}