diff options
author | jimfly01 <jim.flynn@arm.com> | 2019-01-24 22:29:33 +0000 |
---|---|---|
committer | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2019-01-25 14:10:53 +0000 |
commit | e9e7bfd09e3435d2bbb334e07c6a0a2514c80048 (patch) | |
tree | ae64daa5c6349ac821fb1b8534f8c9881dec0355 /src/armnn/layers/ConvertFp32ToFp16Layer.cpp | |
parent | 5e9d29802e2cfbb13adc49c2a0ac9ba952dc7650 (diff) | |
download | armnn-e9e7bfd09e3435d2bbb334e07c6a0a2514c80048.tar.gz |
IVGCVSW-2547 Add Accept function to IConnectableLayer
* Layers which callback VisitXXXLayer with ConstTensor
have been implemented with dummies to speed up implementation
and unblock IVGCVSW-2531
Change-Id: I49b8035f12ec72d6bd6cee95075692f98c48e193
Diffstat (limited to 'src/armnn/layers/ConvertFp32ToFp16Layer.cpp')
-rw-r--r-- | src/armnn/layers/ConvertFp32ToFp16Layer.cpp | 7 |
1 file changed, 7 insertions, 0 deletions
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp index 2bcc4e1917..068594bf99 100644 --- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp +++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp @@ -44,4 +44,11 @@ void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs() inferredShapes[0]); } +void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const +{ + // These conversion layers are only inserted by the + // optimizer and so will never be in an input graph. + throw armnn::Exception("ConvertFp32ToFp16Layer should never appear in an input graph"); +} + } // namespace armnn |