From 0722c03c0e361752413e3ae8041117b04ffa9149 Mon Sep 17 00:00:00 2001
From: Ratnesh Kumar Rai
Date: Mon, 29 Aug 2022 14:03:27 +0530
Subject: [PATCH 01/12] Resolved segfault while running CTS for nnhal

This patch adds validation checks for the operations that were causing
segfaults: Dequantize, Div, L2Pooling2D, Less, Logistic, MaxPool2d, Mul,
Quantize, ReduceMin, ReduceSum, Relu, Relu1, Relu6, Softmax, Sub, Tanh

The validation checks of a few operations were also modified to check for
zero-sized tensor operands and operand lifetime: Add, AveragePool2D,
FullyConnected, SpaceToBatch, Squeeze, Transpose, TransposeConv2D

All of the above operations were causing segfaults while running
run cts -m CtsNNAPITestCases -t TestGenerated/GeneratedTest*

Change-Id: I300d230dad1ab2f919a097d4db7a3865d7f91e11
Tracked-On: OAM-102888
Signed-off-by: Ratnesh Kumar Rai
---
 .../operations/include/Dequantize.hpp         |  1 +
 ngraph_creator/operations/include/Div.hpp     |  1 +
 .../operations/include/L2Pooling2D.hpp        |  1 +
 ngraph_creator/operations/include/Less.hpp    |  1 +
 .../operations/include/Logistic.hpp           |  1 +
 .../operations/include/MaxPool2d.hpp          |  1 +
 ngraph_creator/operations/include/Mul.hpp     |  1 +
 .../operations/include/Quantize.hpp           |  1 +
 .../operations/include/ReduceMin.hpp          |  1 +
 .../operations/include/ReduceSum.hpp          |  1 +
 ngraph_creator/operations/include/Relu.hpp    |  1 +
 ngraph_creator/operations/include/Relu1.hpp   |  1 +
 ngraph_creator/operations/include/Relu6.hpp   |  1 +
 ngraph_creator/operations/include/Softmax.hpp |  1 +
 ngraph_creator/operations/include/Sub.hpp     |  1 +
 ngraph_creator/operations/include/Tanh.hpp    |  1 +
 ngraph_creator/operations/src/Add.cpp         | 33 +++++++++++++---
 .../operations/src/AveragePool2D.cpp          |  5 +++
 ngraph_creator/operations/src/Dequantize.cpp  | 10 +++++
 ngraph_creator/operations/src/Div.cpp         | 35 +++++++++++++++++
 .../operations/src/FullyConnected.cpp         | 12 ++++++
 ngraph_creator/operations/src/L2Pooling2D.cpp | 16 ++++++++
 ngraph_creator/operations/src/Less.cpp        | 25 ++++++++++++
 ngraph_creator/operations/src/Logistic.cpp    | 16 ++++++++
 ngraph_creator/operations/src/MaxPool2d.cpp   | 16 ++++++++
 ngraph_creator/operations/src/Mul.cpp         | 39 +++++++++++++++++++
 ngraph_creator/operations/src/Quantize.cpp    | 10 +++++
 ngraph_creator/operations/src/ReduceMin.cpp   | 26 +++++++++++++
 ngraph_creator/operations/src/ReduceSum.cpp   | 26 +++++++++++++
 ngraph_creator/operations/src/Relu.cpp        | 16 ++++++++
 ngraph_creator/operations/src/Relu1.cpp       | 16 ++++++++
 ngraph_creator/operations/src/Relu6.cpp       | 16 ++++++++
 ngraph_creator/operations/src/Softmax.cpp     | 11 ++++++
 .../operations/src/SpaceToBatch.cpp           | 18 +++++----
 ngraph_creator/operations/src/Squeeze.cpp     | 27 ++++++++++---
 ngraph_creator/operations/src/Sub.cpp         | 34 ++++++++++++++++
 ngraph_creator/operations/src/Tanh.cpp        | 11 ++++++
 ngraph_creator/operations/src/Transpose.cpp   | 31 ++++++++++-----
 .../operations/src/TransposeConv2D.cpp        | 12 +++++-
 39 files changed, 448 insertions(+), 29 deletions(-)
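
Note: every operation touched below follows the same validation pattern:
override OperationsBase::validate(), reject zero-sized input tensors, then
reject non-CONST operand lifetimes. A minimal sketch of that pattern,
modelled on the Relu change in this patch (MyOp is a placeholder and the
exact base-class override signature is assumed; isValidInputTensor and
sModelInfo are helpers from this codebase):

    bool MyOp::validate() {
        // Zero-sized/empty input tensors were the segfault source inside
        // the plugin, so reject them before graph creation.
        if (!isValidInputTensor(0)) {
            ALOGE("%s Empty or Invalid dimensions size for input", __func__);
            return false;
        }
        // Only operands with CONST lifetime are handled for now.
        const auto& operandIndex =
            sModelInfo->getOperationInput(mNnapiOperationIndex, 0);
        if (!sModelInfo->isOperandLifeTimeConst(operandIndex)) {
            ALOGE("%s Only Const lifetime is supported", __func__);
            return false;
        }
        ALOGV("%s PASSED", __func__);
        return true;
    }

diff --git a/ngraph_creator/operations/include/Dequantize.hpp b/ngraph_creator/operations/include/Dequantize.hpp
index 4e9248a9e..a77fc86d3 100644
--- a/ngraph_creator/operations/include/Dequantize.hpp
+++ b/ngraph_creator/operations/include/Dequantize.hpp
@@ -11,6 +11,7 @@ class Dequantize : public OperationsBase {
 public:
     Dequantize(int operationIndex);
     std::shared_ptr createNode() override;
+    bool validate() override;
 };
 } // namespace nnhal
diff --git a/ngraph_creator/operations/include/Div.hpp b/ngraph_creator/operations/include/Div.hpp
index 6a78e9b53..0e3f9686c 100644
---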
a/ngraph_creator/operations/include/Div.hpp +++ b/ngraph_creator/operations/include/Div.hpp @@ -10,6 +10,7 @@ namespace nnhal { class Div : public OperationsBase { public: Div(int operationIndex); + bool validate() override; std::shared_ptr createNode() override; }; diff --git a/ngraph_creator/operations/include/L2Pooling2D.hpp b/ngraph_creator/operations/include/L2Pooling2D.hpp index ba9e7015f..354ada6bb 100644 --- a/ngraph_creator/operations/include/L2Pooling2D.hpp +++ b/ngraph_creator/operations/include/L2Pooling2D.hpp @@ -10,6 +10,7 @@ namespace nnhal { class L2Pooling2D : public OperationsBase { public: L2Pooling2D(int operationIndex); + bool validate() override; std::shared_ptr createNode() override; }; diff --git a/ngraph_creator/operations/include/Less.hpp b/ngraph_creator/operations/include/Less.hpp index 612df2660..45058c56b 100644 --- a/ngraph_creator/operations/include/Less.hpp +++ b/ngraph_creator/operations/include/Less.hpp @@ -10,6 +10,7 @@ namespace nnhal { class Less : public OperationsBase { public: Less(int operationIndex); + bool validate() override; std::shared_ptr createNode() override; }; diff --git a/ngraph_creator/operations/include/Logistic.hpp b/ngraph_creator/operations/include/Logistic.hpp index d7bdf09fb..85dea4a8c 100644 --- a/ngraph_creator/operations/include/Logistic.hpp +++ b/ngraph_creator/operations/include/Logistic.hpp @@ -10,6 +10,7 @@ namespace nnhal { class Logistic : public OperationsBase { public: Logistic(int operationIndex); + bool validate() override; std::shared_ptr createNode() override; }; diff --git a/ngraph_creator/operations/include/MaxPool2d.hpp b/ngraph_creator/operations/include/MaxPool2d.hpp index 58acc2645..52fd38e1f 100644 --- a/ngraph_creator/operations/include/MaxPool2d.hpp +++ b/ngraph_creator/operations/include/MaxPool2d.hpp @@ -10,6 +10,7 @@ namespace nnhal { class MaxPool2d : public OperationsBase { public: MaxPool2d(int operationIndex); + bool validate() override; std::shared_ptr createNode() override; }; diff --git a/ngraph_creator/operations/include/Mul.hpp b/ngraph_creator/operations/include/Mul.hpp index 77687ba1e..77a70e704 100644 --- a/ngraph_creator/operations/include/Mul.hpp +++ b/ngraph_creator/operations/include/Mul.hpp @@ -10,6 +10,7 @@ namespace nnhal { class Mul : public OperationsBase { public: Mul(int operationIndex); + bool validate() override; std::shared_ptr createNode() override; }; diff --git a/ngraph_creator/operations/include/Quantize.hpp b/ngraph_creator/operations/include/Quantize.hpp index 73c36c7e8..95940a0c2 100644 --- a/ngraph_creator/operations/include/Quantize.hpp +++ b/ngraph_creator/operations/include/Quantize.hpp @@ -11,6 +11,7 @@ class Quantize : public OperationsBase { public: Quantize(int operationIndex); std::shared_ptr createNode() override; + bool validate() override; void connectOperationToGraph() override; }; diff --git a/ngraph_creator/operations/include/ReduceMin.hpp b/ngraph_creator/operations/include/ReduceMin.hpp index 577d5f1fd..44deef838 100644 --- a/ngraph_creator/operations/include/ReduceMin.hpp +++ b/ngraph_creator/operations/include/ReduceMin.hpp @@ -10,6 +10,7 @@ namespace nnhal { class ReduceMin : public OperationsBase { public: ReduceMin(int operationIndex); + bool validate() override; std::shared_ptr createNode() override; }; diff --git a/ngraph_creator/operations/include/ReduceSum.hpp b/ngraph_creator/operations/include/ReduceSum.hpp index 62591963c..145170f6b 100644 --- a/ngraph_creator/operations/include/ReduceSum.hpp +++ 
b/ngraph_creator/operations/include/ReduceSum.hpp @@ -10,6 +10,7 @@ namespace nnhal { class ReduceSum : public OperationsBase { public: ReduceSum(int operationIndex); + bool validate() override; std::shared_ptr createNode() override; }; diff --git a/ngraph_creator/operations/include/Relu.hpp b/ngraph_creator/operations/include/Relu.hpp index cff5fd250..730fbe4df 100644 --- a/ngraph_creator/operations/include/Relu.hpp +++ b/ngraph_creator/operations/include/Relu.hpp @@ -10,6 +10,7 @@ namespace nnhal { class Relu : public OperationsBase { public: Relu(int operationIndex); + bool validate() override; std::shared_ptr createNode() override; }; diff --git a/ngraph_creator/operations/include/Relu1.hpp b/ngraph_creator/operations/include/Relu1.hpp index 81127ca0f..fc4114a03 100644 --- a/ngraph_creator/operations/include/Relu1.hpp +++ b/ngraph_creator/operations/include/Relu1.hpp @@ -10,6 +10,7 @@ namespace nnhal { class Relu1 : public OperationsBase { public: Relu1(int operationIndex); + bool validate() override; std::shared_ptr createNode() override; }; diff --git a/ngraph_creator/operations/include/Relu6.hpp b/ngraph_creator/operations/include/Relu6.hpp index b06aeadbc..cbd1cb715 100644 --- a/ngraph_creator/operations/include/Relu6.hpp +++ b/ngraph_creator/operations/include/Relu6.hpp @@ -10,6 +10,7 @@ namespace nnhal { class Relu6 : public OperationsBase { public: Relu6(int operationIndex); + bool validate() override; std::shared_ptr createNode() override; }; diff --git a/ngraph_creator/operations/include/Softmax.hpp b/ngraph_creator/operations/include/Softmax.hpp index 4241b7209..fbca45961 100644 --- a/ngraph_creator/operations/include/Softmax.hpp +++ b/ngraph_creator/operations/include/Softmax.hpp @@ -10,6 +10,7 @@ namespace nnhal { class Softmax : public OperationsBase { public: Softmax(int operationIndex); + bool validate() override; std::shared_ptr createNode() override; }; diff --git a/ngraph_creator/operations/include/Sub.hpp b/ngraph_creator/operations/include/Sub.hpp index af217602d..ca99958b2 100644 --- a/ngraph_creator/operations/include/Sub.hpp +++ b/ngraph_creator/operations/include/Sub.hpp @@ -10,6 +10,7 @@ namespace nnhal { class Sub : public OperationsBase { public: Sub(int operationIndex); + bool validate() override; std::shared_ptr createNode() override; }; diff --git a/ngraph_creator/operations/include/Tanh.hpp b/ngraph_creator/operations/include/Tanh.hpp index 3ecfd5019..90e92f8a8 100644 --- a/ngraph_creator/operations/include/Tanh.hpp +++ b/ngraph_creator/operations/include/Tanh.hpp @@ -10,6 +10,7 @@ namespace nnhal { class Tanh : public OperationsBase { public: Tanh(int operationIndex); + bool validate() override; std::shared_ptr createNode() override; }; diff --git a/ngraph_creator/operations/src/Add.cpp b/ngraph_creator/operations/src/Add.cpp index 702845cc0..6bce2562e 100644 --- a/ngraph_creator/operations/src/Add.cpp +++ b/ngraph_creator/operations/src/Add.cpp @@ -12,14 +12,37 @@ Add::Add(int operationIndex) : OperationsBase(operationIndex) { } bool Add::validate() { - ALOGV("%s PASSED", __func__); - - const auto& activationIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); - if (!sModelInfo->isOperandLifeTimeConst(activationIndex)) { - ALOGE("%s Only Constant supported for specifying Activation", __func__); + auto operandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); + auto operandIndex2 = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); + const auto& elementType1 = sModelInfo->getOperandType(operandIndex1); + const auto& 
elementType2 = sModelInfo->getOperandType(operandIndex2); + if ( !isValidInputTensor(0) || !isValidInputTensor(1) ) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + //check operand lifetime + const auto& operandIndex3 = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); + if(!sModelInfo->isOperandLifeTimeConst(operandIndex1) || + !sModelInfo->isOperandLifeTimeConst(operandIndex2) || + !sModelInfo->isOperandLifeTimeConst(operandIndex3)) { + ALOGE("%s Only Const lifetime is supported", __func__); return false; } + // check if both tensors are of same type + if(elementType1 != elementType2 ) { + ALOGE("%s Input type mismatch", __func__); + return false; + } else if ( elementType1 == OperandType::TENSOR_INT32 ) { + //In 1.3 For a {@link OperandType::TENSOR_INT32} tensor, + //the {@link FusedActivationFunc} must be "NONE". + auto activationFn = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); + if (activationFn != 0) { + ALOGE("%s Activation type must be none for TENSOR_INT32 type", __func__); + return false; + } + } + ALOGV("%s PASSED", __func__); return true; } diff --git a/ngraph_creator/operations/src/AveragePool2D.cpp b/ngraph_creator/operations/src/AveragePool2D.cpp index f72c71486..dc49baed8 100644 --- a/ngraph_creator/operations/src/AveragePool2D.cpp +++ b/ngraph_creator/operations/src/AveragePool2D.cpp @@ -18,6 +18,11 @@ bool AveragePool2D::validate() { ALOGE("%s Invalid dimensions size for input(%lu)", __func__, inputDimensionsSize); return false; } + //check Input are of valid dimension or not + if ( !isValidInputTensor(0)) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } ALOGV("%s PASSED", __func__); return true; diff --git a/ngraph_creator/operations/src/Dequantize.cpp b/ngraph_creator/operations/src/Dequantize.cpp index 85c19eb7b..d3a529c45 100644 --- a/ngraph_creator/operations/src/Dequantize.cpp +++ b/ngraph_creator/operations/src/Dequantize.cpp @@ -11,6 +11,16 @@ Dequantize::Dequantize(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool Dequantize::validate() { + if ( !isValidInputTensor(0)) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + + ALOGV("%s PASSED", __func__); + return true; +} + std::shared_ptr Dequantize::createNode() { // Creating input nodes std::shared_ptr input, outputNode; diff --git a/ngraph_creator/operations/src/Div.cpp b/ngraph_creator/operations/src/Div.cpp index a89995f8f..bb294662d 100644 --- a/ngraph_creator/operations/src/Div.cpp +++ b/ngraph_creator/operations/src/Div.cpp @@ -11,6 +11,41 @@ Div::Div(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool Div::validate() { + auto operandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); + auto operandIndex2 = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); + const auto& elementType1 = sModelInfo->getOperandType(operandIndex1); + const auto& elementType2 = sModelInfo->getOperandType(operandIndex2); + if ( !isValidInputTensor(0) || !isValidInputTensor(1) ) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + //check operand lifetime + const auto& operandIndex3 = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); + if(!sModelInfo->isOperandLifeTimeConst(operandIndex1) || + !sModelInfo->isOperandLifeTimeConst(operandIndex2) || + 
!sModelInfo->isOperandLifeTimeConst(operandIndex3)) { + ALOGE("%s Only Const lifetime is supported", __func__); + return false; + } + + // check if both tensors are of same type + if(elementType1 != elementType2 ) { + ALOGE("%s Input type mismatch", __func__); + return false; + } else if ( elementType1 == OperandType::TENSOR_INT32 ) { + //In 1.3 For a {@link OperandType::TENSOR_INT32} tensor, + //the {@link FusedActivationFunc} must be "NONE". + auto activationFn = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); + if (activationFn != 0) { + ALOGE("%s Activation type must be none for TENSOR_INT32 type", __func__); + return false; + } + } + ALOGV("%s PASSED", __func__); + return true; +} + std::shared_ptr Div::createNode() { // Creating input nodes auto input1 = getInputNode(0); diff --git a/ngraph_creator/operations/src/FullyConnected.cpp b/ngraph_creator/operations/src/FullyConnected.cpp index 7771c66fa..5dcb7218e 100644 --- a/ngraph_creator/operations/src/FullyConnected.cpp +++ b/ngraph_creator/operations/src/FullyConnected.cpp @@ -25,6 +25,18 @@ bool FullyConnected::validate() { ALOGE("%s Invalid input parameter dimensions!!!", __func__); return false; } + //check operand lifetime + const auto& dimsOperandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); + const auto& dimsOperandIndex2 = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); + const auto& dimsOperandIndex3 = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); + const auto& dimsOperandIndex4 = sModelInfo->getOperationInput(mNnapiOperationIndex, 3); + if(!sModelInfo->isOperandLifeTimeConst(dimsOperandIndex1) || + !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex2) || + !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex3) || + !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex4)) { + ALOGE("%s Only Const lifetime is supported", __func__); + return false; + } ALOGD("%s succeeded", __func__); return true; diff --git a/ngraph_creator/operations/src/L2Pooling2D.cpp b/ngraph_creator/operations/src/L2Pooling2D.cpp index d8520b132..0e11e52a7 100644 --- a/ngraph_creator/operations/src/L2Pooling2D.cpp +++ b/ngraph_creator/operations/src/L2Pooling2D.cpp @@ -11,6 +11,22 @@ L2Pooling2D::L2Pooling2D(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool L2Pooling2D::validate() { + const auto& inputDimensionsSize = getInputOperandDimensions(0).size(); + if (inputDimensionsSize != 4) { + ALOGE("%s Invalid dimensions size for input(%lu)", __func__, inputDimensionsSize); + return false; + } + //check Input are of valid dimension or not + if ( !isValidInputTensor(0)) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + + ALOGV("%s PASSED", __func__); + return true; +} + std::shared_ptr L2Pooling2D::createNode() { const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); bool isImplicit = false, isExplicit = false; diff --git a/ngraph_creator/operations/src/Less.cpp b/ngraph_creator/operations/src/Less.cpp index c521414b3..950434fae 100644 --- a/ngraph_creator/operations/src/Less.cpp +++ b/ngraph_creator/operations/src/Less.cpp @@ -11,6 +11,31 @@ Less::Less(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool Less::validate() { + auto operandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); + auto operandIndex2 = 
sModelInfo->getOperationInput(mNnapiOperationIndex, 1); + const auto& elementType1 = sModelInfo->getOperandType(operandIndex1); + const auto& elementType2 = sModelInfo->getOperandType(operandIndex2); + if ( !isValidInputTensor(0) || !isValidInputTensor(1) ) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + //check operand lifetime + if(!sModelInfo->isOperandLifeTimeConst(operandIndex1) || + !sModelInfo->isOperandLifeTimeConst(operandIndex2)) { + ALOGE("%s Only Const lifetime is supported", __func__); + return false; + } + + // check if both tensors are of same type + if(elementType1 != elementType2 ) { + ALOGE("%s Input type mismatch", __func__); + return false; + } + + ALOGV("%s PASSED", __func__); + return true; +} std::shared_ptr Less::createNode() { // Creating input nodes std::shared_ptr input1, input2; diff --git a/ngraph_creator/operations/src/Logistic.cpp b/ngraph_creator/operations/src/Logistic.cpp index f134cb956..a6aa88e46 100644 --- a/ngraph_creator/operations/src/Logistic.cpp +++ b/ngraph_creator/operations/src/Logistic.cpp @@ -11,6 +11,22 @@ Logistic::Logistic(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool Logistic::validate() { + const auto& inputDimensionsSize = getInputOperandDimensions(0).size(); + if (inputDimensionsSize > 4) { + ALOGE("%s Invalid dimensions size for input(%lu)", __func__, inputDimensionsSize); + return false; + } + //check Input are of valid dimension or not + if ( !isValidInputTensor(0)) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + + ALOGV("%s PASSED", __func__); + return true; +} + std::shared_ptr Logistic::createNode() { // Creating input nodes std::shared_ptr input; diff --git a/ngraph_creator/operations/src/MaxPool2d.cpp b/ngraph_creator/operations/src/MaxPool2d.cpp index bf320b897..ab613f52a 100644 --- a/ngraph_creator/operations/src/MaxPool2d.cpp +++ b/ngraph_creator/operations/src/MaxPool2d.cpp @@ -11,6 +11,22 @@ MaxPool2d::MaxPool2d(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool MaxPool2d::validate() { + // Check Input Dimension size + const auto& inputDimensionsSize = getInputOperandDimensions(0).size(); + if (inputDimensionsSize != 4) { + ALOGE("%s Invalid dimensions size for input(%lu)", __func__, inputDimensionsSize); + return false; + } + //check Input are of valid dimension or not + if ( !isValidInputTensor(0)) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + + ALOGV("%s PASSED", __func__); + return true; +} std::shared_ptr MaxPool2d::createNode() { const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); ALOGD("%s inputsSize %lu", __func__, inputsSize); diff --git a/ngraph_creator/operations/src/Mul.cpp b/ngraph_creator/operations/src/Mul.cpp index 84a3e647b..2212b4c8c 100644 --- a/ngraph_creator/operations/src/Mul.cpp +++ b/ngraph_creator/operations/src/Mul.cpp @@ -11,6 +11,45 @@ Mul::Mul(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool Mul::validate() { + const auto inputRank1 = getInputOperandDimensions(0).size(); + const auto inputRank2 = getInputOperandDimensions(1).size(); + auto operandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); + auto operandIndex2 = 
sModelInfo->getOperationInput(mNnapiOperationIndex, 1); + const auto& elementType1 = sModelInfo->getOperandType(operandIndex1); + const auto& elementType2 = sModelInfo->getOperandType(operandIndex2); + + if ( !isValidInputTensor(0) || !isValidInputTensor(1) ) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + + //check operand lifetime + const auto& operandIndex3 = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); + if(!sModelInfo->isOperandLifeTimeConst(operandIndex1) || + !sModelInfo->isOperandLifeTimeConst(operandIndex2) || + !sModelInfo->isOperandLifeTimeConst(operandIndex3)) { + ALOGE("%s Only Const lifetime is supported", __func__); + return false; + } + + if(elementType1 != elementType2 ) { + ALOGE("%s Input type mismatch", __func__); + return false; + } else if ( elementType1 == OperandType::TENSOR_INT32 ) { + //In 1.3 For a {@link OperandType::TENSOR_INT32} tensor, + //the {@link FusedActivationFunc} must be "NONE". + auto activationFn = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); + if (activationFn != 0) { + ALOGE("%s Activation type must be none for TENSOR_INT32 type", __func__); + return false; + } + } + + ALOGV("%s PASSED", __func__); + return true; +} + std::shared_ptr Mul::createNode() { // Creating input nodes std::shared_ptr input1, input2; diff --git a/ngraph_creator/operations/src/Quantize.cpp b/ngraph_creator/operations/src/Quantize.cpp index 24c09174d..02809b29b 100755 --- a/ngraph_creator/operations/src/Quantize.cpp +++ b/ngraph_creator/operations/src/Quantize.cpp @@ -11,6 +11,16 @@ Quantize::Quantize(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool Quantize::validate() { + if ( !isValidInputTensor(0)) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + + ALOGV("%s PASSED", __func__); + return true; +} + void Quantize::connectOperationToGraph() { createNode(); } std::shared_ptr Quantize::createNode() { diff --git a/ngraph_creator/operations/src/ReduceMin.cpp b/ngraph_creator/operations/src/ReduceMin.cpp index 4eec8b34f..46dfab6f3 100644 --- a/ngraph_creator/operations/src/ReduceMin.cpp +++ b/ngraph_creator/operations/src/ReduceMin.cpp @@ -11,6 +11,32 @@ ReduceMin::ReduceMin(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool ReduceMin::validate() { + // Check input rank + const auto inputRank = getInputOperandDimensions(0).size(); + + if (inputRank > 4) + return false; + + if ( !isValidInputTensor(0) || !isValidInputTensor(1)) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + + auto& input_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); + auto& dim_reduce_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); + + // TODO: Add Support for all_tensors_as_inputs + if (!sModelInfo->isOperandLifeTimeConst(input_OperandIndex) || + !sModelInfo->isOperandLifeTimeConst(dim_reduce_OperandIndex)) { + ALOGE("%s Only Constant dimensions supported now", __func__); + return false; + } + + ALOGV("%s PASSED", __func__); + return true; +} + std::shared_ptr ReduceMin::createNode() { // Creating input nodes std::shared_ptr input; diff --git a/ngraph_creator/operations/src/ReduceSum.cpp b/ngraph_creator/operations/src/ReduceSum.cpp index e60859c12..9e111c21b 100644 --- a/ngraph_creator/operations/src/ReduceSum.cpp +++ 
b/ngraph_creator/operations/src/ReduceSum.cpp @@ -11,6 +11,32 @@ ReduceSum::ReduceSum(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool ReduceSum::validate() { + // Check input rank + const auto inputRank = getInputOperandDimensions(0).size(); + + if (inputRank > 4) + return false; + + if ( !isValidInputTensor(0) || !isValidInputTensor(1)) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + + auto& input_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); + auto& dim_reduce_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); + + // TODO: Add Support for all_tensors_as_inputs + if (!sModelInfo->isOperandLifeTimeConst(input_OperandIndex) || + !sModelInfo->isOperandLifeTimeConst(dim_reduce_OperandIndex)) { + ALOGE("%s Only Constant dimensions supported now", __func__); + return false; + } + + ALOGV("%s PASSED", __func__); + return true; +} + std::shared_ptr ReduceSum::createNode() { // Creating input nodes auto input = getInputNode(0); diff --git a/ngraph_creator/operations/src/Relu.cpp b/ngraph_creator/operations/src/Relu.cpp index 815dd7914..04415abbf 100644 --- a/ngraph_creator/operations/src/Relu.cpp +++ b/ngraph_creator/operations/src/Relu.cpp @@ -11,6 +11,22 @@ Relu::Relu(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool Relu::validate() { + if ( !isValidInputTensor(0)) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + //check operand lifetime + const auto& dimsOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); + if(!sModelInfo->isOperandLifeTimeConst(dimsOperandIndex)) { + ALOGE("%s Only Const lifetime is supported", __func__); + return false; + } + + ALOGV("%s PASSED", __func__); + return true; +} + std::shared_ptr Relu::createNode() { // Creating input nodes std::shared_ptr input; diff --git a/ngraph_creator/operations/src/Relu1.cpp b/ngraph_creator/operations/src/Relu1.cpp index 4c5a40799..4fd0da7b6 100644 --- a/ngraph_creator/operations/src/Relu1.cpp +++ b/ngraph_creator/operations/src/Relu1.cpp @@ -11,6 +11,22 @@ Relu1::Relu1(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool Relu1::validate() { + if ( !isValidInputTensor(0)) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + //check operand lifetime + const auto& dimsOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); + if(!sModelInfo->isOperandLifeTimeConst(dimsOperandIndex)) { + ALOGE("%s Only Const lifetime is supported", __func__); + return false; + } + + ALOGV("%s PASSED", __func__); + return true; +} + std::shared_ptr Relu1::createNode() { // Creating input nodes std::shared_ptr input; diff --git a/ngraph_creator/operations/src/Relu6.cpp b/ngraph_creator/operations/src/Relu6.cpp index 3f16afe5d..2d25e3175 100644 --- a/ngraph_creator/operations/src/Relu6.cpp +++ b/ngraph_creator/operations/src/Relu6.cpp @@ -11,6 +11,22 @@ Relu6::Relu6(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool Relu6::validate() { + if ( !isValidInputTensor(0)) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + //check operand lifetime + const auto& 
dimsOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); + if(!sModelInfo->isOperandLifeTimeConst(dimsOperandIndex)) { + ALOGE("%s Only Const lifetime is supported", __func__); + return false; + } + + ALOGV("%s PASSED", __func__); + return true; +} + std::shared_ptr Relu6::createNode() { // Creating input nodes std::shared_ptr input; diff --git a/ngraph_creator/operations/src/Softmax.cpp b/ngraph_creator/operations/src/Softmax.cpp index 18f1bcdea..80ea14212 100644 --- a/ngraph_creator/operations/src/Softmax.cpp +++ b/ngraph_creator/operations/src/Softmax.cpp @@ -11,6 +11,17 @@ Softmax::Softmax(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool Softmax::validate() { + const auto inputRank = getInputOperandDimensions(0).size(); + if ( !isValidInputTensor(0) || inputRank > 4 ) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + + ALOGV("%s PASSED", __func__); + return true; +} + std::shared_ptr Softmax::createNode() { // Creating input nodes std::shared_ptr input, outputNode; diff --git a/ngraph_creator/operations/src/SpaceToBatch.cpp b/ngraph_creator/operations/src/SpaceToBatch.cpp index 976c16e7d..f925e039a 100644 --- a/ngraph_creator/operations/src/SpaceToBatch.cpp +++ b/ngraph_creator/operations/src/SpaceToBatch.cpp @@ -16,17 +16,21 @@ bool SpaceToBatch::validate() { const auto inputRank = getInputOperandDimensions(0).size(); if (inputRank != 4) return false; + if ( !isValidInputTensor(0) || !isValidInputTensor(1) || !isValidInputTensor(2) ) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + + auto& input_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); auto& block_shape_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); - // TODO: Add Support for all_tensors_as_inputs - if (!sModelInfo->isOperandLifeTimeConst(block_shape_OperandIndex)) { - ALOGE("%s Only Constant dimensions supported now", __func__); - return false; - } + auto& pad_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); - auto pad_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); + //check operand lifetime is const or not as for now only const operand lifetime is supported // TODO: Add Support for all_tensors_as_inputs - if (!sModelInfo->isOperandLifeTimeConst(pad_OperandIndex)) { + if (!sModelInfo->isOperandLifeTimeConst(input_OperandIndex) || + !sModelInfo->isOperandLifeTimeConst(block_shape_OperandIndex) || + !sModelInfo->isOperandLifeTimeConst(pad_OperandIndex)) { ALOGE("%s Only Constant dimensions supported now", __func__); return false; } diff --git a/ngraph_creator/operations/src/Squeeze.cpp b/ngraph_creator/operations/src/Squeeze.cpp index bb466e793..0364fd875 100644 --- a/ngraph_creator/operations/src/Squeeze.cpp +++ b/ngraph_creator/operations/src/Squeeze.cpp @@ -12,17 +12,32 @@ Squeeze::Squeeze(int operationIndex) : OperationsBase(operationIndex) { } bool Squeeze::validate() { + const auto inputRank = getInputOperandDimensions(0).size(); + if (inputRank > 4) return false; + + if ( !isValidInputTensor(0)) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } // TODO: Add Support for all_tensors_as_inputs - const auto& dimsOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); + const auto& dimsOperandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); - // TODO: Support OmittedInput. 
- // The empty 2nd argument in Squeeze op causes dynamic output - // To add support, the dims will have to be calculated statically - if (sModelInfo->isOmittedInput(mNnapiOperationIndex, 1) || - !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex)) { + if (!sModelInfo->isOperandLifeTimeConst(dimsOperandIndex1)) { ALOGE("%s Only Constant dimensions supported now", __func__); return false; } + const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); + if (inputsSize == 2) { + const auto& dimsOperandIndex2 = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); + // TODO: Support OmittedInput. + // The empty 2nd argument in Squeeze op causes dynamic output + // To add support, the dims will have to be calculated statically + if (!isValidInputTensor(1) || !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex2) || + sModelInfo->isOmittedInput(mNnapiOperationIndex, 1) ) { + ALOGE("%s Invalid operand type or operand lifetime", __func__); + return false; + } + } return true; } diff --git a/ngraph_creator/operations/src/Sub.cpp b/ngraph_creator/operations/src/Sub.cpp index c90d7f7b5..e3717ccf1 100644 --- a/ngraph_creator/operations/src/Sub.cpp +++ b/ngraph_creator/operations/src/Sub.cpp @@ -11,6 +11,40 @@ Sub::Sub(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool Sub::validate() { + auto operandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); + auto operandIndex2 = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); + const auto& elementType1 = sModelInfo->getOperandType(operandIndex1); + const auto& elementType2 = sModelInfo->getOperandType(operandIndex2); + if ( !isValidInputTensor(0) || !isValidInputTensor(1) ) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + //check operand lifetime + const auto& operandIndex3 = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); + if(!sModelInfo->isOperandLifeTimeConst(operandIndex1) || + !sModelInfo->isOperandLifeTimeConst(operandIndex2) || + !sModelInfo->isOperandLifeTimeConst(operandIndex3)) { + ALOGE("%s Only Const lifetime is supported", __func__); + return false; + } + // check if both tensors are of same type + if(elementType1 != elementType2 ) { + ALOGE("%s Input type mismatch", __func__); + return false; + } else if ( elementType1 == OperandType::TENSOR_INT32 ) { + //In 1.3 For a {@link OperandType::TENSOR_INT32} tensor, + //the {@link FusedActivationFunc} must be "NONE". 
+ auto activationFn = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); + if (activationFn != 0) { + ALOGE("%s Activation type must be none for TENSOR_INT32 type", __func__); + return false; + } + } + ALOGV("%s PASSED", __func__); + return true; +} + std::shared_ptr Sub::createNode() { // Creating input nodes std::shared_ptr input1, input2; diff --git a/ngraph_creator/operations/src/Tanh.cpp b/ngraph_creator/operations/src/Tanh.cpp index 65adaa342..7f3679489 100644 --- a/ngraph_creator/operations/src/Tanh.cpp +++ b/ngraph_creator/operations/src/Tanh.cpp @@ -11,6 +11,17 @@ Tanh::Tanh(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } +bool Tanh::validate() { + const auto inputRank = getInputOperandDimensions(0).size(); + if ( !isValidInputTensor(0) || inputRank > 4 ) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + + ALOGV("%s PASSED", __func__); + return true; +} + std::shared_ptr Tanh::createNode() { // Creating input nodes std::shared_ptr input; diff --git a/ngraph_creator/operations/src/Transpose.cpp b/ngraph_creator/operations/src/Transpose.cpp index 052453386..af2bc37c4 100644 --- a/ngraph_creator/operations/src/Transpose.cpp +++ b/ngraph_creator/operations/src/Transpose.cpp @@ -13,13 +13,28 @@ Transpose::Transpose(int operationIndex) : OperationsBase(operationIndex) { bool Transpose::validate() { // TODO: Add Support for all_tensors_as_inputs - const auto& dimsOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); - const auto& dims = getInputOperandDimensions(1); - if (!dims.empty() && dims[0] != 0 && !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex)) { - ALOGE("%s Only Constant dimensions supported now", __func__); + const auto& dimsOperandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); + const auto inputRank = getInputOperandDimensions(0).size(); + if ( !isValidInputTensor(0) || inputRank > 4) { + ALOGE("%s Empty or Invalid dimensions size for input", __func__); + return false; + } + + if(!sModelInfo->isOperandLifeTimeConst(dimsOperandIndex1)) { + ALOGE("%s Only Const lifetime is supported", __func__); return false; } + const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); + if (inputsSize == 2) { + const auto& dimsOperandIndex2 = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); + if (!isValidInputTensor(1) || !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex2)) { + ALOGE("%s Invalid operand type or operand lifetime", __func__); + return false; + } + } + + ALOGV("%s PASSED", __func__); return true; } @@ -30,14 +45,12 @@ std::shared_ptr Transpose::createNode() { input = getInputNode(0); std::shared_ptr order; + order = createConstNode(ngraph::element::i32, {0}, convertToVector(0)); - const auto& dims = getInputOperandDimensions(1); - if (!dims.empty() && dims[0] != 0) { + const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); + if (inputsSize == 2) { order = getInputNode(1); - } else { - order = createConstNode(ngraph::element::i32, {0}, convertToVector(0)); } - std::shared_ptr outputNode; outputNode = std::make_shared(input, order); diff --git a/ngraph_creator/operations/src/TransposeConv2D.cpp b/ngraph_creator/operations/src/TransposeConv2D.cpp index 5b9d6999a..a9e9c2d85 100644 --- a/ngraph_creator/operations/src/TransposeConv2D.cpp +++ b/ngraph_creator/operations/src/TransposeConv2D.cpp @@ -22,7 +22,7 @@ bool TransposeConv2D::validate() { 
 inputDimensionsSize, filterDimensionsSize);
         return false;
     }
-    if (!isValidInputTensor(0) || !isValidInputTensor(1)) {
+    if (!isValidInputTensor(0) || !isValidInputTensor(1) || !isValidInputTensor(2)) {
         ALOGE("%s Invalid dimensions for input or filter", __func__);
         return false;
     }
@@ -38,8 +38,16 @@ bool TransposeConv2D::validate() {
     // TODO: Issue from OV 2021.4, remove this check once CVS-61723 is resolved
     // Workaround to ignore VTS large input error test cases
     const auto& inputDimensions = getInputOperandDimensions(0);
+    const auto& filterDimensions = getInputOperandDimensions(1);
+    const auto& biasDimensions = getInputOperandDimensions(2);
-    if (inputDimensions[1] == 1 && inputDimensions[2] == 1 && inputDimensions[3] == 1) return false;
+    if (inputDimensions[1] == 1 && inputDimensions[2] == 1 && inputDimensions[3] == 1) {
+        return false;
+    }
+    // check that the bias size equals filter depth_out and that filter depth_in equals input depth_in
+    if (filterDimensions[0] != biasDimensions[0] || filterDimensions[3] != inputDimensions[3]) {
+        return false;
+    }

     ALOGV("%s PASSED", __func__);
     return true;

From e19bead64584259c117c1e87ad21f19676ef415e Mon Sep 17 00:00:00 2001
From: Ratnesh Kumar Rai
Date: Mon, 29 Aug 2022 14:06:48 +0530
Subject: [PATCH 02/12] Disable operations to resolve segfault in CTS

This patch disables the following operations: MAX_POOL_2D, L2_POOL_2D,
DEPTHWISE_CONV_2D, CONV_2D, AVERAGE_POOL_2D, L2_NORMALIZATION,
RESIZE_BILINEAR, RESIZE_NEAREST_NEIGHBOR

The above operations were causing segfaults while running
CtsNNAPITestCases -t TestRandomGraph/RandomGraphTest*

These operations don't have validation checks for operands with zero
values, which caused segfaults in the mkldnn plugin while loading the
model generated by nnhal.

The behaviour/parameters of the following operations changed in V1.3,
so they are disabled for now: L2_NORMALIZATION, RESIZE_BILINEAR,
RESIZE_NEAREST_NEIGHBOR

Change-Id: I4a1e230dad1ab2f919a097d4db7a3865d7f91e11
Tracked-On: OAM-102888
Signed-off-by: Ratnesh Kumar Rai
---
 ngraph_creator/src/OperationsFactory.cpp | 32 ++++++++++++------------
 1 file changed, 16 insertions(+), 16 deletions(-)
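
Note: disabling an operation here simply removes its case from the factory
switch, so dispatch falls through to the factory's fallback path and the
operation is reported as unsupported. A compressed sketch of that mechanism
(the function signature and the default-branch body are assumptions for
illustration, not code from this repo):

    std::shared_ptr<OperationsBase> OperationsFactory::getOperation(
        int operationIndex, const OperationType& operationType) {
        switch (operationType) {
            // Disabled: missing zero-dimension validation caused mkldnn
            // plugin segfaults, so the case is commented out.
            // case OperationType::MAX_POOL_2D:
            //     return std::make_shared<MaxPool2d>(operationIndex);
            default:
                ALOGE("%s Cannot create operation of type %d", __func__,
                      static_cast<int>(operationType));
                return nullptr;  // caller treats the operation as unsupported
        }
    }

diff --git a/ngraph_creator/src/OperationsFactory.cpp b/ngraph_creator/src/OperationsFactory.cpp
index f31deb796..1f205b759 100755
--- a/ngraph_creator/src/OperationsFactory.cpp
+++ b/ngraph_creator/src/OperationsFactory.cpp
@@ -26,8 +26,8 @@ std::shared_ptr OperationsFactory::getOperation(
         return std::make_shared(operationIndex);
     case OperationType::ARGMIN:
         return std::make_shared(operationIndex);
-    case OperationType::AVERAGE_POOL_2D:
-        return std::make_shared(operationIndex);
+    // case OperationType::AVERAGE_POOL_2D:
+    //     return std::make_shared(operationIndex);
     case OperationType::BATCH_TO_SPACE_ND:
         return std::make_shared(operationIndex);
     case OperationType::BIDIRECTIONAL_SEQUENCE_RNN:
         return std::make_shared(operationIndex);
     case OperationType::CAST:
         return std::make_shared(operationIndex);
     case OperationType::CONCATENATION:
         return std::make_shared(operationIndex);
-    case OperationType::CONV_2D:
-        return std::make_shared(operationIndex);
+    // case OperationType::CONV_2D:
+    //     return std::make_shared(operationIndex);
     case OperationType::DEPTH_TO_SPACE:
         return std::make_shared(operationIndex);
-    case OperationType::DEPTHWISE_CONV_2D:
-        return std::make_shared(operationIndex);
+    // case OperationType::DEPTHWISE_CONV_2D:
+    //     return std::make_shared(operationIndex);
     case OperationType::DEQUANTIZE:
         return std::make_shared(operationIndex);
     case OperationType::DIV:
@@ -70,10 +70,10 @@ std::shared_ptr OperationsFactory::getOperation(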
         return std::make_shared(operationIndex);
     case OperationType::INSTANCE_NORMALIZATION:
         return std::make_shared(operationIndex);
-    case OperationType::L2_POOL_2D:
-        return std::make_shared(operationIndex);
-    case OperationType::L2_NORMALIZATION:
-        return std::make_shared(operationIndex);
+    // case OperationType::L2_POOL_2D:
+    //     return std::make_shared(operationIndex);
+    // case OperationType::L2_NORMALIZATION:
+    //     return std::make_shared(operationIndex);
     case OperationType::LSTM:
         return std::make_shared(operationIndex);
     case OperationType::LESS:
@@ -94,8 +94,8 @@ std::shared_ptr OperationsFactory::getOperation(
         return std::make_shared(operationIndex);
     case OperationType::MAXIMUM:
         return std::make_shared(operationIndex);
-    case OperationType::MAX_POOL_2D:
-        return std::make_shared(operationIndex);
+    // case OperationType::MAX_POOL_2D:
+    //     return std::make_shared(operationIndex);
     case OperationType::MEAN:
         return std::make_shared(operationIndex);
     case OperationType::MINIMUM:
@@ -144,10 +144,10 @@ std::shared_ptr OperationsFactory::getOperation(
         return std::make_shared(operationIndex);
     case OperationType::RSQRT:
         return std::make_shared(operationIndex);
-    case OperationType::RESIZE_BILINEAR:
-        return std::make_shared(operationIndex);
-    case OperationType::RESIZE_NEAREST_NEIGHBOR:
-        return std::make_shared(operationIndex);
+    // case OperationType::RESIZE_BILINEAR:
+    //     return std::make_shared(operationIndex);
+    // case OperationType::RESIZE_NEAREST_NEIGHBOR:
+    //     return std::make_shared(operationIndex);
     case OperationType::SELECT:
         return std::make_shared(operationIndex);
     case OperationType::SOFTMAX:

From d8627f3cabceb72d46fabbe47534181e9580b636 Mon Sep 17 00:00:00 2001
From: Ratnesh Kumar Rai
Date: Tue, 14 Jun 2022 13:20:08 +0530
Subject: [PATCH 07/12] Added remote inferencing logic using grpc

Tracked-On: OAM-102337
Signed-off-by: Ratnesh Kumar Rai
Signed-off-by: akodanka
---
 Android.bp                                  |  56 +++++-
 BUILD.gn                                    |   1 +
 BasePreparedModel.cpp                       | 163 ++++++++++++------
 BasePreparedModel.h                         |   6 +-
 DetectionClient.cpp                         | 128 ++++++++++++++
 DetectionClient.h                           |  52 ++++++
 IENetwork.cpp                               |   4 -
 IENetwork.h                                 |   6 +
 cpu/CpuPreparedModel.cpp                    |   7 +
 .../include/NgraphNetworkCreator.hpp        |   1 +
 ngraph_creator/include/NgraphNodes.hpp      |   1 +
 ngraph_creator/src/NgraphNetworkCreator.cpp |   5 +
 ngraph_creator/src/NgraphNodes.cpp          |   7 +
 proto/nnhal_object_detection.proto          |  60 +++++++
 utils.cpp                                   |   8 +
 utils.h                                     |   1 +
 16 files changed, 447 insertions(+), 59 deletions(-)
 create mode 100644 DetectionClient.cpp
 create mode 100644 DetectionClient.h
 create mode 100644 proto/nnhal_object_detection.proto
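
Note: the remote-inference flow this patch adds is driven entirely through
DetectionClient (introduced below). A sketch of the end-to-end call sequence
as used by BasePreparedModel/CpuPreparedModel — "localhost:50051", the node
names and the buffer/shape values are illustrative placeholders; the real
address comes from the grpc ip_port property via getGrpcIpPort():

    #include <grpcpp/grpcpp.h>
    #include "DetectionClient.h"

    auto client = std::make_shared<DetectionClient>(
        grpc::CreateChannel("localhost:50051", grpc::InsecureChannelCredentials()));
    bool success = false;
    client->prepare(success);                    // ping the remote service
    if (success) client->sendIRs(success);       // stream ngraph_ir.xml / .bin
    if (success) {
        std::vector<size_t> shape = {1, 224, 224, 3};     // illustrative shape
        std::vector<uint8_t> in(4 * 224 * 224 * 3), out(in.size());
        client->add_input_data("input0", in.data(), shape);  // one per input
        client->remote_infer();                  // single RPC runs inference
        if (client->get_status())
            client->get_output_data("output0", out.data(), shape);
        client->clear_data();                    // reset tensors for next run
    }

diff --git a/Android.bp b/Android.bp
index 82ffdfb63..008de5f22 100644
--- a/Android.bp
+++ b/Android.bp
@@ -9,6 +9,7 @@ cc_library_shared {
     srcs: [
         "Driver.cpp",
         "BasePreparedModel.cpp",
+        "DetectionClient.cpp",
         "utils.cpp",
         "IENetwork.cpp",
         "ModelManager.cpp",
@@ -27,7 +28,17 @@
         "packages/modules/NeuralNetworks/common/include",
         "packages/modules/NeuralNetworks/runtime/include",
         "frameworks/native/libs/nativewindow/include",
-        "external/mesa3d/include/android_stub"
+        "external/mesa3d/include/android_stub",
+        "external/grpc-grpc",
+        "external/grpc-grpc/include",
+        "external/grpc-grpc/third_party/cares",
+        "external/grpc-grpc/third_party/cares/config_android",
+        "external/grpc-grpc/src/core/ext/filters/client_channel",
+        "external/grpc-grpc/third_party/nanopb",
+        "external/protobuf",
+        "external/protobuf/src",
+        "external/protobuf/config",
+        "external/protobuf/android"
     ],

     header_libs: [
@@ -94,7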
+105,9 @@ cc_library_shared { "libutils", "libinference_engine", "libngraph", - "libMKLDNNPlugin" + "libMKLDNNPlugin", + "libgrpc++", + "libprotobuf-cpp-full" ], static_libs: [ @@ -103,6 +116,13 @@ cc_library_shared { "libngraph_creator", ], + generated_headers: [ + "ObjectDetectionProtoStub_h", + ], + generated_sources: [ + "ObjectDetectionProtoStub_cc", + ], + defaults: [ "neuralnetworks_defaults" ], @@ -111,6 +131,38 @@ cc_library_shared { } +genrule { + name: "ObjectDetectionProtoStub_h", + tools: [ + "aprotoc", + "protoc-gen-grpc-cpp-plugin", + ], + cmd: "$(location aprotoc) -I$$(dirname $(in)) -Iexternal/protobuf/src --plugin=protoc-gen-grpc=$(location protoc-gen-grpc-cpp-plugin) $(in) --grpc_out=$(genDir) --cpp_out=$(genDir)", + srcs: [ + "proto/nnhal_object_detection.proto", + ], + out: [ + "nnhal_object_detection.pb.h", + "nnhal_object_detection.grpc.pb.h", + ], +} + +genrule { + name: "ObjectDetectionProtoStub_cc", + tools: [ + "aprotoc", + "protoc-gen-grpc-cpp-plugin", + ], + cmd: "$(location aprotoc) -I$$(dirname $(in)) -Iexternal/protobuf/src --plugin=protoc-gen-grpc=$(location protoc-gen-grpc-cpp-plugin) $(in) --grpc_out=$(genDir) --cpp_out=$(genDir)", + srcs: [ + "proto/nnhal_object_detection.proto", + ], + out: [ + "nnhal_object_detection.pb.cc", + "nnhal_object_detection.grpc.pb.cc", + ], +} + //############################################################## cc_binary { name: "android.hardware.neuralnetworks@1.3-generic-service", diff --git a/BUILD.gn b/BUILD.gn index 288347ebc..5f3a67d8a 100755 --- a/BUILD.gn +++ b/BUILD.gn @@ -160,6 +160,7 @@ shared_library("intel_nnhal") { "ModelManager.cpp", "cpu/CpuPreparedModel.cpp", "BasePreparedModel.cpp", + "DetectionClient.cpp", ] include_dirs = [ diff --git a/BasePreparedModel.cpp b/BasePreparedModel.cpp index 94704fdec..781bf119d 100644 --- a/BasePreparedModel.cpp +++ b/BasePreparedModel.cpp @@ -36,6 +36,8 @@ namespace nnhal { using namespace android::nn; static const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; +bool mRemoteCheck = false; +std::shared_ptr mDetectionClient; void BasePreparedModel::deinitialize() { ALOGV("Entering %s", __func__); @@ -56,6 +58,39 @@ bool BasePreparedModel::initialize() { return true; } +bool BasePreparedModel::checkRemoteConnection() { + char ip_port[PROPERTY_VALUE_MAX] = ""; + bool is_success = false; + if (!getGrpcIpPort(ip_port)) { + ALOGV("Invalid value for ip_port property : %s", ip_port); + return is_success; + } + mDetectionClient = std::make_shared( + grpc::CreateChannel(ip_port, grpc::InsecureChannelCredentials())); + if(!mDetectionClient) { + ALOGV("Unable to create channel with ip_port: %s", ip_port); + return is_success; + } + auto reply = mDetectionClient->prepare(is_success); + ALOGI("GRPC (%s) prepare response - %d : %s", ip_port, is_success, reply.c_str()); + mRemoteCheck = is_success; + return is_success; +} + +bool BasePreparedModel::loadRemoteModel() { + ALOGI("Entering %s", __func__); + bool is_success = false; + if(mDetectionClient) { + auto reply = mDetectionClient->sendIRs(is_success); + ALOGI("sendIRs response GRPC %d %s", is_success, reply.c_str()); + } + else { + ALOGE("%s mDetectionClient is null",__func__); + } + mRemoteCheck = is_success; + return is_success; +} + static Return notify(const sp& callback, const ErrorStatus& status, const hidl_vec&, Timing) { return callback->notify(status); @@ -338,28 +373,43 @@ static std::tuple, Timing> executeSynch continue; } ALOGD("Input index: %d layername : %s", inIndex, inputNodeName.c_str()); - 
auto destBlob = plugin->getBlob(inputNodeName); - if (modelInfo->getOperandType(inIndex) == OperandType::TENSOR_FLOAT16) { - float* dest = destBlob->buffer().as(); - _Float16* src = (_Float16*)srcPtr; - - for (unsigned int i = 0; i < len / 2; i++) { - dest[i] = src[i]; - } + //check if remote infer is available + //TODO: Need to add FLOAT16 support for remote inferencing + if(mRemoteCheck && mDetectionClient) { + mDetectionClient->add_input_data(inputNodeName, (uint8_t*)srcPtr, ngraphNw->getOutputShape(inIndex)); } else { - uint8_t* dest = destBlob->buffer().as(); - std::memcpy(dest, (uint8_t*)srcPtr, len); + auto destBlob = plugin->getBlob(inputNodeName); + if (modelInfo->getOperandType(inIndex) == OperandType::TENSOR_FLOAT16) { + float* dest = destBlob->buffer().as(); + _Float16* src = (_Float16*)srcPtr; + + for (unsigned int i = 0; i < len / 2; i++) { + dest[i] = src[i]; + } + } else { + uint8_t* dest = destBlob->buffer().as(); + std::memcpy(dest, (uint8_t*)srcPtr, len); + } } + } ALOGD("%s Run", __func__); if (measure == MeasureTiming::YES) deviceStart = now(); - try { - plugin->infer(); - } catch (const std::exception& ex) { - ALOGE("%s Exception !!! %s", __func__, ex.what()); - return {ErrorStatus::GENERAL_FAILURE, {}, kNoTiming}; + if(mRemoteCheck) { + ALOGI("%s Remote Infer", __func__); + auto reply = mDetectionClient->remote_infer(); + ALOGI("***********GRPC server response************* %s", reply.c_str()); + } + if (!mRemoteCheck || !mDetectionClient->get_status()){ + try { + ALOGI("%s Client Infer", __func__); + plugin->infer(); + } catch (const std::exception& ex) { + ALOGE("%s Exception !!! %s", __func__, ex.what()); + return {ErrorStatus::GENERAL_FAILURE, {}, kNoTiming}; + } } if (measure == MeasureTiming::YES) deviceEnd = now(); @@ -420,44 +470,50 @@ static std::tuple, Timing> executeSynch "OUTPUT_INSUFFICIENT_SIZE error"); return {ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, modelInfo->getOutputShapes(), kNoTiming}; } - - switch (operandType) { - case OperandType::TENSOR_INT32: - case OperandType::TENSOR_FLOAT32: { - std::memcpy((uint8_t*)destPtr, srcBlob->buffer().as(), - srcBlob->byteSize()); - break; - } - case OperandType::TENSOR_BOOL8: { - floatToUint8(srcBlob->buffer().as(), (uint8_t*)destPtr, srcBlob->size()); - break; - } - case OperandType::TENSOR_QUANT8_ASYMM: { - floatToUint8(srcBlob->buffer().as(), (uint8_t*)destPtr, srcBlob->size()); - break; - } - case OperandType::TENSOR_QUANT8_SYMM: - case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: - case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: { - floatToint8(srcBlob->buffer().as(), (int8_t*)destPtr, srcBlob->size()); - break; - } - case OperandType::TENSOR_FLOAT16: { - floatToFloat16(srcBlob->buffer().as(), (_Float16*)destPtr, srcBlob->size()); - break; - } - case OperandType::TENSOR_QUANT16_SYMM: { - floatToInt16(srcBlob->buffer().as(), (int16_t*)destPtr, srcBlob->size()); - break; - } - case OperandType::TENSOR_QUANT16_ASYMM: { - floatToUInt16(srcBlob->buffer().as(), (uint16_t*)destPtr, srcBlob->size()); - break; + //copy output from remote infer + //TODO: Add support for other OperandType + if (mRemoteCheck && mDetectionClient && mDetectionClient->get_status()) { + mDetectionClient->get_output_data(outputNodeName, (uint8_t*)destPtr, ngraphNw->getOutputShape(outIndex)); + } else { + switch (operandType) { + case OperandType::TENSOR_INT32: + case OperandType::TENSOR_FLOAT32: { + std::memcpy((uint8_t*)destPtr, srcBlob->buffer().as(), + srcBlob->byteSize()); + break; + } + case OperandType::TENSOR_BOOL8: { + 
floatToUint8(srcBlob->buffer().as(), (uint8_t*)destPtr, srcBlob->size()); + break; + } + case OperandType::TENSOR_QUANT8_ASYMM: { + floatToUint8(srcBlob->buffer().as(), (uint8_t*)destPtr, srcBlob->size()); + break; + } + case OperandType::TENSOR_QUANT8_SYMM: + case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: + case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: { + floatToint8(srcBlob->buffer().as(), (int8_t*)destPtr, srcBlob->size()); + break; + } + case OperandType::TENSOR_FLOAT16: { + floatToFloat16(srcBlob->buffer().as(), (_Float16*)destPtr, srcBlob->size()); + break; + } + case OperandType::TENSOR_QUANT16_SYMM: { + floatToInt16(srcBlob->buffer().as(), (int16_t*)destPtr, srcBlob->size()); + break; + } + case OperandType::TENSOR_QUANT16_ASYMM: { + floatToUInt16(srcBlob->buffer().as(), (uint16_t*)destPtr, srcBlob->size()); + break; + } + default: + std::memcpy((uint8_t*)destPtr, srcBlob->buffer().as(), + srcBlob->byteSize()); + break; } - default: - std::memcpy((uint8_t*)destPtr, srcBlob->buffer().as(), - srcBlob->byteSize()); - break; + } } @@ -465,6 +521,9 @@ static std::tuple, Timing> executeSynch ALOGE("Failed to update the request pool infos"); return {ErrorStatus::GENERAL_FAILURE, {}, kNoTiming}; } + if (mRemoteCheck && mDetectionClient && mDetectionClient->get_status()) { + mDetectionClient->clear_data(); + } if (measure == MeasureTiming::YES) { driverEnd = now(); diff --git a/BasePreparedModel.h b/BasePreparedModel.h index ba9b2e2e6..c2ff83cb5 100755 --- a/BasePreparedModel.h +++ b/BasePreparedModel.h @@ -33,6 +33,7 @@ #include "Driver.h" #include "IENetwork.h" #include "ModelManager.h" +#include "DetectionClient.h" #include "utils.h" #if __ANDROID__ @@ -51,7 +52,8 @@ namespace nnhal { template using vec = std::vector; typedef uint8_t* memory; - +extern bool mRemoteCheck; +extern std::shared_ptr mDetectionClient; class BasePreparedModel : public V1_3::IPreparedModel { public: BasePreparedModel(const Model& model) : mTargetDevice(IntelDeviceType::CPU) { @@ -89,6 +91,8 @@ class BasePreparedModel : public V1_3::IPreparedModel { executeFenced_cb cb) override; virtual bool initialize(); + virtual bool checkRemoteConnection(); + virtual bool loadRemoteModel(); std::shared_ptr getModelInfo() { return mModelInfo; } diff --git a/DetectionClient.cpp b/DetectionClient.cpp new file mode 100644 index 000000000..d78cf7b86 --- /dev/null +++ b/DetectionClient.cpp @@ -0,0 +1,128 @@ +#include "DetectionClient.h" + +#undef LOG_TAG +#define LOG_TAG "DetectionClient" + +std::string DetectionClient::prepare(bool& flag) { + RequestString request; + request.set_value(""); + ReplyStatus reply; + ClientContext context; + time_point deadline = std::chrono::system_clock::now() + std::chrono::milliseconds(100); + context.set_deadline(deadline); + + Status status = stub_->prepare(&context, request, &reply); + + if (status.ok()) { + flag = reply.status(); + return (flag ? 
"status True" : "status False"); + } else { + return std::string(status.error_message()); + } +} + +Status DetectionClient::sendFile(std::string fileName, + std::unique_ptr >& writer) { + RequestDataChunks request; + uint32_t CHUNK_SIZE = 1024 * 1024; + std::ifstream fin(fileName, std::ifstream::binary); + std::vector buffer(CHUNK_SIZE, 0); + ALOGI("GRPC sendFile %d sized chunks from %s", CHUNK_SIZE, fileName.c_str()); + + if (!fin.is_open()) ALOGE("GRPC sendFile file Open Error "); + while (!fin.eof()) { + fin.read(buffer.data(), buffer.size()); + std::streamsize s = fin.gcount(); + // ALOGI("GRPC sendFile read %d", s); + request.set_data(buffer.data(), s); + if (!writer->Write(request)) { + ALOGE("GRPC Broken Stream "); + break; + } + } + + writer->WritesDone(); + ALOGI("GRPC sendFile completed %s", fileName.c_str()); + return writer->Finish(); +} + +std::string DetectionClient::sendIRs(bool& flag) { + ReplyStatus reply; + ClientContext context; + std::unique_ptr > writerXml = + std::unique_ptr >(stub_->sendXml(&context, &reply)); + Status status = sendFile(IR_XML, writerXml); + + if (status.ok()) { + ClientContext newContext; + std::unique_ptr > writerBin = + std::unique_ptr >( + stub_->sendBin(&newContext, &reply)); + status = sendFile(IR_BIN, writerBin); + if (status.ok()) { + flag = reply.status(); + return (flag ? "status True" : "status False"); + } + } + return std::string(status.error_message()); +} + +void DetectionClient::add_input_data(std::string label, const uint8_t* buffer, std::vector shape) { + const float* src; + size_t index; + size_t size = 1; + + DataTensor* input = request.add_data_tensors(); + input->set_node_name(label); + for (index = 0; index < shape.size(); index++) { + input->add_tensor_shape(shape[index]); + size *= shape[index]; + } + input->set_data(buffer, size * sizeof(float)); +} + +void DetectionClient::get_output_data(std::string label, uint8_t* buffer, std::vector shape) { + std::string src; + size_t index; + size_t size = 1; + + for (index = 0; index < shape.size(); index++) { + size *= shape[index]; + } + for (index = 0; index < reply.data_tensors_size(); index++) { + if (label.compare(reply.data_tensors(index).node_name()) == 0) { + src = reply.data_tensors(index).data(); + memcpy(buffer, src.data(), src.length()); + break; + } + } +} + +void DetectionClient::clear_data() { + request.clear_data_tensors(); + reply.clear_data_tensors(); +} + +std::string DetectionClient::remote_infer() { + ClientContext context; + time_point deadline = std::chrono::system_clock::now() + std::chrono::milliseconds(1000); + context.set_deadline(deadline); + + status = stub_->getInferResult(&context, request, &reply); + if (status.ok()) { + if (reply.data_tensors_size() == 0) ALOGE("GRPC reply empty, ovms failure ?"); + return "Success"; + } else { + ALOGE("GRPC Error code: %d, message: %s", status.error_code(), + status.error_message().c_str()); + return std::string(status.error_message()); + } +} + +bool DetectionClient::get_status() { + if (status.ok() && (reply.data_tensors_size() > 0)) + return 1; + else { + return 0; + } +} \ No newline at end of file diff --git a/DetectionClient.h b/DetectionClient.h new file mode 100644 index 000000000..1b272f699 --- /dev/null +++ b/DetectionClient.h @@ -0,0 +1,52 @@ +#ifndef __DETECTION_CLIENT_H +#define __DETECTION_CLIENT_H + +#include +#include +#include +#include +#include +#include +#include "nnhal_object_detection.grpc.pb.h" + +using grpc::Channel; +using grpc::ClientContext; +using grpc::ClientWriter; +using grpc::Status; 
+using objectDetection::DataTensor;
+using objectDetection::Detection;
+using objectDetection::ReplyDataTensors;
+using objectDetection::ReplyStatus;
+using objectDetection::RequestDataChunks;
+using objectDetection::RequestDataTensors;
+using objectDetection::RequestString;
+using time_point = std::chrono::system_clock::time_point;
+
+static std::string IR_XML("/data/vendor/neuralnetworks/ngraph_ir.xml");
+static std::string IR_BIN("/data/vendor/neuralnetworks/ngraph_ir.bin");
+
+class DetectionClient {
+public:
+    DetectionClient(std::shared_ptr<Channel> channel) : stub_(Detection::NewStub(channel)){}
+
+    std::string prepare(bool& flag);
+
+    Status sendFile(std::string fileName,
+                    std::unique_ptr<ClientWriter<RequestDataChunks> >& writer);
+
+    std::string sendIRs(bool& flag);
+
+    void add_input_data(std::string label, const uint8_t* buffer, std::vector<uint32_t> shape);
+    void get_output_data(std::string label, uint8_t* buffer, std::vector<uint32_t> shape);
+    void clear_data();
+    std::string remote_infer();
+    bool get_status();
+
+private:
+    std::unique_ptr<Detection::Stub> stub_;
+    RequestDataTensors request;
+    ReplyDataTensors reply;
+    Status status;
+};
+
+#endif
\ No newline at end of file
diff --git a/IENetwork.cpp b/IENetwork.cpp
index f0c481c52..155307ffa 100644
--- a/IENetwork.cpp
+++ b/IENetwork.cpp
@@ -1,10 +1,6 @@
 #include "IENetwork.h"
 #include "ie_common.h"
-
-#include
-#include
 #include
-#include
 
 #undef LOG_TAG
 #define LOG_TAG "IENetwork"
diff --git a/IENetwork.h b/IENetwork.h
index d00faf438..54f0545e3 100644
--- a/IENetwork.h
+++ b/IENetwork.h
@@ -9,6 +9,10 @@
 #include
 #include "utils.h"
 
+#include
+#include
+#include
+#include
 // #include "ie_blob.h"
 // #include "ie_common.h"
 // #include "ie_core.hpp"
@@ -56,6 +60,8 @@ class IENetwork : public IIENetwork {
     InferenceEngine::InferRequest getInferRequest() { return mInferRequest; }
     void queryState() {}
     void infer();
+    bool getGrpcIpPort(char *ip_port);
+
 };
 
 }  // namespace nnhal
diff --git a/cpu/CpuPreparedModel.cpp b/cpu/CpuPreparedModel.cpp
index 1c3fa986e..1046102d6 100755
--- a/cpu/CpuPreparedModel.cpp
+++ b/cpu/CpuPreparedModel.cpp
@@ -31,6 +31,7 @@ bool CpuPreparedModel::initialize() {
         ALOGE("Failed to initialize Model runtime parameters!!");
         return false;
     }
+    BasePreparedModel::checkRemoteConnection();
     mNgraphNetCreator = std::make_shared<NgraphNetworkCreator>(mModelInfo, mTargetDevice);
 
     if (!mNgraphNetCreator->validateOperations()) return false;
@@ -50,6 +51,12 @@ bool CpuPreparedModel::initialize() {
 #endif
         mPlugin = std::make_shared<IENetwork>(cnnNetworkPtr);
         mPlugin->loadNetwork();
+        if(mRemoteCheck) {
+            auto resp = loadRemoteModel();
+            ALOGD("%s Load Remote Model returns %d", __func__, resp);
+        } else {
+            ALOGI("%s Remote connection unavailable", __func__);
+        }
     } catch (const std::exception& ex) {
         ALOGE("%s Exception !!! %s", __func__, ex.what());
%s", __func__, ex.what()); return false; diff --git a/ngraph_creator/include/NgraphNetworkCreator.hpp b/ngraph_creator/include/NgraphNetworkCreator.hpp index c2ff98d6a..3d51e8a1d 100644 --- a/ngraph_creator/include/NgraphNetworkCreator.hpp +++ b/ngraph_creator/include/NgraphNetworkCreator.hpp @@ -27,6 +27,7 @@ class NgraphNetworkCreator { bool validateOperations(); const std::string& getNodeName(uint32_t index); + std::vector getOutputShape(uint32_t index); std::shared_ptr generateGraph(); }; diff --git a/ngraph_creator/include/NgraphNodes.hpp b/ngraph_creator/include/NgraphNodes.hpp index 783d23634..79f95055f 100644 --- a/ngraph_creator/include/NgraphNodes.hpp +++ b/ngraph_creator/include/NgraphNodes.hpp @@ -35,6 +35,7 @@ class NgraphNodes { const std::string& getNodeName(size_t index); void removeInputParameter(std::string name, size_t index); + std::vector getOutputShape(size_t index); std::shared_ptr generateGraph(); // Setting the node name to empty string "". Caller of getNodeName should validate against "". diff --git a/ngraph_creator/src/NgraphNetworkCreator.cpp b/ngraph_creator/src/NgraphNetworkCreator.cpp index 69209129a..f908d1d2d 100644 --- a/ngraph_creator/src/NgraphNetworkCreator.cpp +++ b/ngraph_creator/src/NgraphNetworkCreator.cpp @@ -159,6 +159,11 @@ const std::string& NgraphNetworkCreator::getNodeName(uint32_t index) { return mNgraphNodes->getNodeName(index); } +std::vector NgraphNetworkCreator::getOutputShape(uint32_t index) { + + ALOGV("get node %d outputsize ", __func__, index); + return mNgraphNodes->getOutputShape(index); +} std::shared_ptr NgraphNetworkCreator::generateGraph() { ALOGV("%s Called", __func__); std::shared_ptr ret; diff --git a/ngraph_creator/src/NgraphNodes.cpp b/ngraph_creator/src/NgraphNodes.cpp index ebdbc1788..cfbd98bff 100644 --- a/ngraph_creator/src/NgraphNodes.cpp +++ b/ngraph_creator/src/NgraphNodes.cpp @@ -39,6 +39,13 @@ const std::string& NgraphNodes::getNodeName(size_t index) { ALOGV("%s index %zu, name %s", __func__, index, mNodeNames[index].c_str()); return mNodeNames[index]; } + +std::vector NgraphNodes::getOutputShape(size_t index) { + + ALOGD("outputshape of node %d index ", __func__, index); + return mOutputAtOperandIndex[index].get_node_shared_ptr()->get_output_shape(0); +} + // remove null input node parameter void NgraphNodes::removeInputParameter(std::string name, size_t index) { for (size_t i = 0; i < mInputParams.size(); i++) { diff --git a/proto/nnhal_object_detection.proto b/proto/nnhal_object_detection.proto new file mode 100644 index 000000000..e0f14722a --- /dev/null +++ b/proto/nnhal_object_detection.proto @@ -0,0 +1,60 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "com.intel.examples.objectDetection"; +option java_outer_classname = "objectDetectionProto"; +option objc_class_prefix = "HLW"; + +package objectDetection; + +// The Detection service definition. 
+service Detection {
+    // Send Input Blobs and receive Output Blobs
+    rpc getInferResult (RequestDataTensors) returns (ReplyDataTensors) {}
+    rpc sendXml (stream RequestDataChunks) returns (ReplyStatus) {}
+    rpc sendBin (stream RequestDataChunks) returns (ReplyStatus) {}
+    rpc prepare (RequestString) returns (ReplyStatus) {} //Placeholder for any future support : RequestString
+}
+
+
+message RequestDataChunks {
+    bytes data = 1;
+}
+
+message RequestString {
+    string value = 1;
+}
+message ReplyStatus {
+    bool status = 1;
+}
+
+// Message defining the structure of a Data Tensor (blob)
+message DataTensor {
+    bytes data = 1;
+    string node_name = 2;
+    repeated int32 tensor_shape = 3;
+}
+
+// Reply message containing the Output Data Tensors (blobs)
+message ReplyDataTensors {
+    repeated DataTensor data_tensors = 1;
+}
+
+// Request message containing the Input Data Tensors (blobs)
+message RequestDataTensors {
+    repeated DataTensor data_tensors = 1;
+}
\ No newline at end of file
diff --git a/utils.cpp b/utils.cpp
index cbd2dd52a..c20883d6e 100644
--- a/utils.cpp
+++ b/utils.cpp
@@ -501,6 +501,14 @@ bool createDirs(std::string path) {
     return true;
 }
 
+bool getGrpcIpPort(char *ip_port) {
+    if (property_get("vendor.nn.hal.grpc_ip_port", ip_port, NULL) <= 0) {
+        ALOGV("%s : failed to read vendor.nn.hal.grpc_ip_port", __func__);
+        return false;
+    }
+    return true;
+}
+
 void writeBufferToFile(std::string filename, const float* buf, size_t length) {
     if (!createDirs(filename)) return;
 
diff --git a/utils.h b/utils.h
index 8e5315738..2b7d81f61 100755
--- a/utils.h
+++ b/utils.h
@@ -252,6 +252,7 @@ T getOperandConstVal(const Model& model, const Operand& operand) {
 int sizeOfData(OperandType type, std::vector<uint32_t> dims);
 
+bool getGrpcIpPort(char *ip_port);
 void writeBufferToFile(std::string filename, const float* buf, size_t length);
 template <typename T>
 std::shared_ptr<T> As(const std::shared_ptr<InferenceEngine::Blob>& src) {

From 194df1b9ef512a8f5facd39db2cc11998cde2869 Mon Sep 17 00:00:00 2001
From: Anoob Anto K
Date: Thu, 19 Jan 2023 12:53:09 +0530
Subject: [PATCH 08/12] Use unix sockets for grpc communication with AI Dispatcher

Introduce a unix-socket transport for the AI Dispatcher gRPC connection
alongside the existing TCP transport. The socket path is read from the
additional vendor property vendor.nn.hal.grpc_socket_path; the TCP endpoint
(vendor.nn.hal.grpc_ip_port) is still tried first, and the unix socket is
used as a fallback when the TCP prepare() handshake does not succeed.
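A condensed sketch of the selection logic (illustrative only; the
authoritative version is the BasePreparedModel::checkRemoteConnection()
diff below — the "unix:" prefix is the standard gRPC URI scheme for
unix-domain-socket endpoints, and the endpoint value shown is an example):

    char prop[PROPERTY_VALUE_MAX] = "";
    bool ok = false;
    if (getGrpcIpPort(prop)) {  // TCP endpoint, e.g. "127.0.0.1:50051", tried first
        mDetectionClient = std::make_shared<DetectionClient>(
            grpc::CreateChannel(prop, grpc::InsecureChannelCredentials()));
        if (mDetectionClient) mDetectionClient->prepare(ok);
    }
    if (!ok && getGrpcSocketPath(prop)) {  // fall back to the unix domain socket
        mDetectionClient = std::make_shared<DetectionClient>(
            grpc::CreateChannel(std::string("unix:") + prop,
                                grpc::InsecureChannelCredentials()));
        if (mDetectionClient) mDetectionClient->prepare(ok);
    }
    mRemoteCheck = ok;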
Tracked-On: OAM-105559
Signed-off-by: Anoob Anto K
---
 BasePreparedModel.cpp | 34 ++++++++++++++++++++--------------
 utils.cpp             |  8 ++++++++
 utils.h               |  1 +
 3 files changed, 29 insertions(+), 14 deletions(-)
 mode change 100755 => 100644 utils.h

diff --git a/BasePreparedModel.cpp b/BasePreparedModel.cpp
index 781bf119d..5ded09558 100644
--- a/BasePreparedModel.cpp
+++ b/BasePreparedModel.cpp
@@ -59,20 +59,26 @@ bool BasePreparedModel::initialize() {
 }
 
 bool BasePreparedModel::checkRemoteConnection() {
-    char ip_port[PROPERTY_VALUE_MAX] = "";
+    char grpc_prop[PROPERTY_VALUE_MAX] = "";
     bool is_success = false;
-    if (!getGrpcIpPort(ip_port)) {
-        ALOGV("Invalid value for ip_port property : %s", ip_port);
-        return is_success;
-    }
-    mDetectionClient = std::make_shared<DetectionClient>(
-        grpc::CreateChannel(ip_port, grpc::InsecureChannelCredentials()));
-    if(!mDetectionClient) {
-        ALOGV("Unable to create channel with ip_port: %s", ip_port);
-        return is_success;
-    }
-    auto reply = mDetectionClient->prepare(is_success);
-    ALOGI("GRPC (%s) prepare response - %d : %s", ip_port, is_success, reply.c_str());
+    if(getGrpcIpPort(grpc_prop)) {
+        ALOGD("Attempting GRPC via TCP : %s", grpc_prop);
+        mDetectionClient = std::make_shared<DetectionClient>(
+            grpc::CreateChannel(grpc_prop, grpc::InsecureChannelCredentials()));
+        if(mDetectionClient) {
+            auto reply = mDetectionClient->prepare(is_success);
+            ALOGI("GRPC prepare response is %d : %s", is_success, reply.c_str());
+        }
+    }
+    if (!is_success && getGrpcSocketPath(grpc_prop)) {
+        ALOGD("Attempting GRPC via unix : %s", grpc_prop);
+        mDetectionClient = std::make_shared<DetectionClient>(
+            grpc::CreateChannel(std::string("unix:") + grpc_prop, grpc::InsecureChannelCredentials()));
+        if(mDetectionClient) {
+            auto reply = mDetectionClient->prepare(is_success);
+            ALOGI("GRPC prepare response is %d : %s", is_success, reply.c_str());
+        }
+    }
     mRemoteCheck = is_success;
     return is_success;
 }
@@ -398,7 +404,7 @@ static std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> executeSynch
 
     if (measure == MeasureTiming::YES) deviceStart = now();
     if(mRemoteCheck) {
-        ALOGI("%s Remote Infer", __func__);
+        ALOGI("%s GRPC Remote Infer", __func__);
         auto reply = mDetectionClient->remote_infer();
         ALOGI("***********GRPC server response************* %s", reply.c_str());
     }
diff --git a/utils.cpp b/utils.cpp
index c20883d6e..4f6dee016 100644
--- a/utils.cpp
+++ b/utils.cpp
@@ -501,6 +501,14 @@ bool createDirs(std::string path) {
     return true;
 }
 
+bool getGrpcSocketPath(char *socket_path) {
+    if (property_get("vendor.nn.hal.grpc_socket_path", socket_path, NULL) <= 0) {
+        ALOGV("%s : failed to read vendor.nn.hal.grpc_socket_path", __func__);
+        return false;
+    }
+    return true;
+}
+
 bool getGrpcIpPort(char *ip_port) {
     if (property_get("vendor.nn.hal.grpc_ip_port", ip_port, NULL) <= 0) {
         ALOGV("%s : failed to read vendor.nn.hal.grpc_ip_port", __func__);
diff --git a/utils.h b/utils.h
old mode 100755
new mode 100644
index 2b7d81f61..334e12334
--- a/utils.h
+++ b/utils.h
@@ -252,6 +252,7 @@ T getOperandConstVal(const Model& model, const Operand& operand) {
 int sizeOfData(OperandType type, std::vector<uint32_t> dims);
 
+bool getGrpcSocketPath(char *socket_path);
 bool getGrpcIpPort(char *ip_port);
 void writeBufferToFile(std::string filename, const float* buf, size_t length);
 template <typename T>

From d3962fbc37ec6d12eb4c9ed42b42b9330c9c98e1 Mon Sep 17 00:00:00 2001
From: Anoob Anto K
Date: Mon, 23 Jan 2023 19:29:25 +0530
Subject: [PATCH 09/12] Fix build warnings at NN-HAL

Tracked-On: OAM-105572
Signed-off-by: Anoob Anto K
---
 ngraph_creator/src/NgraphNetworkCreator.cpp | 2 +-
ngraph_creator/src/NgraphNodes.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ngraph_creator/src/NgraphNetworkCreator.cpp b/ngraph_creator/src/NgraphNetworkCreator.cpp index f908d1d2d..c9980f7f4 100644 --- a/ngraph_creator/src/NgraphNetworkCreator.cpp +++ b/ngraph_creator/src/NgraphNetworkCreator.cpp @@ -161,7 +161,7 @@ const std::string& NgraphNetworkCreator::getNodeName(uint32_t index) { std::vector NgraphNetworkCreator::getOutputShape(uint32_t index) { - ALOGV("get node %d outputsize ", __func__, index); + ALOGV("%s get node %d outputsize ", __func__, index); return mNgraphNodes->getOutputShape(index); } std::shared_ptr NgraphNetworkCreator::generateGraph() { diff --git a/ngraph_creator/src/NgraphNodes.cpp b/ngraph_creator/src/NgraphNodes.cpp index cfbd98bff..a4277bb86 100644 --- a/ngraph_creator/src/NgraphNodes.cpp +++ b/ngraph_creator/src/NgraphNodes.cpp @@ -42,7 +42,7 @@ const std::string& NgraphNodes::getNodeName(size_t index) { std::vector NgraphNodes::getOutputShape(size_t index) { - ALOGD("outputshape of node %d index ", __func__, index); + ALOGD("%s outputshape of node %d index ", __func__, index); return mOutputAtOperandIndex[index].get_node_shared_ptr()->get_output_shape(0); } From dc79b2ee68cdd96f0f136faa2916df05f1b77c21 Mon Sep 17 00:00:00 2001 From: Anoob Anto K Date: Fri, 27 Jan 2023 16:29:44 +0530 Subject: [PATCH 10/12] Remove hardcoding for the nn-hal path Tracked-On: OAM-105568 Signed-off-by: Anoob Anto K --- Android.bp | 9 ++++++++- ngraph_creator/Android.bp | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/Android.bp b/Android.bp index 008de5f22..f4996e139 100644 --- a/Android.bp +++ b/Android.bp @@ -210,4 +210,11 @@ cc_binary { ], compile_multilib: "64", -} \ No newline at end of file +} + +//############################################################## +cc_library_headers { + name: "libnnhal_headers", + vendor: true, + export_include_dirs: ["."], +} diff --git a/ngraph_creator/Android.bp b/ngraph_creator/Android.bp index 3d1072752..c5028db30 100755 --- a/ngraph_creator/Android.bp +++ b/ngraph_creator/Android.bp @@ -95,9 +95,9 @@ cc_library_static { header_libs: [ "libngraph_headers", "libinference_headers", + "libnnhal_headers", ], include_dirs: [ - "vendor/intel/external/project-celadon/nn-hal", "packages/modules/NeuralNetworks/common/include", "packages/modules/NeuralNetworks/runtime/include", "external/mesa3d/include/android_stub" From c5ae93151ce6b1ed21d33a43cba5cc990bd867d5 Mon Sep 17 00:00:00 2001 From: Anoob Anto K Date: Fri, 27 Jan 2023 16:31:41 +0530 Subject: [PATCH 11/12] Fix issues raised by Coverity Tracked-On: OAM-105568 Signed-off-by: Anoob Anto K --- Driver.cpp | 6 ++-- ModelManager.h | 4 +-- .../operations/src/AveragePool2D.cpp | 3 ++ ngraph_creator/operations/src/Conv2d.cpp | 17 ++++------- .../operations/src/DepthwiseConv2d.cpp | 11 ++++--- .../operations/src/GroupedConv2d.cpp | 27 ++++++----------- ngraph_creator/operations/src/L2Pooling2D.cpp | 19 +++++------- ngraph_creator/operations/src/MaxPool2d.cpp | 15 ++++------ .../operations/src/TransposeConv2D.cpp | 8 +++-- ngraph_creator/src/NgraphNodes.cpp | 3 +- utils.cpp | 30 ------------------- utils.h | 1 - 12 files changed, 50 insertions(+), 94 deletions(-) diff --git a/Driver.cpp b/Driver.cpp index 06a86f5f3..af690d03a 100644 --- a/Driver.cpp +++ b/Driver.cpp @@ -124,7 +124,7 @@ Return Driver::prepareModel(const V1_0_Model& model, ALOGE("failed to create preparedmodel"); return ErrorStatus::INVALID_ARGUMENT; } - for (auto opn : model.operations) 
dumpOperation(opn); + for (auto& opn : model.operations) dumpOperation(opn); if (!driverPreparedModel->initialize()) { ALOGE("failed to initialize preparedmodel"); @@ -179,7 +179,7 @@ Return Driver::prepareModel_1_1(const V1_1_Model& model, ALOGE("failed to create preparedmodel"); return ErrorStatus::INVALID_ARGUMENT; } - for (auto opn : model.operations) dumpOperation(opn); + for (auto& opn : model.operations) dumpOperation(opn); if (!driverPreparedModel->initialize()) { ALOGE("failed to initialize preparedmodel"); @@ -284,7 +284,7 @@ Return Driver::prepareModel_1_2(const V1_2_Model& model, ALOGE("failed to create preparedmodel"); return ErrorStatus::INVALID_ARGUMENT; } - for (auto opn : model.operations) dumpOperation(opn); + for (auto& opn : model.operations) dumpOperation(opn); if (!driverPreparedModel->initialize()) { ALOGE("failed to initialize preparedmodel"); diff --git a/ModelManager.h b/ModelManager.h index e432f2bc0..a666579b1 100755 --- a/ModelManager.h +++ b/ModelManager.h @@ -177,7 +177,7 @@ class NnapiModelInfo { const hidl_vec& pools); bool updateRequestPoolInfos() { - for (auto runtimeInfo : mRequestPoolInfos) { + for (auto& runtimeInfo : mRequestPoolInfos) { runtimeInfo.update(); } @@ -187,7 +187,7 @@ class NnapiModelInfo { std::vector getOutputShapes() { return mOutputShapes; } void unmapRuntimeMemPools() { - for (auto runtimeInfo : mRequestPoolInfos) { + for (auto& runtimeInfo : mRequestPoolInfos) { runtimeInfo.unmap_mem(); } } diff --git a/ngraph_creator/operations/src/AveragePool2D.cpp b/ngraph_creator/operations/src/AveragePool2D.cpp index f72c71486..c5abdd6af 100644 --- a/ngraph_creator/operations/src/AveragePool2D.cpp +++ b/ngraph_creator/operations/src/AveragePool2D.cpp @@ -53,6 +53,9 @@ std::shared_ptr AveragePool2D::createNode() { isExplicit = true; } else if (inputsSize >= 7 && inputsSize <= 8) { isImplicit = true; + } else { + ALOGE("%s inputsSize %lu NOT SUPPORTED", __func__, inputsSize); + return inputNode; } if (isExplicit) { diff --git a/ngraph_creator/operations/src/Conv2d.cpp b/ngraph_creator/operations/src/Conv2d.cpp index 4f210399f..732fe7669 100644 --- a/ngraph_creator/operations/src/Conv2d.cpp +++ b/ngraph_creator/operations/src/Conv2d.cpp @@ -39,6 +39,8 @@ bool Conv2d::validate() { } std::shared_ptr Conv2d::createNode() { + std::shared_ptr inputNode; + inputNode = getInputNode(0); const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); ALOGD("%s inputsSize %lu", __func__, inputsSize); @@ -49,6 +51,9 @@ std::shared_ptr Conv2d::createNode() { isExplicit = true; } else if (inputsSize >= 7 && inputsSize <= 10) { isImplicit = true; + } else { + ALOGE("%s inputsSize %lu NOT SUPPORTED", __func__, inputsSize); + return inputNode; } int32_t padding_left, padding_right; @@ -108,15 +113,6 @@ std::shared_ptr Conv2d::createNode() { if (layout) useNchw = true; auto_pad = ngraph::op::PadType::EXPLICIT; - { - if (useNchw) { - input_width = inputDimensions[3]; - input_height = inputDimensions[2]; - } else { - input_width = inputDimensions[2]; - input_height = inputDimensions[1]; - } - } } if (isImplicit) { @@ -172,10 +168,9 @@ std::shared_ptr Conv2d::createNode() { } } - std::shared_ptr inputNode, filterNode, biasNode; + std::shared_ptr filterNode, biasNode; const auto& biasIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); - inputNode = getInputNode(0); filterNode = getInputNode(1); biasNode = getInputNode(2); diff --git a/ngraph_creator/operations/src/DepthwiseConv2d.cpp b/ngraph_creator/operations/src/DepthwiseConv2d.cpp 
index 3507c67c4..dd67aabaa 100644 --- a/ngraph_creator/operations/src/DepthwiseConv2d.cpp +++ b/ngraph_creator/operations/src/DepthwiseConv2d.cpp @@ -41,6 +41,8 @@ bool DepthwiseConv2d::validate() { } std::shared_ptr DepthwiseConv2d::createNode() { + std::shared_ptr inputNode; + inputNode = getInputNode(0); const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); ALOGD("%s inputsSize %lu", __func__, inputsSize); bool isImplicit = false, isExplicit = false; @@ -50,6 +52,9 @@ std::shared_ptr DepthwiseConv2d::createNode() { isExplicit = true; } else if (inputsSize >= 8 && inputsSize <= 11) { isImplicit = true; + } else { + ALOGE("%s inputsSize %lu NOT SUPPORTED", __func__, inputsSize); + return inputNode; } int32_t padding_left, padding_right; @@ -123,8 +128,7 @@ std::shared_ptr DepthwiseConv2d::createNode() { } } } - - if (isImplicit) { + else if (isImplicit) { padding_scheme = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 3); stride_width = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 4); @@ -181,10 +185,9 @@ std::shared_ptr DepthwiseConv2d::createNode() { } } - std::shared_ptr inputNode, filterNode, biasNode; + std::shared_ptr filterNode, biasNode; const auto& biasIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); - inputNode = getInputNode(0); filterNode = getInputNode(1); biasNode = getInputNode(2); diff --git a/ngraph_creator/operations/src/GroupedConv2d.cpp b/ngraph_creator/operations/src/GroupedConv2d.cpp index 7c09a2fd6..6deca60d3 100644 --- a/ngraph_creator/operations/src/GroupedConv2d.cpp +++ b/ngraph_creator/operations/src/GroupedConv2d.cpp @@ -35,6 +35,8 @@ bool GroupedConv2d::validate() { } std::shared_ptr GroupedConv2d::createNode() { + std::shared_ptr inputNode; + inputNode = getInputNode(0); const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); bool isImplicit = false, isExplicit = false; @@ -43,17 +45,20 @@ std::shared_ptr GroupedConv2d::createNode() { isExplicit = true; } else if (inputsSize >= 8 && inputsSize <= 9) { isImplicit = true; + } else { + ALOGE("%s inputsSize %lu NOT SUPPORTED", __func__, inputsSize); + return inputNode; } - int32_t padding_left, padding_right; - int32_t padding_top, padding_bottom; + int32_t padding_left = 0, padding_right = 0; + int32_t padding_top = 0, padding_bottom = 0; int32_t stride_width, stride_height; int32_t dilation_width_factor = 1, dilation_height_factor = 1; int32_t number_groups; int32_t activationFn; int32_t layout = 0; int32_t padding_scheme; - int32_t input_width, input_height, input_channel; + int32_t input_width, input_height; int32_t filter_width, filter_height; bool useNchw = false; std::vector strides; @@ -89,17 +94,6 @@ std::shared_ptr GroupedConv2d::createNode() { if (layout) useNchw = true; auto_pad = ngraph::op::PadType::EXPLICIT; - { - if (useNchw) { // NCHW - input_width = inputDimensions[3]; - input_height = inputDimensions[2]; - input_channel = inputDimensions[1]; - } else { // NHWC - input_width = inputDimensions[2]; - input_height = inputDimensions[1]; - input_channel = inputDimensions[3]; - } - } } if (isImplicit) { @@ -120,11 +114,9 @@ std::shared_ptr GroupedConv2d::createNode() { if (useNchw) { input_width = inputDimensions[3]; input_height = inputDimensions[2]; - input_channel = inputDimensions[1]; } else { input_width = inputDimensions[2]; input_height = inputDimensions[1]; - input_channel = inputDimensions[3]; } if (padding_scheme == 1) { @@ -144,10 +136,9 @@ std::shared_ptr GroupedConv2d::createNode() { } } - 
std::shared_ptr inputNode, filterNode, biasNode; + std::shared_ptr filterNode, biasNode; const auto& biasIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); - inputNode = getInputNode(0); filterNode = getInputNode(1); biasNode = getInputNode(2); diff --git a/ngraph_creator/operations/src/L2Pooling2D.cpp b/ngraph_creator/operations/src/L2Pooling2D.cpp index d8520b132..c23600a0f 100644 --- a/ngraph_creator/operations/src/L2Pooling2D.cpp +++ b/ngraph_creator/operations/src/L2Pooling2D.cpp @@ -12,6 +12,8 @@ L2Pooling2D::L2Pooling2D(int operationIndex) : OperationsBase(operationIndex) { } std::shared_ptr L2Pooling2D::createNode() { + std::shared_ptr inputNode; + inputNode = getInputNode(0); const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); bool isImplicit = false, isExplicit = false; @@ -19,10 +21,13 @@ std::shared_ptr L2Pooling2D::createNode() { isExplicit = true; } else if (inputsSize >= 7 && inputsSize <= 8) { isImplicit = true; + } else { + ALOGE("%s inputsSize %lu NOT SUPPORTED", __func__, inputsSize); + return inputNode; } - int32_t padding_left, padding_right; - int32_t padding_top, padding_bottom; + int32_t padding_left = 0, padding_right = 0; + int32_t padding_top = 0, padding_bottom = 0; int32_t stride_width, stride_height; int32_t activationFn; int32_t layout = 0; @@ -59,13 +64,6 @@ std::shared_ptr L2Pooling2D::createNode() { if (layout) useNchw = true; auto_pad = ngraph::op::PadType::EXPLICIT; - if (useNchw) { - input_width = inputDimensions[3]; - input_height = inputDimensions[2]; - } else { - input_width = inputDimensions[2]; - input_height = inputDimensions[1]; - } } if (isImplicit) { @@ -110,8 +108,7 @@ std::shared_ptr L2Pooling2D::createNode() { } } - std::shared_ptr inputNode, inputSquared, sqrtOutput; - inputNode = getInputNode(0); + std::shared_ptr inputSquared, sqrtOutput; inputSquared = std::make_shared(inputNode, inputNode); if (!useNchw) { diff --git a/ngraph_creator/operations/src/MaxPool2d.cpp b/ngraph_creator/operations/src/MaxPool2d.cpp index bf320b897..639990b6a 100644 --- a/ngraph_creator/operations/src/MaxPool2d.cpp +++ b/ngraph_creator/operations/src/MaxPool2d.cpp @@ -12,6 +12,8 @@ MaxPool2d::MaxPool2d(int operationIndex) : OperationsBase(operationIndex) { } std::shared_ptr MaxPool2d::createNode() { + std::shared_ptr inputNode; + inputNode = getInputNode(0); const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); ALOGD("%s inputsSize %lu", __func__, inputsSize); @@ -21,6 +23,9 @@ std::shared_ptr MaxPool2d::createNode() { isExplicit = true; } else if (inputsSize >= 7 && inputsSize <= 8) { isImplicit = true; + } else { + ALOGE("%s inputsSize %lu NOT SUPPORTED", __func__, inputsSize); + return inputNode; } int32_t padding_left, padding_right; @@ -61,13 +66,6 @@ std::shared_ptr MaxPool2d::createNode() { if (layout) useNchw = true; auto_pad = ngraph::op::PadType::EXPLICIT; - if (useNchw) { - input_width = inputDimensions[3]; - input_height = inputDimensions[2]; - } else { - input_width = inputDimensions[2]; - input_height = inputDimensions[1]; - } } if (isImplicit) { @@ -112,9 +110,6 @@ std::shared_ptr MaxPool2d::createNode() { } } - std::shared_ptr inputNode; - inputNode = getInputNode(0); - if (!useNchw) { // No conversion needed if useNchw set inputNode = transpose(NHWC_NCHW, inputNode); } diff --git a/ngraph_creator/operations/src/TransposeConv2D.cpp b/ngraph_creator/operations/src/TransposeConv2D.cpp index 5b9d6999a..1b0cc3c88 100644 --- a/ngraph_creator/operations/src/TransposeConv2D.cpp +++ 
b/ngraph_creator/operations/src/TransposeConv2D.cpp @@ -46,6 +46,8 @@ bool TransposeConv2D::validate() { } std::shared_ptr TransposeConv2D::createNode() { + std::shared_ptr inputNode; + inputNode = getInputNode(0); const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex); ALOGD("%s inputsSize %lu", __func__, inputsSize); @@ -55,6 +57,9 @@ std::shared_ptr TransposeConv2D::createNode() { isExplicit = true; } else if (inputsSize == 9) { isImplicit = true; + } else { + ALOGE("%s inputsSize %lu NOT SUPPORTED", __func__, inputsSize); + return inputNode; } int32_t padding_left, padding_right; @@ -152,10 +157,9 @@ std::shared_ptr TransposeConv2D::createNode() { padding_bottom = 0; } - std::shared_ptr inputNode, filterNode, biasNode; + std::shared_ptr filterNode, biasNode; const auto& biasIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2); - inputNode = getInputNode(0); filterNode = getInputNode(1); biasNode = getInputNode(2); diff --git a/ngraph_creator/src/NgraphNodes.cpp b/ngraph_creator/src/NgraphNodes.cpp index a4277bb86..e657c45a5 100644 --- a/ngraph_creator/src/NgraphNodes.cpp +++ b/ngraph_creator/src/NgraphNodes.cpp @@ -41,8 +41,7 @@ const std::string& NgraphNodes::getNodeName(size_t index) { } std::vector NgraphNodes::getOutputShape(size_t index) { - - ALOGD("%s outputshape of node %d index ", __func__, index); + ALOGD("%s outputshape of node %zu index ", __func__, index); return mOutputAtOperandIndex[index].get_node_shared_ptr()->get_output_shape(0); } diff --git a/utils.cpp b/utils.cpp index 4f6dee016..e4fa0f205 100644 --- a/utils.cpp +++ b/utils.cpp @@ -482,25 +482,6 @@ int sizeOfData(OperandType type, std::vector dims) { return size; } -// TODO: Hide under debug flag or remove from code -bool createDirs(std::string path) { - char delim = '/'; - int start = 0; - - auto pos = path.find(delim); - while (pos != std::string::npos) { - auto dir = path.substr(start, pos - start + 1); - - struct stat sb; - if (!((stat(dir.c_str(), &sb) == 0) && (S_ISDIR(sb.st_mode)))) { - if (mkdir(dir.c_str(), 0777) != 0) return false; - } - pos = path.find(delim, pos + 1); - } - - return true; -} - bool getGrpcSocketPath(char *socket_path) { if (property_get("vendor.nn.hal.grpc_socket_path", socket_path, NULL) <= 0) { ALOGV("%s : failed to read vendor.nn.hal.grpc_socket_path", __func__); @@ -517,17 +498,6 @@ bool getGrpcIpPort(char *ip_port) { return true; } -void writeBufferToFile(std::string filename, const float* buf, size_t length) { - if (!createDirs(filename)) return; - - std::ofstream ofs; - ofs.open(filename.c_str(), std::ofstream::out | std::ofstream::trunc); - for (size_t i = 0; i < length; i++) { - ofs << buf[i] << "\n"; - } - ofs.close(); -} - } // namespace nnhal } // namespace neuralnetworks } // namespace hardware diff --git a/utils.h b/utils.h index 334e12334..f598caad0 100644 --- a/utils.h +++ b/utils.h @@ -254,7 +254,6 @@ int sizeOfData(OperandType type, std::vector dims); bool getGrpcSocketPath(char *socket_path); bool getGrpcIpPort(char *ip_port); -void writeBufferToFile(std::string filename, const float* buf, size_t length); template std::shared_ptr As(const std::shared_ptr& src) { return std::static_pointer_cast(src); From 20868f0709c15395f5f078169487c64a1411ef49 Mon Sep 17 00:00:00 2001 From: Anoob Anto K Date: Wed, 1 Feb 2023 18:07:32 +0530 Subject: [PATCH 12/12] Cleanup log messages to prevent leakage of data Tracked-On: OAM-105569 Signed-off-by: Anoob Anto K --- BasePreparedModel.cpp | 22 +++++++++---------- DetectionClient.cpp | 5 +++-- 
IENetwork.cpp | 6 ++--- ModelManager.cpp | 12 +++++----- cpu/CpuPreparedModel.cpp | 2 +- ngraph_creator/operations/src/Concat.cpp | 4 ++-- .../operations/src/OperationsBase.cpp | 2 +- ngraph_creator/src/NgraphNodes.cpp | 6 ++--- utils.h | 6 ++--- 9 files changed, 33 insertions(+), 32 deletions(-) diff --git a/BasePreparedModel.cpp b/BasePreparedModel.cpp index 5ded09558..aca248dde 100644 --- a/BasePreparedModel.cpp +++ b/BasePreparedModel.cpp @@ -62,21 +62,21 @@ bool BasePreparedModel::checkRemoteConnection() { char grpc_prop[PROPERTY_VALUE_MAX] = ""; bool is_success = false; if(getGrpcIpPort(grpc_prop)) { - ALOGD("Attempting GRPC via TCP : %s", grpc_prop); + ALOGV("Attempting GRPC via TCP : %s", grpc_prop); mDetectionClient = std::make_shared( grpc::CreateChannel(grpc_prop, grpc::InsecureChannelCredentials())); if(mDetectionClient) { auto reply = mDetectionClient->prepare(is_success); - ALOGI("GRPC prepare response is %d : %s", is_success, reply.c_str()); + ALOGI("GRPC(TCP) prepare response is %d : %s", is_success, reply.c_str()); } } if (!is_success && getGrpcSocketPath(grpc_prop)) { - ALOGD("Attempting GRPC via unix : %s", grpc_prop); + ALOGV("Attempting GRPC via unix : %s", grpc_prop); mDetectionClient = std::make_shared( grpc::CreateChannel(std::string("unix:") + grpc_prop, grpc::InsecureChannelCredentials())); if(mDetectionClient) { auto reply = mDetectionClient->prepare(is_success); - ALOGI("GRPC prepare response is %d : %s", is_success, reply.c_str()); + ALOGI("GRPC(unix) prepare response is %d : %s", is_success, reply.c_str()); } } mRemoteCheck = is_success; @@ -242,7 +242,7 @@ void asyncExecute(const Request& request, MeasureTiming measure, BasePreparedMod ALOGD("Ignorning output at index(%d), since it is invalid", outIndex); continue; } - ALOGD("Output index: %d layername : %s", outIndex, outputNodeName.c_str()); + ALOGV("Output index: %d layername : %s", outIndex, outputNodeName.c_str()); auto srcBlob = plugin->getBlob(outputNodeName); auto operandType = modelInfo->getOperandType(outIndex); uint32_t actualLength = srcBlob->byteSize(); @@ -378,7 +378,7 @@ static std::tuple, Timing> executeSynch ALOGD("Ignorning input at index(%d), since it is invalid", inIndex); continue; } - ALOGD("Input index: %d layername : %s", inIndex, inputNodeName.c_str()); + ALOGV("Input index: %d layername : %s", inIndex, inputNodeName.c_str()); //check if remote infer is available //TODO: Need to add FLOAT16 support for remote inferencing if(mRemoteCheck && mDetectionClient) { @@ -400,7 +400,7 @@ static std::tuple, Timing> executeSynch } - ALOGD("%s Run", __func__); + ALOGV("%s Run", __func__); if (measure == MeasureTiming::YES) deviceStart = now(); if(mRemoteCheck) { @@ -410,7 +410,7 @@ static std::tuple, Timing> executeSynch } if (!mRemoteCheck || !mDetectionClient->get_status()){ try { - ALOGI("%s Client Infer", __func__); + ALOGV("%s Client Infer", __func__); plugin->infer(); } catch (const std::exception& ex) { ALOGE("%s Exception !!! 
%s", __func__, ex.what()); @@ -421,13 +421,13 @@ static std::tuple, Timing> executeSynch for (size_t i = 0; i < request.outputs.size(); i++) { auto outIndex = modelInfo->getModelOutputIndex(i); - ALOGI("OutputIndex: %d", outIndex); + ALOGV("OutputIndex: %d", outIndex); const std::string& outputNodeName = ngraphNw->getNodeName(outIndex); if (outputNodeName == "") { ALOGD("Ignorning output at index(%d), since it is invalid", outIndex); continue; } - ALOGD("Output index: %d layername : %s", outIndex, outputNodeName.c_str()); + ALOGV("Output index: %d layername : %s", outIndex, outputNodeName.c_str()); auto srcBlob = plugin->getBlob(outputNodeName); auto operandType = modelInfo->getOperandType(outIndex); uint32_t actualLength = srcBlob->byteSize(); @@ -435,7 +435,7 @@ static std::tuple, Timing> executeSynch void* destPtr = modelInfo->getBlobFromMemoryPoolOut(request, i, expectedLength); auto outputBlobDims = srcBlob->getTensorDesc().getDims(); - ALOGD("output precision: %d", static_cast(srcBlob->getTensorDesc().getPrecision())); + ALOGV("output precision: %d", static_cast(srcBlob->getTensorDesc().getPrecision())); switch (operandType) { case OperandType::TENSOR_BOOL8: diff --git a/DetectionClient.cpp b/DetectionClient.cpp index d78cf7b86..7f36a8b91 100644 --- a/DetectionClient.cpp +++ b/DetectionClient.cpp @@ -27,7 +27,8 @@ Status DetectionClient::sendFile(std::string fileName, uint32_t CHUNK_SIZE = 1024 * 1024; std::ifstream fin(fileName, std::ifstream::binary); std::vector buffer(CHUNK_SIZE, 0); - ALOGI("GRPC sendFile %d sized chunks from %s", CHUNK_SIZE, fileName.c_str()); + ALOGV("GRPC sendFile %s", fileName.c_str()); + ALOGI("GRPC sendFile %d sized chunks", CHUNK_SIZE); if (!fin.is_open()) ALOGE("GRPC sendFile file Open Error "); while (!fin.eof()) { @@ -42,7 +43,7 @@ Status DetectionClient::sendFile(std::string fileName, } writer->WritesDone(); - ALOGI("GRPC sendFile completed %s", fileName.c_str()); + ALOGI("GRPC sendFile completed"); return writer->Finish(); } diff --git a/IENetwork.cpp b/IENetwork.cpp index 155307ffa..9b8360964 100644 --- a/IENetwork.cpp +++ b/IENetwork.cpp @@ -11,7 +11,7 @@ namespace neuralnetworks { namespace nnhal { bool IENetwork::loadNetwork() { - ALOGD("%s", __func__); + ALOGV("%s", __func__); #if __ANDROID__ InferenceEngine::Core ie(std::string("/vendor/etc/openvino/plugins.xml")); @@ -22,7 +22,7 @@ bool IENetwork::loadNetwork() { if (mNetwork) { mExecutableNw = ie.LoadNetwork(*mNetwork, "CPU"); - ALOGD("LoadNetwork is done...."); + ALOGD("loadNetwork is done...."); mInferRequest = mExecutableNw.CreateInferRequest(); ALOGD("CreateInfereRequest is done...."); @@ -65,7 +65,7 @@ InferenceEngine::TBlob::Ptr IENetwork::getBlob(const std::string& outName } void IENetwork::infer() { - ALOGI("Infer Network\n"); + ALOGI("infer Network\n"); mInferRequest.StartAsync(); mInferRequest.Wait(10000); ALOGI("infer request completed"); diff --git a/ModelManager.cpp b/ModelManager.cpp index 3cd2f104f..ad6f944dd 100644 --- a/ModelManager.cpp +++ b/ModelManager.cpp @@ -46,11 +46,11 @@ bool NnapiModelInfo::initializeRunTimeOperandInfo() { } to.scale = from.scale; + ALOGV("OperandType = %d\n", from.type); switch (from.type) { case OperandType::TENSOR_FLOAT32: case OperandType::FLOAT32: to.type = OperandType::TENSOR_FLOAT32; - ALOGD("OperandType = %d\n", from.type); break; case OperandType::INT32: case OperandType::UINT32: @@ -170,7 +170,7 @@ const uint8_t* NnapiModelInfo::GetOperandMemory(int index, uint32_t& lenOut) { V1_3::ErrorStatus 
NnapiModelInfo::setRunTimePoolInfosFromHidlMemories( const hidl_vec& pools) { - ALOGD("Number of pools: %zu", pools.size()); + ALOGV("Number of pools: %zu", pools.size()); mRequestPoolInfos.resize(pools.size()); for (size_t i = 0; i < pools.size(); i++) { auto& poolInfo = mRequestPoolInfos[i]; @@ -197,7 +197,7 @@ V1_3::ErrorStatus NnapiModelInfo::setRunTimePoolInfosFromHidlMemories( ErrorStatus NnapiModelInfo::setRunTimePoolInfosFromHidlMemories( const hidl_vec& pools) { - ALOGD("Number of pools: %zu", pools.size()); + ALOGV("Number of pools: %zu", pools.size()); mRequestPoolInfos.resize(pools.size()); for (size_t i = 0; i < pools.size(); i++) { @@ -229,7 +229,7 @@ void* NnapiModelInfo::getBlobFromMemoryPoolIn(const Request& request, uint32_t i operand.buffer = r.buffer + arg.location.offset; operand.length = arg.location.length; - ALOGI("%s Operand length:%d pointer:%p offset:%d pool index: %d", __func__, operand.length, + ALOGV("%s Operand length:%d pointer:%p offset:%d pool index: %d", __func__, operand.length, (r.buffer + arg.location.offset), arg.location.offset, poolIndex); rBufferLength = operand.length; @@ -244,7 +244,7 @@ void* NnapiModelInfo::getBlobFromMemoryPoolOut(const Request& request, uint32_t nnAssert(poolIndex < mRequestPoolInfos.size()); auto& r = mRequestPoolInfos[poolIndex]; - ALOGD("%s lifetime:%d location offset:%d length:%d pool index:%d", __func__, operand.lifetime, + ALOGV("%s lifetime:%d location offset:%d length:%d pool index:%d", __func__, operand.lifetime, arg.location.offset, arg.location.length, poolIndex); if (arg.dimensions.size() > 0) { @@ -258,7 +258,7 @@ void* NnapiModelInfo::getBlobFromMemoryPoolOut(const Request& request, uint32_t operand.buffer = r.buffer + arg.location.offset; operand.length = arg.location.length; rBufferLength = operand.length; - ALOGI("%s Operand length:%d pointer:%p", __func__, operand.length, + ALOGV("%s Operand length:%d pointer:%p", __func__, operand.length, (r.buffer + arg.location.offset)); return (r.buffer + arg.location.offset); } diff --git a/cpu/CpuPreparedModel.cpp b/cpu/CpuPreparedModel.cpp index 1046102d6..22df0a521 100755 --- a/cpu/CpuPreparedModel.cpp +++ b/cpu/CpuPreparedModel.cpp @@ -55,7 +55,7 @@ bool CpuPreparedModel::initialize() { auto resp = loadRemoteModel(); ALOGD("%s Load Remote Model returns %d", __func__, resp); } else { - ALOGI("%s Remote connection unavailable", __func__); + ALOGD("%s Remote connection unavailable", __func__); } } catch (const std::exception& ex) { ALOGE("%s Exception !!! 
%s", __func__, ex.what()); diff --git a/ngraph_creator/operations/src/Concat.cpp b/ngraph_creator/operations/src/Concat.cpp index bdc4425a9..ea1f52dd0 100644 --- a/ngraph_creator/operations/src/Concat.cpp +++ b/ngraph_creator/operations/src/Concat.cpp @@ -31,12 +31,12 @@ std::shared_ptr Concat::createNode() { auto axis = sModelInfo->ParseOperationInput(mNnapiOperationIndex, n); // n: concatenation axis std::vector> inputs; - ALOGD("createNode n %lu, axis %d", n, axis); + ALOGV("createNode n %lu, axis %d", n, axis); for (size_t i = 0; i < n; i++) { auto inputIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, i); auto inputOp = getInputNode(i); const auto op = sModelInfo->getOperand(inputIndex); - ALOGD("createNode inputIndex %d, lifetime %d", inputIndex, op.lifetime); + ALOGV("createNode inputIndex %d, lifetime %d", inputIndex, op.lifetime); inputs.push_back(inputOp); } diff --git a/ngraph_creator/operations/src/OperationsBase.cpp b/ngraph_creator/operations/src/OperationsBase.cpp index 3df23c0f9..020033eff 100755 --- a/ngraph_creator/operations/src/OperationsBase.cpp +++ b/ngraph_creator/operations/src/OperationsBase.cpp @@ -109,7 +109,7 @@ bool OperationsBase::checkOperandType(uint32_t operandIndex, const int32_t expec const std::string& strLogInfo) { const auto operandType = (int32_t)sModelInfo->getOperandType(operandIndex); if (operandType != expectedOperandType) { - ALOGE("OperationIndex %d %s Index %d type %d invalid", mNnapiOperationIndex, + ALOGV("OperationIndex %d %s Index %d type %d invalid", mNnapiOperationIndex, strLogInfo.c_str(), operandIndex, operandType); return false; } diff --git a/ngraph_creator/src/NgraphNodes.cpp b/ngraph_creator/src/NgraphNodes.cpp index e657c45a5..2b97800a6 100644 --- a/ngraph_creator/src/NgraphNodes.cpp +++ b/ngraph_creator/src/NgraphNodes.cpp @@ -27,21 +27,21 @@ ngraph::Output NgraphNodes::getOperationOutput(size_t index) { } void NgraphNodes::setResultNode(size_t outputIndex, std::shared_ptr resultNode) { - ALOGD("setResultNode %zu", outputIndex); + ALOGV("setResultNode %zu", outputIndex); mResultNodes.push_back(resultNode); } const std::string& NgraphNodes::getNodeName(size_t index) { if (mNodeNames.find(index) == mNodeNames.end()) { mNodeNames[index] = mOutputAtOperandIndex[index].get_node_shared_ptr()->get_name(); - ALOGD("%s index %zu, name %s", __func__, index, mNodeNames[index].c_str()); + ALOGV("%s index %zu, name %s", __func__, index, mNodeNames[index].c_str()); } ALOGV("%s index %zu, name %s", __func__, index, mNodeNames[index].c_str()); return mNodeNames[index]; } std::vector NgraphNodes::getOutputShape(size_t index) { - ALOGD("%s outputshape of node %zu index ", __func__, index); + ALOGV("%s outputshape of node %zu index ", __func__, index); return mOutputAtOperandIndex[index].get_node_shared_ptr()->get_output_shape(0); } diff --git a/utils.h b/utils.h index f598caad0..aea1cafc5 100644 --- a/utils.h +++ b/utils.h @@ -89,19 +89,19 @@ enum PaddingScheme { #define VLOGDIMS(l, d, header) \ do { \ auto size = (d).size(); \ - ALOGI("%s: vectors {%d, %d, %d, %d}", header, (d)[0], size > 1 ? (d)[1] : 0, \ + ALOGV("%s: vectors {%d, %d, %d, %d}", header, (d)[0], size > 1 ? (d)[1] : 0, \ size > 2 ? (d)[2] : 0, size > 3 ? 
(d)[3] : 0); \ } while (0) #define dumpOperand(index, model) \ do { \ const auto op = model.operands[index]; \ - ALOGI("Operand (%zu) %s", index, toString(op).c_str()); \ + ALOGV("Operand (%zu) %s", index, toString(op).c_str()); \ } while (0) #define dumpOperation(operation) \ do { \ - ALOGI("Operation: %s", toString(operation).c_str()); \ + ALOGV("Operation: %s", toString(operation).c_str()); \ } while (0) #define WRONG_DIM (-1)