diff --git a/Android.bp b/Android.bp
index 82ffdfb63..f4996e139 100644
--- a/Android.bp
+++ b/Android.bp
@@ -9,6 +9,7 @@ cc_library_shared {
     srcs: [
         "Driver.cpp",
        "BasePreparedModel.cpp",
+        "DetectionClient.cpp",
         "utils.cpp",
         "IENetwork.cpp",
         "ModelManager.cpp",
@@ -27,7 +28,17 @@ cc_library_shared {
         "packages/modules/NeuralNetworks/common/include",
         "packages/modules/NeuralNetworks/runtime/include",
         "frameworks/native/libs/nativewindow/include",
-        "external/mesa3d/include/android_stub"
+        "external/mesa3d/include/android_stub",
+        "external/grpc-grpc",
+        "external/grpc-grpc/include",
+        "external/grpc-grpc/third_party/cares",
+        "external/grpc-grpc/third_party/cares/config_android",
+        "external/grpc-grpc/src/core/ext/filters/client_channel",
+        "external/grpc-grpc/third_party/nanopb",
+        "external/protobuf",
+        "external/protobuf/src",
+        "external/protobuf/config",
+        "external/protobuf/android"
     ],

     header_libs: [
@@ -94,7 +105,9 @@ cc_library_shared {
         "libutils",
         "libinference_engine",
         "libngraph",
-        "libMKLDNNPlugin"
+        "libMKLDNNPlugin",
+        "libgrpc++",
+        "libprotobuf-cpp-full"
     ],

     static_libs: [
@@ -103,6 +116,13 @@ cc_library_shared {
         "libngraph_creator",
     ],

+    generated_headers: [
+        "ObjectDetectionProtoStub_h",
+    ],
+    generated_sources: [
+        "ObjectDetectionProtoStub_cc",
+    ],
+
     defaults: [
         "neuralnetworks_defaults"
     ],
@@ -111,6 +131,38 @@ cc_library_shared {

 }

+genrule {
+    name: "ObjectDetectionProtoStub_h",
+    tools: [
+        "aprotoc",
+        "protoc-gen-grpc-cpp-plugin",
+    ],
+    cmd: "$(location aprotoc) -I$$(dirname $(in)) -Iexternal/protobuf/src --plugin=protoc-gen-grpc=$(location protoc-gen-grpc-cpp-plugin) $(in) --grpc_out=$(genDir) --cpp_out=$(genDir)",
+    srcs: [
+        "proto/nnhal_object_detection.proto",
+    ],
+    out: [
+        "nnhal_object_detection.pb.h",
+        "nnhal_object_detection.grpc.pb.h",
+    ],
+}
+
+genrule {
+    name: "ObjectDetectionProtoStub_cc",
+    tools: [
+        "aprotoc",
+        "protoc-gen-grpc-cpp-plugin",
+    ],
+    cmd: "$(location aprotoc) -I$$(dirname $(in)) -Iexternal/protobuf/src --plugin=protoc-gen-grpc=$(location protoc-gen-grpc-cpp-plugin) $(in) --grpc_out=$(genDir) --cpp_out=$(genDir)",
+    srcs: [
+        "proto/nnhal_object_detection.proto",
+    ],
+    out: [
+        "nnhal_object_detection.pb.cc",
+        "nnhal_object_detection.grpc.pb.cc",
+    ],
+}
+
 //##############################################################
 cc_binary {
     name: "android.hardware.neuralnetworks@1.3-generic-service",
@@ -158,4 +210,11 @@ cc_binary {
     ],
     compile_multilib: "64",
-}
\ No newline at end of file
+}
+
+//##############################################################
+cc_library_headers {
+    name: "libnnhal_headers",
+    vendor: true,
+    export_include_dirs: ["."],
+}
diff --git a/BUILD.gn b/BUILD.gn
index 288347ebc..5f3a67d8a 100755
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -160,6 +160,7 @@ shared_library("intel_nnhal") {
     "ModelManager.cpp",
     "cpu/CpuPreparedModel.cpp",
     "BasePreparedModel.cpp",
+    "DetectionClient.cpp",
   ]

   include_dirs = [
diff --git a/BasePreparedModel.cpp b/BasePreparedModel.cpp
index 94704fdec..aca248dde 100644
--- a/BasePreparedModel.cpp
+++ b/BasePreparedModel.cpp
@@ -36,6 +36,8 @@ namespace nnhal {
 using namespace android::nn;

 static const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+bool mRemoteCheck = false;
+std::shared_ptr<DetectionClient> mDetectionClient;

 void BasePreparedModel::deinitialize() {
     ALOGV("Entering %s", __func__);
@@ -56,6 +58,45 @@ bool BasePreparedModel::initialize() {
     return true;
 }

+bool BasePreparedModel::checkRemoteConnection() {
+    char grpc_prop[PROPERTY_VALUE_MAX] = "";
+    bool is_success = false;
+    if(getGrpcIpPort(grpc_prop)) {
+        ALOGV("Attempting GRPC via TCP : %s", grpc_prop);
+        mDetectionClient = std::make_shared<DetectionClient>(
+            grpc::CreateChannel(grpc_prop, grpc::InsecureChannelCredentials()));
+        if(mDetectionClient) {
+            auto reply = mDetectionClient->prepare(is_success);
+            ALOGI("GRPC(TCP) prepare response is %d : %s", is_success, reply.c_str());
+        }
+    }
+    if (!is_success && getGrpcSocketPath(grpc_prop)) {
+        ALOGV("Attempting GRPC via unix : %s", grpc_prop);
+        mDetectionClient = std::make_shared<DetectionClient>(
+            grpc::CreateChannel(std::string("unix:") + grpc_prop, grpc::InsecureChannelCredentials()));
+        if(mDetectionClient) {
+            auto reply = mDetectionClient->prepare(is_success);
+            ALOGI("GRPC(unix) prepare response is %d : %s", is_success, reply.c_str());
+        }
+    }
+    mRemoteCheck = is_success;
+    return is_success;
+}
+
+bool BasePreparedModel::loadRemoteModel() {
+    ALOGI("Entering %s", __func__);
+    bool is_success = false;
+    if(mDetectionClient) {
+        auto reply = mDetectionClient->sendIRs(is_success);
+        ALOGI("sendIRs response GRPC %d %s", is_success, reply.c_str());
+    }
+    else {
+        ALOGE("%s mDetectionClient is null",__func__);
+    }
+    mRemoteCheck = is_success;
+    return is_success;
+}
+
 static Return<void> notify(const sp<V1_0::IExecutionCallback>& callback, const ErrorStatus& status,
                            const hidl_vec<OutputShape>&, Timing) {
     return callback->notify(status);
@@ -201,7 +242,7 @@ void asyncExecute(const Request& request, MeasureTiming measure, BasePreparedMod
             ALOGD("Ignorning output at index(%d), since it is invalid", outIndex);
             continue;
         }
-        ALOGD("Output index: %d layername : %s", outIndex, outputNodeName.c_str());
+        ALOGV("Output index: %d layername : %s", outIndex, outputNodeName.c_str());
         auto srcBlob = plugin->getBlob(outputNodeName);
         auto operandType = modelInfo->getOperandType(outIndex);
         uint32_t actualLength = srcBlob->byteSize();
@@ -337,41 +378,56 @@ static std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> executeSynch
             ALOGD("Ignorning input at index(%d), since it is invalid", inIndex);
             continue;
         }
-        ALOGD("Input index: %d layername : %s", inIndex, inputNodeName.c_str());
-        auto destBlob = plugin->getBlob(inputNodeName);
-        if (modelInfo->getOperandType(inIndex) == OperandType::TENSOR_FLOAT16) {
-            float* dest = destBlob->buffer().as<float*>();
-            _Float16* src = (_Float16*)srcPtr;
-
-            for (unsigned int i = 0; i < len / 2; i++) {
-                dest[i] = src[i];
-            }
+        ALOGV("Input index: %d layername : %s", inIndex, inputNodeName.c_str());
+        //check if remote infer is available
+        //TODO: Need to add FLOAT16 support for remote inferencing
+        if(mRemoteCheck && mDetectionClient) {
+            mDetectionClient->add_input_data(inputNodeName, (uint8_t*)srcPtr, ngraphNw->getOutputShape(inIndex));
         } else {
-            uint8_t* dest = destBlob->buffer().as<uint8_t*>();
-            std::memcpy(dest, (uint8_t*)srcPtr, len);
+            auto destBlob = plugin->getBlob(inputNodeName);
+            if (modelInfo->getOperandType(inIndex) == OperandType::TENSOR_FLOAT16) {
+                float* dest = destBlob->buffer().as<float*>();
+                _Float16* src = (_Float16*)srcPtr;
+
+                for (unsigned int i = 0; i < len / 2; i++) {
+                    dest[i] = src[i];
+                }
+            } else {
+                uint8_t* dest = destBlob->buffer().as<uint8_t*>();
+                std::memcpy(dest, (uint8_t*)srcPtr, len);
+            }
         }
+    }

-    ALOGD("%s Run", __func__);
+    ALOGV("%s Run", __func__);

     if (measure == MeasureTiming::YES) deviceStart = now();
-    try {
-        plugin->infer();
-    } catch (const std::exception& ex) {
-        ALOGE("%s Exception !!! %s", __func__, ex.what());
-        return {ErrorStatus::GENERAL_FAILURE, {}, kNoTiming};
+    if(mRemoteCheck) {
+        ALOGI("%s GRPC Remote Infer", __func__);
+        auto reply = mDetectionClient->remote_infer();
+        ALOGI("***********GRPC server response************* %s", reply.c_str());
+    }
+    if (!mRemoteCheck || !mDetectionClient->get_status()){
+        try {
+            ALOGV("%s Client Infer", __func__);
+            plugin->infer();
+        } catch (const std::exception& ex) {
+            ALOGE("%s Exception !!! %s", __func__, ex.what());
+            return {ErrorStatus::GENERAL_FAILURE, {}, kNoTiming};
+        }
     }
     if (measure == MeasureTiming::YES) deviceEnd = now();

     for (size_t i = 0; i < request.outputs.size(); i++) {
         auto outIndex = modelInfo->getModelOutputIndex(i);
-        ALOGI("OutputIndex: %d", outIndex);
+        ALOGV("OutputIndex: %d", outIndex);
         const std::string& outputNodeName = ngraphNw->getNodeName(outIndex);
         if (outputNodeName == "") {
             ALOGD("Ignorning output at index(%d), since it is invalid", outIndex);
             continue;
         }
-        ALOGD("Output index: %d layername : %s", outIndex, outputNodeName.c_str());
+        ALOGV("Output index: %d layername : %s", outIndex, outputNodeName.c_str());
         auto srcBlob = plugin->getBlob(outputNodeName);
         auto operandType = modelInfo->getOperandType(outIndex);
         uint32_t actualLength = srcBlob->byteSize();
@@ -379,7 +435,7 @@ static std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> executeSynch
         void* destPtr = modelInfo->getBlobFromMemoryPoolOut(request, i, expectedLength);
         auto outputBlobDims = srcBlob->getTensorDesc().getDims();

-        ALOGD("output precision: %d", static_cast<int>(srcBlob->getTensorDesc().getPrecision()));
+        ALOGV("output precision: %d", static_cast<int>(srcBlob->getTensorDesc().getPrecision()));

         switch (operandType) {
             case OperandType::TENSOR_BOOL8:
@@ -420,44 +476,50 @@ static std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> executeSynch
                   "OUTPUT_INSUFFICIENT_SIZE error");
             return {ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, modelInfo->getOutputShapes(), kNoTiming};
         }
-
-        switch (operandType) {
-            case OperandType::TENSOR_INT32:
-            case OperandType::TENSOR_FLOAT32: {
-                std::memcpy((uint8_t*)destPtr, srcBlob->buffer().as<uint8_t*>(),
-                            srcBlob->byteSize());
-                break;
-            }
-            case OperandType::TENSOR_BOOL8: {
-                floatToUint8(srcBlob->buffer().as<float*>(), (uint8_t*)destPtr, srcBlob->size());
-                break;
-            }
-            case OperandType::TENSOR_QUANT8_ASYMM: {
-                floatToUint8(srcBlob->buffer().as<float*>(), (uint8_t*)destPtr, srcBlob->size());
-                break;
-            }
-            case OperandType::TENSOR_QUANT8_SYMM:
-            case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
-            case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: {
-                floatToint8(srcBlob->buffer().as<float*>(), (int8_t*)destPtr, srcBlob->size());
-                break;
-            }
-            case OperandType::TENSOR_FLOAT16: {
-                floatToFloat16(srcBlob->buffer().as<float*>(), (_Float16*)destPtr, srcBlob->size());
-                break;
-            }
-            case OperandType::TENSOR_QUANT16_SYMM: {
-                floatToInt16(srcBlob->buffer().as<float*>(), (int16_t*)destPtr, srcBlob->size());
-                break;
-            }
-            case OperandType::TENSOR_QUANT16_ASYMM: {
-                floatToUInt16(srcBlob->buffer().as<float*>(), (uint16_t*)destPtr, srcBlob->size());
-                break;
+        //copy output from remote infer
+        //TODO: Add support for other OperandType
+        if (mRemoteCheck && mDetectionClient && mDetectionClient->get_status()) {
+            mDetectionClient->get_output_data(outputNodeName, (uint8_t*)destPtr, ngraphNw->getOutputShape(outIndex));
+        } else {
+            switch (operandType) {
+                case OperandType::TENSOR_INT32:
+                case OperandType::TENSOR_FLOAT32: {
+                    std::memcpy((uint8_t*)destPtr, srcBlob->buffer().as<uint8_t*>(),
+                                srcBlob->byteSize());
+                    break;
+                }
+                case OperandType::TENSOR_BOOL8: {
+                    floatToUint8(srcBlob->buffer().as<float*>(), (uint8_t*)destPtr, srcBlob->size());
+                    break;
+                }
+                case OperandType::TENSOR_QUANT8_ASYMM: {
+                    floatToUint8(srcBlob->buffer().as<float*>(), (uint8_t*)destPtr, srcBlob->size());
+                    break;
+                }
+                case OperandType::TENSOR_QUANT8_SYMM:
+                case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+                case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: {
+                    floatToint8(srcBlob->buffer().as<float*>(), (int8_t*)destPtr, srcBlob->size());
+                    break;
+                }
+                case OperandType::TENSOR_FLOAT16: {
+                    floatToFloat16(srcBlob->buffer().as<float*>(), (_Float16*)destPtr, srcBlob->size());
+                    break;
+                }
+                case OperandType::TENSOR_QUANT16_SYMM: {
+                    floatToInt16(srcBlob->buffer().as<float*>(), (int16_t*)destPtr, srcBlob->size());
+                    break;
+                }
+                case OperandType::TENSOR_QUANT16_ASYMM: {
+                    floatToUInt16(srcBlob->buffer().as<float*>(), (uint16_t*)destPtr, srcBlob->size());
+                    break;
+                }
+                default:
+                    std::memcpy((uint8_t*)destPtr, srcBlob->buffer().as<uint8_t*>(),
+                                srcBlob->byteSize());
+                    break;
             }
-            default:
-                std::memcpy((uint8_t*)destPtr, srcBlob->buffer().as<uint8_t*>(),
-                            srcBlob->byteSize());
-                break;
+        }
     }

@@ -465,6 +527,9 @@ static std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> executeSynch
         ALOGE("Failed to update the request pool infos");
         return {ErrorStatus::GENERAL_FAILURE, {}, kNoTiming};
     }
+    if (mRemoteCheck && mDetectionClient && mDetectionClient->get_status()) {
+        mDetectionClient->clear_data();
+    }

     if (measure == MeasureTiming::YES) {
         driverEnd = now();
diff --git a/BasePreparedModel.h b/BasePreparedModel.h
index ba9b2e2e6..c2ff83cb5 100755
--- a/BasePreparedModel.h
+++ b/BasePreparedModel.h
@@ -33,6 +33,7 @@
 #include "Driver.h"
 #include "IENetwork.h"
 #include "ModelManager.h"
+#include "DetectionClient.h"
 #include "utils.h"

 #if __ANDROID__
@@ -51,7 +52,8 @@ namespace nnhal {
 template <typename T>
 using vec = std::vector<T>;
 typedef uint8_t* memory;
-
+extern bool mRemoteCheck;
+extern std::shared_ptr<DetectionClient> mDetectionClient;
 class BasePreparedModel : public V1_3::IPreparedModel {
    public:
     BasePreparedModel(const Model& model) : mTargetDevice(IntelDeviceType::CPU) {
@@ -89,6 +91,8 @@ class BasePreparedModel : public V1_3::IPreparedModel {
                                  executeFenced_cb cb) override;

     virtual bool initialize();
+    virtual bool checkRemoteConnection();
+    virtual bool loadRemoteModel();

     std::shared_ptr<NnapiModelInfo> getModelInfo() { return mModelInfo; }
diff --git a/DetectionClient.cpp b/DetectionClient.cpp
new file mode 100644
index 000000000..7f36a8b91
--- /dev/null
+++ b/DetectionClient.cpp
@@ -0,0 +1,129 @@
+#include "DetectionClient.h"
+
+#undef LOG_TAG
+#define LOG_TAG "DetectionClient"
+
+std::string DetectionClient::prepare(bool& flag) {
+    RequestString request;
+    request.set_value("");
+    ReplyStatus reply;
+    ClientContext context;
+    time_point deadline = std::chrono::system_clock::now() + std::chrono::milliseconds(100);
+    context.set_deadline(deadline);
+
+    Status status = stub_->prepare(&context, request, &reply);
+
+    if (status.ok()) {
+        flag = reply.status();
+        return (flag ? "status True" : "status False");
+    } else {
+        return std::string(status.error_message());
+    }
+}
+
+Status DetectionClient::sendFile(std::string fileName,
+                                 std::unique_ptr<ClientWriter<RequestDataChunks> >& writer) {
+    RequestDataChunks request;
+    uint32_t CHUNK_SIZE = 1024 * 1024;
+    std::ifstream fin(fileName, std::ifstream::binary);
+    std::vector<char> buffer(CHUNK_SIZE, 0);
+    ALOGV("GRPC sendFile %s", fileName.c_str());
+    ALOGI("GRPC sendFile %d sized chunks", CHUNK_SIZE);
+
+    if (!fin.is_open()) ALOGE("GRPC sendFile file Open Error ");
+    while (!fin.eof()) {
+        fin.read(buffer.data(), buffer.size());
+        std::streamsize s = fin.gcount();
+        // ALOGI("GRPC sendFile read %d", s);
+        request.set_data(buffer.data(), s);
+        if (!writer->Write(request)) {
+            ALOGE("GRPC Broken Stream ");
+            break;
+        }
+    }
+
+    writer->WritesDone();
+    ALOGI("GRPC sendFile completed");
+    return writer->Finish();
+}
+
+std::string DetectionClient::sendIRs(bool& flag) {
+    ReplyStatus reply;
+    ClientContext context;
+    std::unique_ptr<ClientWriter<RequestDataChunks> > writerXml =
+        std::unique_ptr<ClientWriter<RequestDataChunks> >(stub_->sendXml(&context, &reply));
+    Status status = sendFile(IR_XML, writerXml);
+
+    if (status.ok()) {
+        ClientContext newContext;
+        std::unique_ptr<ClientWriter<RequestDataChunks> > writerBin =
+            std::unique_ptr<ClientWriter<RequestDataChunks> >(
+                stub_->sendBin(&newContext, &reply));
+        status = sendFile(IR_BIN, writerBin);
+        if (status.ok()) {
+            flag = reply.status();
+            return (flag ? "status True" : "status False");
+        }
+    }
+    return std::string(status.error_message());
+}
+
+void DetectionClient::add_input_data(std::string label, const uint8_t* buffer, std::vector<size_t> shape) {
+    const float* src;
+    size_t index;
+    size_t size = 1;
+
+    DataTensor* input = request.add_data_tensors();
+    input->set_node_name(label);
+    for (index = 0; index < shape.size(); index++) {
+        input->add_tensor_shape(shape[index]);
+        size *= shape[index];
+    }
+    input->set_data(buffer, size * sizeof(float));
+}
+
+void DetectionClient::get_output_data(std::string label, uint8_t* buffer, std::vector<size_t> shape) {
+    std::string src;
+    size_t index;
+    size_t size = 1;
+
+    for (index = 0; index < shape.size(); index++) {
+        size *= shape[index];
+    }
+    for (index = 0; index < reply.data_tensors_size(); index++) {
+        if (label.compare(reply.data_tensors(index).node_name()) == 0) {
+            src = reply.data_tensors(index).data();
+            memcpy(buffer, src.data(), src.length());
+            break;
+        }
+    }
+}
+
+void DetectionClient::clear_data() {
+    request.clear_data_tensors();
+    reply.clear_data_tensors();
+}
+
+std::string DetectionClient::remote_infer() {
+    ClientContext context;
+    time_point deadline = std::chrono::system_clock::now() + std::chrono::milliseconds(1000);
+    context.set_deadline(deadline);
+
+    status = stub_->getInferResult(&context, request, &reply);
+    if (status.ok()) {
+        if (reply.data_tensors_size() == 0) ALOGE("GRPC reply empty, ovms failure ?");
+        return "Success";
+    } else {
+        ALOGE("GRPC Error code: %d, message: %s", status.error_code(),
+              status.error_message().c_str());
+        return std::string(status.error_message());
+    }
+}
+
+bool DetectionClient::get_status() {
+    if (status.ok() && (reply.data_tensors_size() > 0))
+        return 1;
+    else {
+        return 0;
+    }
+}
\ No newline at end of file
diff --git a/DetectionClient.h b/DetectionClient.h
new file mode 100644
index 000000000..1b272f699
--- /dev/null
+++ b/DetectionClient.h
@@ -0,0 +1,52 @@
+#ifndef __DETECTION_CLIENT_H
+#define __DETECTION_CLIENT_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "nnhal_object_detection.grpc.pb.h"
+
+using grpc::Channel;
+using grpc::ClientContext;
+using grpc::ClientWriter;
+using grpc::Status;
+using objectDetection::DataTensor;
+using objectDetection::Detection;
+using objectDetection::ReplyDataTensors;
+using objectDetection::ReplyStatus;
+using objectDetection::RequestDataChunks;
+using objectDetection::RequestDataTensors;
+using objectDetection::RequestString;
+using time_point = std::chrono::system_clock::time_point;
+
+static std::string IR_XML("/data/vendor/neuralnetworks/ngraph_ir.xml");
+static std::string IR_BIN("/data/vendor/neuralnetworks/ngraph_ir.bin");
+
+class DetectionClient {
+public:
+    DetectionClient(std::shared_ptr<Channel> channel) : stub_(Detection::NewStub(channel)){}
+
+    std::string prepare(bool& flag);
+
+    Status sendFile(std::string fileName,
+                    std::unique_ptr<ClientWriter<RequestDataChunks> >& writer);
+
+    std::string sendIRs(bool& flag);
+
+    void add_input_data(std::string label, const uint8_t* buffer, std::vector<size_t> shape);
+    void get_output_data(std::string label, uint8_t* buffer, std::vector<size_t> shape);
+    void clear_data();
+    std::string remote_infer();
+    bool get_status();
+
+private:
+    std::unique_ptr<Detection::Stub> stub_;
+    RequestDataTensors request;
+    ReplyDataTensors reply;
+    Status status;
+};
+
+#endif
\ No newline at end of file
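The patch references proto/nnhal_object_detection.proto but the file itself is not in this excerpt. The definition below is inferred from the generated symbols the client uses (method set, streaming direction, message accessors); field numbers and scalar types are assumptions:

```proto
// Inferred sketch of proto/nnhal_object_detection.proto, which the genrules
// above compile. Names come from the generated code used by DetectionClient;
// field numbers and exact scalar types are assumptions.
syntax = "proto3";

package objectDetection;

message RequestString { string value = 1; }
message ReplyStatus { bool status = 1; }
message RequestDataChunks { bytes data = 1; }

message DataTensor {
    string node_name = 1;
    repeated uint32 tensor_shape = 2;
    bytes data = 3;
}

message RequestDataTensors { repeated DataTensor data_tensors = 1; }
message ReplyDataTensors { repeated DataTensor data_tensors = 1; }

service Detection {
    rpc prepare(RequestString) returns (ReplyStatus);
    rpc sendXml(stream RequestDataChunks) returns (ReplyStatus);
    rpc sendBin(stream RequestDataChunks) returns (ReplyStatus);
    rpc getInferResult(RequestDataTensors) returns (ReplyDataTensors);
}
```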
#include "ie_blob.h" // #include "ie_common.h" // #include "ie_core.hpp" @@ -56,6 +60,8 @@ class IENetwork : public IIENetwork { InferenceEngine::InferRequest getInferRequest() { return mInferRequest; } void queryState() {} void infer(); + bool getGrpcIpPort(char *ip_port); + }; } // namespace nnhal diff --git a/ModelManager.cpp b/ModelManager.cpp index 3cd2f104f..ad6f944dd 100644 --- a/ModelManager.cpp +++ b/ModelManager.cpp @@ -46,11 +46,11 @@ bool NnapiModelInfo::initializeRunTimeOperandInfo() { } to.scale = from.scale; + ALOGV("OperandType = %d\n", from.type); switch (from.type) { case OperandType::TENSOR_FLOAT32: case OperandType::FLOAT32: to.type = OperandType::TENSOR_FLOAT32; - ALOGD("OperandType = %d\n", from.type); break; case OperandType::INT32: case OperandType::UINT32: @@ -170,7 +170,7 @@ const uint8_t* NnapiModelInfo::GetOperandMemory(int index, uint32_t& lenOut) { V1_3::ErrorStatus NnapiModelInfo::setRunTimePoolInfosFromHidlMemories( const hidl_vec& pools) { - ALOGD("Number of pools: %zu", pools.size()); + ALOGV("Number of pools: %zu", pools.size()); mRequestPoolInfos.resize(pools.size()); for (size_t i = 0; i < pools.size(); i++) { auto& poolInfo = mRequestPoolInfos[i]; @@ -197,7 +197,7 @@ V1_3::ErrorStatus NnapiModelInfo::setRunTimePoolInfosFromHidlMemories( ErrorStatus NnapiModelInfo::setRunTimePoolInfosFromHidlMemories( const hidl_vec& pools) { - ALOGD("Number of pools: %zu", pools.size()); + ALOGV("Number of pools: %zu", pools.size()); mRequestPoolInfos.resize(pools.size()); for (size_t i = 0; i < pools.size(); i++) { @@ -229,7 +229,7 @@ void* NnapiModelInfo::getBlobFromMemoryPoolIn(const Request& request, uint32_t i operand.buffer = r.buffer + arg.location.offset; operand.length = arg.location.length; - ALOGI("%s Operand length:%d pointer:%p offset:%d pool index: %d", __func__, operand.length, + ALOGV("%s Operand length:%d pointer:%p offset:%d pool index: %d", __func__, operand.length, (r.buffer + arg.location.offset), arg.location.offset, poolIndex); rBufferLength = operand.length; @@ -244,7 +244,7 @@ void* NnapiModelInfo::getBlobFromMemoryPoolOut(const Request& request, uint32_t nnAssert(poolIndex < mRequestPoolInfos.size()); auto& r = mRequestPoolInfos[poolIndex]; - ALOGD("%s lifetime:%d location offset:%d length:%d pool index:%d", __func__, operand.lifetime, + ALOGV("%s lifetime:%d location offset:%d length:%d pool index:%d", __func__, operand.lifetime, arg.location.offset, arg.location.length, poolIndex); if (arg.dimensions.size() > 0) { @@ -258,7 +258,7 @@ void* NnapiModelInfo::getBlobFromMemoryPoolOut(const Request& request, uint32_t operand.buffer = r.buffer + arg.location.offset; operand.length = arg.location.length; rBufferLength = operand.length; - ALOGI("%s Operand length:%d pointer:%p", __func__, operand.length, + ALOGV("%s Operand length:%d pointer:%p", __func__, operand.length, (r.buffer + arg.location.offset)); return (r.buffer + arg.location.offset); } diff --git a/ModelManager.h b/ModelManager.h index e432f2bc0..a666579b1 100755 --- a/ModelManager.h +++ b/ModelManager.h @@ -177,7 +177,7 @@ class NnapiModelInfo { const hidl_vec& pools); bool updateRequestPoolInfos() { - for (auto runtimeInfo : mRequestPoolInfos) { + for (auto& runtimeInfo : mRequestPoolInfos) { runtimeInfo.update(); } @@ -187,7 +187,7 @@ class NnapiModelInfo { std::vector getOutputShapes() { return mOutputShapes; } void unmapRuntimeMemPools() { - for (auto runtimeInfo : mRequestPoolInfos) { + for (auto& runtimeInfo : mRequestPoolInfos) { runtimeInfo.unmap_mem(); } } diff --git 
diff --git a/cpu/CpuPreparedModel.cpp b/cpu/CpuPreparedModel.cpp
index 1c3fa986e..22df0a521 100755
--- a/cpu/CpuPreparedModel.cpp
+++ b/cpu/CpuPreparedModel.cpp
@@ -31,6 +31,7 @@ bool CpuPreparedModel::initialize() {
         ALOGE("Failed to initialize Model runtime parameters!!");
         return false;
     }
+    BasePreparedModel::checkRemoteConnection();
     mNgraphNetCreator = std::make_shared<NgraphNetworkCreator>(mModelInfo, mTargetDevice);

     if (!mNgraphNetCreator->validateOperations()) return false;
@@ -50,6 +51,12 @@ bool CpuPreparedModel::initialize() {
 #endif
         mPlugin = std::make_shared<IENetwork>(cnnNetworkPtr);
         mPlugin->loadNetwork();
+        if(mRemoteCheck) {
+            auto resp = loadRemoteModel();
+            ALOGD("%s Load Remote Model returns %d", __func__, resp);
+        } else {
+            ALOGD("%s Remote connection unavailable", __func__);
+        }
     } catch (const std::exception& ex) {
         ALOGE("%s Exception !!! %s", __func__, ex.what());
         return false;
diff --git a/ngraph_creator/Android.bp b/ngraph_creator/Android.bp
index 3d1072752..c5028db30 100755
--- a/ngraph_creator/Android.bp
+++ b/ngraph_creator/Android.bp
@@ -95,9 +95,9 @@ cc_library_static {
     header_libs: [
         "libngraph_headers",
         "libinference_headers",
+        "libnnhal_headers",
     ],
     include_dirs: [
-        "vendor/intel/external/project-celadon/nn-hal",
         "packages/modules/NeuralNetworks/common/include",
         "packages/modules/NeuralNetworks/runtime/include",
         "external/mesa3d/include/android_stub"
diff --git a/ngraph_creator/include/NgraphNetworkCreator.hpp b/ngraph_creator/include/NgraphNetworkCreator.hpp
index c2ff98d6a..3d51e8a1d 100644
--- a/ngraph_creator/include/NgraphNetworkCreator.hpp
+++ b/ngraph_creator/include/NgraphNetworkCreator.hpp
@@ -27,6 +27,7 @@ class NgraphNetworkCreator {
     bool validateOperations();

     const std::string& getNodeName(uint32_t index);
+    std::vector<size_t> getOutputShape(uint32_t index);

     std::shared_ptr<ngraph::Function> generateGraph();
 };
diff --git a/ngraph_creator/include/NgraphNodes.hpp b/ngraph_creator/include/NgraphNodes.hpp
index 783d23634..79f95055f 100644
--- a/ngraph_creator/include/NgraphNodes.hpp
+++ b/ngraph_creator/include/NgraphNodes.hpp
@@ -35,6 +35,7 @@ class NgraphNodes {

     const std::string& getNodeName(size_t index);
     void removeInputParameter(std::string name, size_t index);
+    std::vector<size_t> getOutputShape(size_t index);

     std::shared_ptr<ngraph::Function> generateGraph();
     // Setting the node name to empty string "". Caller of getNodeName should validate against "".
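getOutputShape() is declared in both classes here, but its definition is not in this excerpt. A plausible minimal sketch, assuming NgraphNodes keeps its ngraph output handles indexed by operand index (the member names below are guesses):

```cpp
// Sketch only: the real definitions are not shown in this patch excerpt, and
// mOutputAtOperandIndex / mNgraphNodes are guessed member names.
std::vector<size_t> NgraphNodes::getOutputShape(size_t index) {
    // ngraph::Shape publicly derives from std::vector<size_t>, so the static
    // shape of the stored output converts directly.
    return mOutputAtOperandIndex[index].get_shape();
}

std::vector<size_t> NgraphNetworkCreator::getOutputShape(uint32_t index) {
    return mNgraphNodes->getOutputShape(index);  // delegate, like getNodeName()
}
```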
diff --git a/ngraph_creator/operations/include/DepthToSpace.hpp b/ngraph_creator/operations/include/DepthToSpace.hpp
index 46242e1e7..51dacccb0 100644
--- a/ngraph_creator/operations/include/DepthToSpace.hpp
+++ b/ngraph_creator/operations/include/DepthToSpace.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class DepthToSpace : public OperationsBase {
    public:
     DepthToSpace(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/include/Dequantize.hpp b/ngraph_creator/operations/include/Dequantize.hpp
index a77fc86d3..4e9248a9e 100644
--- a/ngraph_creator/operations/include/Dequantize.hpp
+++ b/ngraph_creator/operations/include/Dequantize.hpp
@@ -11,7 +11,6 @@ class Dequantize : public OperationsBase {
    public:
     Dequantize(int operationIndex);
     std::shared_ptr<ngraph::Node> createNode() override;
-    bool validate() override;
 };

 }  // namespace nnhal
diff --git a/ngraph_creator/operations/include/Div.hpp b/ngraph_creator/operations/include/Div.hpp
index 0e3f9686c..6a78e9b53 100644
--- a/ngraph_creator/operations/include/Div.hpp
+++ b/ngraph_creator/operations/include/Div.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class Div : public OperationsBase {
    public:
     Div(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/include/L2Pooling2D.hpp b/ngraph_creator/operations/include/L2Pooling2D.hpp
index 354ada6bb..ba9e7015f 100644
--- a/ngraph_creator/operations/include/L2Pooling2D.hpp
+++ b/ngraph_creator/operations/include/L2Pooling2D.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class L2Pooling2D : public OperationsBase {
    public:
     L2Pooling2D(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/include/Less.hpp b/ngraph_creator/operations/include/Less.hpp
index 45058c56b..612df2660 100644
--- a/ngraph_creator/operations/include/Less.hpp
+++ b/ngraph_creator/operations/include/Less.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class Less : public OperationsBase {
    public:
     Less(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/include/Logistic.hpp b/ngraph_creator/operations/include/Logistic.hpp
index 85dea4a8c..d7bdf09fb 100644
--- a/ngraph_creator/operations/include/Logistic.hpp
+++ b/ngraph_creator/operations/include/Logistic.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class Logistic : public OperationsBase {
    public:
     Logistic(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/include/MaxPool2d.hpp b/ngraph_creator/operations/include/MaxPool2d.hpp
index 52fd38e1f..58acc2645 100644
--- a/ngraph_creator/operations/include/MaxPool2d.hpp
+++ b/ngraph_creator/operations/include/MaxPool2d.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class MaxPool2d : public OperationsBase {
    public:
     MaxPool2d(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/include/Mul.hpp b/ngraph_creator/operations/include/Mul.hpp
index 77a70e704..77687ba1e 100644
--- a/ngraph_creator/operations/include/Mul.hpp
+++ b/ngraph_creator/operations/include/Mul.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class Mul : public OperationsBase {
    public:
     Mul(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/include/Quantize.hpp b/ngraph_creator/operations/include/Quantize.hpp
index 95940a0c2..73c36c7e8 100644
--- a/ngraph_creator/operations/include/Quantize.hpp
+++ b/ngraph_creator/operations/include/Quantize.hpp
@@ -11,7 +11,6 @@ class Quantize : public OperationsBase {
    public:
     Quantize(int operationIndex);
     std::shared_ptr<ngraph::Node> createNode() override;
-    bool validate() override;
     void connectOperationToGraph() override;
 };

diff --git a/ngraph_creator/operations/include/ReduceMin.hpp b/ngraph_creator/operations/include/ReduceMin.hpp
index 44deef838..577d5f1fd 100644
--- a/ngraph_creator/operations/include/ReduceMin.hpp
+++ b/ngraph_creator/operations/include/ReduceMin.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class ReduceMin : public OperationsBase {
    public:
     ReduceMin(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/include/ReduceSum.hpp b/ngraph_creator/operations/include/ReduceSum.hpp
index 145170f6b..62591963c 100644
--- a/ngraph_creator/operations/include/ReduceSum.hpp
+++ b/ngraph_creator/operations/include/ReduceSum.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class ReduceSum : public OperationsBase {
    public:
     ReduceSum(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/include/Relu.hpp b/ngraph_creator/operations/include/Relu.hpp
index 730fbe4df..cff5fd250 100644
--- a/ngraph_creator/operations/include/Relu.hpp
+++ b/ngraph_creator/operations/include/Relu.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class Relu : public OperationsBase {
    public:
     Relu(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/include/Relu1.hpp b/ngraph_creator/operations/include/Relu1.hpp
index fc4114a03..81127ca0f 100644
--- a/ngraph_creator/operations/include/Relu1.hpp
+++ b/ngraph_creator/operations/include/Relu1.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class Relu1 : public OperationsBase {
    public:
     Relu1(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/include/Relu6.hpp b/ngraph_creator/operations/include/Relu6.hpp
index cbd1cb715..b06aeadbc 100644
--- a/ngraph_creator/operations/include/Relu6.hpp
+++ b/ngraph_creator/operations/include/Relu6.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class Relu6 : public OperationsBase {
    public:
     Relu6(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/include/Softmax.hpp b/ngraph_creator/operations/include/Softmax.hpp
index fbca45961..4241b7209 100644
--- a/ngraph_creator/operations/include/Softmax.hpp
+++ b/ngraph_creator/operations/include/Softmax.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class Softmax : public OperationsBase {
    public:
     Softmax(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/include/Sub.hpp b/ngraph_creator/operations/include/Sub.hpp
index ca99958b2..af217602d 100644
--- a/ngraph_creator/operations/include/Sub.hpp
+++ b/ngraph_creator/operations/include/Sub.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class Sub : public OperationsBase {
    public:
     Sub(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/include/Tanh.hpp b/ngraph_creator/operations/include/Tanh.hpp
index 90e92f8a8..3ecfd5019 100644
--- a/ngraph_creator/operations/include/Tanh.hpp
+++ b/ngraph_creator/operations/include/Tanh.hpp
@@ -10,7 +10,6 @@ namespace nnhal {
 class Tanh : public OperationsBase {
    public:
     Tanh(int operationIndex);
-    bool validate() override;
     std::shared_ptr<ngraph::Node> createNode() override;
 };

diff --git a/ngraph_creator/operations/src/Add.cpp b/ngraph_creator/operations/src/Add.cpp
index 3058086b1..702845cc0 100644
--- a/ngraph_creator/operations/src/Add.cpp
+++ b/ngraph_creator/operations/src/Add.cpp
@@ -12,29 +12,14 @@ Add::Add(int operationIndex) : OperationsBase(operationIndex) {
 }

 bool Add::validate() {
-    auto operandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0);
-    auto operandIndex2 = sModelInfo->getOperationInput(mNnapiOperationIndex, 1);
-    const auto& elementType1 = sModelInfo->getOperandType(operandIndex1);
-    const auto& elementType2 = sModelInfo->getOperandType(operandIndex2);
-    if ( !isValidInputTensor(0) || !isValidInputTensor(1) ) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
+    ALOGV("%s PASSED", __func__);

-    // check if both tensors are of same type
-    if(elementType1 != elementType2 ) {
-        ALOGE("%s Input type mismatch", __func__);
+    const auto& activationIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1);
+    if (!sModelInfo->isOperandLifeTimeConst(activationIndex)) {
+        ALOGE("%s Only Constant supported for specifying Activation", __func__);
         return false;
-    } else if ( elementType1 == OperandType::TENSOR_INT32 ) {
-        //In 1.3 For a {@link OperandType::TENSOR_INT32} tensor,
-        //the {@link FusedActivationFunc} must be "NONE".
-        auto activationFn = sModelInfo->ParseOperationInput<int32_t>(mNnapiOperationIndex, 2);
-        if (activationFn != 0) {
-            ALOGE("%s Activation type must be none for TENSOR_INT32 type", __func__);
-            return false;
-        }
     }
-    ALOGV("%s PASSED", __func__);
+
     return true;
 }
diff --git a/ngraph_creator/operations/src/AveragePool2D.cpp b/ngraph_creator/operations/src/AveragePool2D.cpp
index dc49baed8..c5abdd6af 100644
--- a/ngraph_creator/operations/src/AveragePool2D.cpp
+++ b/ngraph_creator/operations/src/AveragePool2D.cpp
@@ -18,11 +18,6 @@ bool AveragePool2D::validate() {
         ALOGE("%s Invalid dimensions size for input(%lu)", __func__, inputDimensionsSize);
         return false;
     }
-    //check Input are of valid dimension or not
-    if ( !isValidInputTensor(0)) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }

     ALOGV("%s PASSED", __func__);
     return true;
@@ -58,6 +53,9 @@ std::shared_ptr<ngraph::Node> AveragePool2D::createNode() {
         isExplicit = true;
     } else if (inputsSize >= 7 && inputsSize <= 8) {
         isImplicit = true;
+    } else {
+        ALOGE("%s inputsSize %lu NOT SUPPORTED", __func__, inputsSize);
+        return inputNode;
     }

     if (isExplicit) {
diff --git a/ngraph_creator/operations/src/Concat.cpp b/ngraph_creator/operations/src/Concat.cpp
index bdc4425a9..ea1f52dd0 100644
--- a/ngraph_creator/operations/src/Concat.cpp
+++ b/ngraph_creator/operations/src/Concat.cpp
@@ -31,12 +31,12 @@ std::shared_ptr<ngraph::Node> Concat::createNode() {
     auto axis = sModelInfo->ParseOperationInput<int32_t>(mNnapiOperationIndex, n);  // n: concatenation axis
     std::vector<ngraph::Output<ngraph::Node>> inputs;
-    ALOGD("createNode n %lu, axis %d", n, axis);
+    ALOGV("createNode n %lu, axis %d", n, axis);

     for (size_t i = 0; i < n; i++) {
         auto inputIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, i);
         auto inputOp = getInputNode(i);
         const auto op = sModelInfo->getOperand(inputIndex);
-        ALOGD("createNode inputIndex %d, lifetime %d", inputIndex, op.lifetime);
+        ALOGV("createNode inputIndex %d, lifetime %d", inputIndex, op.lifetime);
         inputs.push_back(inputOp);
     }
diff --git a/ngraph_creator/operations/src/Conv2d.cpp b/ngraph_creator/operations/src/Conv2d.cpp
index 4f210399f..732fe7669 100644
--- a/ngraph_creator/operations/src/Conv2d.cpp
+++ b/ngraph_creator/operations/src/Conv2d.cpp
@@ -39,6 +39,8 @@ bool Conv2d::validate() {
 }

 std::shared_ptr<ngraph::Node> Conv2d::createNode() {
+    std::shared_ptr<ngraph::Node> inputNode;
+    inputNode = getInputNode(0);
     const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex);
     ALOGD("%s inputsSize %lu", __func__, inputsSize);

@@ -49,6 +51,9 @@ std::shared_ptr<ngraph::Node> Conv2d::createNode() {
         isExplicit = true;
     } else if (inputsSize >= 7 && inputsSize <= 10) {
         isImplicit = true;
+    } else {
+        ALOGE("%s inputsSize %lu NOT SUPPORTED", __func__, inputsSize);
+        return inputNode;
     }

     int32_t padding_left, padding_right;
@@ -108,15 +113,6 @@ std::shared_ptr<ngraph::Node> Conv2d::createNode() {
         if (layout) useNchw = true;

         auto_pad = ngraph::op::PadType::EXPLICIT;
-        {
-            if (useNchw) {
-                input_width = inputDimensions[3];
-                input_height = inputDimensions[2];
-            } else {
-                input_width = inputDimensions[2];
-                input_height = inputDimensions[1];
-            }
-        }
     }

     if (isImplicit) {
@@ -172,10 +168,9 @@ std::shared_ptr<ngraph::Node> Conv2d::createNode() {
         }
     }

-    std::shared_ptr<ngraph::Node> inputNode, filterNode, biasNode;
+    std::shared_ptr<ngraph::Node> filterNode, biasNode;
     const auto& biasIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2);

-    inputNode = getInputNode(0);
     filterNode = getInputNode(1);
     biasNode = getInputNode(2);
diff --git a/ngraph_creator/operations/src/DepthToSpace.cpp b/ngraph_creator/operations/src/DepthToSpace.cpp
index 0ffb807c3..dbf30743f 100644
--- a/ngraph_creator/operations/src/DepthToSpace.cpp
+++ b/ngraph_creator/operations/src/DepthToSpace.cpp
@@ -11,30 +11,6 @@ DepthToSpace::DepthToSpace(int operationIndex) : OperationsBase(operationIndex)
     mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
 }

-bool DepthToSpace::validate() {
-    // Check input rank
-    const auto inputRank = getInputOperandDimensions(0).size();
-
-    if (inputRank != 4) {
-        ALOGE("%s Invalid dimension of rank %d", __func__, inputRank);
-        return false;
-    }
-
-    if ( !isValidInputTensor(0)) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    auto block_size = sModelInfo->ParseOperationInput<int32_t>(mNnapiOperationIndex, 1);
-    if(block_size < 1) {
-        ALOGE("%s Invalid block size %d", __func__, block_size);
-        return false;
-    }
-
-    ALOGV("%s PASSED", __func__);
-    return true;
-}
-
 std::shared_ptr<ngraph::Node> DepthToSpace::createNode() {
     // Creating input nodes
     std::shared_ptr<ngraph::Node> input;
diff --git a/ngraph_creator/operations/src/DepthwiseConv2d.cpp b/ngraph_creator/operations/src/DepthwiseConv2d.cpp
index 3507c67c4..dd67aabaa 100644
--- a/ngraph_creator/operations/src/DepthwiseConv2d.cpp
+++ b/ngraph_creator/operations/src/DepthwiseConv2d.cpp
@@ -41,6 +41,8 @@ bool DepthwiseConv2d::validate() {
 }

 std::shared_ptr<ngraph::Node> DepthwiseConv2d::createNode() {
+    std::shared_ptr<ngraph::Node> inputNode;
+    inputNode = getInputNode(0);
     const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex);
     ALOGD("%s inputsSize %lu", __func__, inputsSize);
     bool isImplicit = false, isExplicit = false;
@@ -50,6 +52,9 @@ std::shared_ptr<ngraph::Node> DepthwiseConv2d::createNode() {
         isExplicit = true;
     } else if (inputsSize >= 8 && inputsSize <= 11) {
         isImplicit = true;
+    } else {
+        ALOGE("%s inputsSize %lu NOT SUPPORTED", __func__, inputsSize);
+        return inputNode;
     }

     int32_t padding_left, padding_right;
@@ -123,8 +128,7 @@ std::shared_ptr<ngraph::Node> DepthwiseConv2d::createNode() {
             }
         }
     }
-
-    if (isImplicit) {
+    else if (isImplicit) {
         padding_scheme = sModelInfo->ParseOperationInput<int32_t>(mNnapiOperationIndex, 3);

         stride_width = sModelInfo->ParseOperationInput<int32_t>(mNnapiOperationIndex, 4);
@@ -181,10 +185,9 @@ std::shared_ptr<ngraph::Node> DepthwiseConv2d::createNode() {
         }
     }

-    std::shared_ptr<ngraph::Node> inputNode, filterNode, biasNode;
+    std::shared_ptr<ngraph::Node> filterNode, biasNode;
     const auto& biasIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2);

-    inputNode = getInputNode(0);
     filterNode = getInputNode(1);
     biasNode = getInputNode(2);
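In the Conv2d and DepthwiseConv2d hunks above (and GroupedConv2d below), the explicit-padding branches can drop their input_width/input_height reads because those values are only needed to derive implicit (SAME) padding. For reference, the standard NNAPI SAME-padding arithmetic the implicit branches rely on (generic sketch, not this file's exact code):

```cpp
// Generic sketch of implicit (SAME) padding: the output covers
// ceil(input/stride) positions, and the filter overhang is split between the
// begin and end pads, with the extra pixel going to the end.
#include <algorithm>
#include <cstdint>

void calculateSamePadding(int32_t input, int32_t stride, int32_t filter,
                          int32_t& pad_begin, int32_t& pad_end) {
    int32_t out = (input + stride - 1) / stride;  // ceil(input / stride)
    int32_t needed = std::max<int32_t>(0, (out - 1) * stride + filter - input);
    pad_begin = needed / 2;
    pad_end = needed - pad_begin;
}
```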
diff --git a/ngraph_creator/operations/src/Dequantize.cpp b/ngraph_creator/operations/src/Dequantize.cpp
index d3a529c45..85c19eb7b 100644
--- a/ngraph_creator/operations/src/Dequantize.cpp
+++ b/ngraph_creator/operations/src/Dequantize.cpp
@@ -11,16 +11,6 @@ Dequantize::Dequantize(int operationIndex) : OperationsBase(operationIndex) {
     mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
 }

-bool Dequantize::validate() {
-    if ( !isValidInputTensor(0)) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    ALOGV("%s PASSED", __func__);
-    return true;
-}
-
 std::shared_ptr<ngraph::Node> Dequantize::createNode() {
     // Creating input nodes
     std::shared_ptr<ngraph::Node> input, outputNode;
diff --git a/ngraph_creator/operations/src/Div.cpp b/ngraph_creator/operations/src/Div.cpp
index 37f0d8805..a89995f8f 100644
--- a/ngraph_creator/operations/src/Div.cpp
+++ b/ngraph_creator/operations/src/Div.cpp
@@ -11,33 +11,6 @@ Div::Div(int operationIndex) : OperationsBase(operationIndex) {
     mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
 }

-bool Div::validate() {
-    auto operandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0);
-    auto operandIndex2 = sModelInfo->getOperationInput(mNnapiOperationIndex, 1);
-    const auto& elementType1 = sModelInfo->getOperandType(operandIndex1);
-    const auto& elementType2 = sModelInfo->getOperandType(operandIndex2);
-    if ( !isValidInputTensor(0) || !isValidInputTensor(1) ) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    // check if both tensors are of same type
-    if(elementType1 != elementType2 ) {
-        ALOGE("%s Input type mismatch", __func__);
-        return false;
-    } else if ( elementType1 == OperandType::TENSOR_INT32 ) {
-        //In 1.3 For a {@link OperandType::TENSOR_INT32} tensor,
-        //the {@link FusedActivationFunc} must be "NONE".
-        auto activationFn = sModelInfo->ParseOperationInput<int32_t>(mNnapiOperationIndex, 2);
-        if (activationFn != 0) {
-            ALOGE("%s Activation type must be none for TENSOR_INT32 type", __func__);
-            return false;
-        }
-    }
-    ALOGV("%s PASSED", __func__);
-    return true;
-}
-
 std::shared_ptr<ngraph::Node> Div::createNode() {
     // Creating input nodes
     auto input1 = getInputNode(0);
diff --git a/ngraph_creator/operations/src/FullyConnected.cpp b/ngraph_creator/operations/src/FullyConnected.cpp
index 5dcb7218e..7771c66fa 100644
--- a/ngraph_creator/operations/src/FullyConnected.cpp
+++ b/ngraph_creator/operations/src/FullyConnected.cpp
@@ -25,18 +25,6 @@ bool FullyConnected::validate() {
         ALOGE("%s Invalid input parameter dimensions!!!", __func__);
         return false;
     }
-    //check operand lifetime
-    const auto& dimsOperandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0);
-    const auto& dimsOperandIndex2 = sModelInfo->getOperationInput(mNnapiOperationIndex, 1);
-    const auto& dimsOperandIndex3 = sModelInfo->getOperationInput(mNnapiOperationIndex, 2);
-    const auto& dimsOperandIndex4 = sModelInfo->getOperationInput(mNnapiOperationIndex, 3);
-    if(!sModelInfo->isOperandLifeTimeConst(dimsOperandIndex1) ||
-       !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex2) ||
-       !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex3) ||
-       !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex4)) {
-        ALOGE("%s Only Const lifetime is supported", __func__);
-        return false;
-    }

     ALOGD("%s succeeded", __func__);
     return true;
diff --git a/ngraph_creator/operations/src/GroupedConv2d.cpp b/ngraph_creator/operations/src/GroupedConv2d.cpp
index 7c09a2fd6..6deca60d3 100644
--- a/ngraph_creator/operations/src/GroupedConv2d.cpp
+++ b/ngraph_creator/operations/src/GroupedConv2d.cpp
@@ -35,6 +35,8 @@ bool GroupedConv2d::validate() {
 }

 std::shared_ptr<ngraph::Node> GroupedConv2d::createNode() {
+    std::shared_ptr<ngraph::Node> inputNode;
+    inputNode = getInputNode(0);
     const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex);
     bool isImplicit = false, isExplicit = false;

@@ -43,17 +45,20 @@ std::shared_ptr<ngraph::Node> GroupedConv2d::createNode() {
         isExplicit = true;
     } else if (inputsSize >= 8 && inputsSize <= 9) {
         isImplicit = true;
+    } else {
+        ALOGE("%s inputsSize %lu NOT SUPPORTED", __func__, inputsSize);
+        return inputNode;
     }

-    int32_t padding_left, padding_right;
-    int32_t padding_top, padding_bottom;
+    int32_t padding_left = 0, padding_right = 0;
+    int32_t padding_top = 0, padding_bottom = 0;
     int32_t stride_width, stride_height;
     int32_t dilation_width_factor = 1, dilation_height_factor = 1;
     int32_t number_groups;
     int32_t activationFn;
     int32_t layout = 0;
     int32_t padding_scheme;
-    int32_t input_width, input_height, input_channel;
+    int32_t input_width, input_height;
     int32_t filter_width, filter_height;
     bool useNchw = false;
     std::vector<size_t> strides;
@@ -89,17 +94,6 @@ std::shared_ptr<ngraph::Node> GroupedConv2d::createNode() {
         if (layout) useNchw = true;

         auto_pad = ngraph::op::PadType::EXPLICIT;
-        {
-            if (useNchw) {  // NCHW
-                input_width = inputDimensions[3];
-                input_height = inputDimensions[2];
-                input_channel = inputDimensions[1];
-            } else {  // NHWC
-                input_width = inputDimensions[2];
-                input_height = inputDimensions[1];
-                input_channel = inputDimensions[3];
-            }
-        }
     }

     if (isImplicit) {
@@ -120,11 +114,9 @@ std::shared_ptr<ngraph::Node> GroupedConv2d::createNode() {
         if (useNchw) {
             input_width = inputDimensions[3];
             input_height = inputDimensions[2];
-            input_channel = inputDimensions[1];
         } else {
             input_width = inputDimensions[2];
             input_height = inputDimensions[1];
-            input_channel = inputDimensions[3];
         }

         if (padding_scheme == 1) {
@@ -144,10 +136,9 @@ std::shared_ptr<ngraph::Node> GroupedConv2d::createNode() {
         }
     }

-    std::shared_ptr<ngraph::Node> inputNode, filterNode, biasNode;
+    std::shared_ptr<ngraph::Node> filterNode, biasNode;
     const auto& biasIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2);

-    inputNode = getInputNode(0);
     filterNode = getInputNode(1);
     biasNode = getInputNode(2);
diff --git a/ngraph_creator/operations/src/L2Pooling2D.cpp b/ngraph_creator/operations/src/L2Pooling2D.cpp
index 0e11e52a7..c23600a0f 100644
--- a/ngraph_creator/operations/src/L2Pooling2D.cpp
+++ b/ngraph_creator/operations/src/L2Pooling2D.cpp
@@ -11,23 +11,9 @@ L2Pooling2D::L2Pooling2D(int operationIndex) : OperationsBase(operationIndex) {
     mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
 }

-bool L2Pooling2D::validate() {
-    const auto& inputDimensionsSize = getInputOperandDimensions(0).size();
-    if (inputDimensionsSize != 4) {
-        ALOGE("%s Invalid dimensions size for input(%lu)", __func__, inputDimensionsSize);
-        return false;
-    }
-    //check Input are of valid dimension or not
-    if ( !isValidInputTensor(0)) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    ALOGV("%s PASSED", __func__);
-    return true;
-}
-
 std::shared_ptr<ngraph::Node> L2Pooling2D::createNode() {
+    std::shared_ptr<ngraph::Node> inputNode;
+    inputNode = getInputNode(0);
     const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex);
     bool isImplicit = false, isExplicit = false;

@@ -35,10 +21,13 @@ std::shared_ptr<ngraph::Node> L2Pooling2D::createNode() {
         isExplicit = true;
     } else if (inputsSize >= 7 && inputsSize <= 8) {
         isImplicit = true;
+    } else {
+        ALOGE("%s inputsSize %lu NOT SUPPORTED", __func__, inputsSize);
+        return inputNode;
     }

-    int32_t padding_left, padding_right;
-    int32_t padding_top, padding_bottom;
+    int32_t padding_left = 0, padding_right = 0;
+    int32_t padding_top = 0, padding_bottom = 0;
     int32_t stride_width, stride_height;
     int32_t activationFn;
     int32_t layout = 0;
@@ -75,13 +64,6 @@ std::shared_ptr<ngraph::Node> L2Pooling2D::createNode() {
         if (layout) useNchw = true;

         auto_pad = ngraph::op::PadType::EXPLICIT;
-        if (useNchw) {
-            input_width = inputDimensions[3];
-            input_height = inputDimensions[2];
-        } else {
-            input_width = inputDimensions[2];
-            input_height = inputDimensions[1];
-        }
     }

     if (isImplicit) {
@@ -126,8 +108,7 @@ std::shared_ptr<ngraph::Node> L2Pooling2D::createNode() {
         }
     }

-    std::shared_ptr<ngraph::Node> inputNode, inputSquared, sqrtOutput;
-    inputNode = getInputNode(0);
+    std::shared_ptr<ngraph::Node> inputSquared, sqrtOutput;
     inputSquared = std::make_shared(inputNode, inputNode);

     if (!useNchw) {
diff --git a/ngraph_creator/operations/src/Less.cpp b/ngraph_creator/operations/src/Less.cpp
index 38f5b328a..c521414b3 100644
--- a/ngraph_creator/operations/src/Less.cpp
+++ b/ngraph_creator/operations/src/Less.cpp
@@ -11,25 +11,6 @@ Less::Less(int operationIndex) : OperationsBase(operationIndex) {
     mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
 }

-bool Less::validate() {
-    auto operandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0);
-    auto operandIndex2 = sModelInfo->getOperationInput(mNnapiOperationIndex, 1);
-    const auto& elementType1 = sModelInfo->getOperandType(operandIndex1);
-    const auto& elementType2 = sModelInfo->getOperandType(operandIndex2);
-    if ( !isValidInputTensor(0) || !isValidInputTensor(1) ) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    // check if both tensors are of same type
-    if(elementType1 != elementType2 ) {
-        ALOGE("%s Input type mismatch", __func__);
-        return false;
-    }
-
-    ALOGV("%s PASSED", __func__);
-    return true;
-}
 std::shared_ptr<ngraph::Node> Less::createNode() {
     // Creating input nodes
     std::shared_ptr<ngraph::Node> input1, input2;
diff --git a/ngraph_creator/operations/src/Logistic.cpp b/ngraph_creator/operations/src/Logistic.cpp
index a6aa88e46..f134cb956 100644
--- a/ngraph_creator/operations/src/Logistic.cpp
+++ b/ngraph_creator/operations/src/Logistic.cpp
@@ -11,22 +11,6 @@ Logistic::Logistic(int operationIndex) : OperationsBase(operationIndex) {
     mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
 }

-bool Logistic::validate() {
-    const auto& inputDimensionsSize = getInputOperandDimensions(0).size();
-    if (inputDimensionsSize > 4) {
-        ALOGE("%s Invalid dimensions size for input(%lu)", __func__, inputDimensionsSize);
-        return false;
-    }
-    //check Input are of valid dimension or not
-    if ( !isValidInputTensor(0)) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    ALOGV("%s PASSED", __func__);
-    return true;
-}
-
 std::shared_ptr<ngraph::Node> Logistic::createNode() {
     // Creating input nodes
     std::shared_ptr<ngraph::Node> input;
diff --git a/ngraph_creator/operations/src/MaxPool2d.cpp b/ngraph_creator/operations/src/MaxPool2d.cpp
index ab613f52a..639990b6a 100644
--- a/ngraph_creator/operations/src/MaxPool2d.cpp
+++ b/ngraph_creator/operations/src/MaxPool2d.cpp
@@ -11,23 +11,9 @@ MaxPool2d::MaxPool2d(int operationIndex) : OperationsBase(operationIndex) {
     mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
 }

-bool MaxPool2d::validate() {
-    // Check Input Dimension size
-    const auto& inputDimensionsSize = getInputOperandDimensions(0).size();
-    if (inputDimensionsSize != 4) {
-        ALOGE("%s Invalid dimensions size for input(%lu)", __func__, inputDimensionsSize);
-        return false;
-    }
-    //check Input are of valid dimension or not
-    if ( !isValidInputTensor(0)) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    ALOGV("%s PASSED", __func__);
-    return true;
-}
 std::shared_ptr<ngraph::Node> MaxPool2d::createNode() {
+    std::shared_ptr<ngraph::Node> inputNode;
+    inputNode = getInputNode(0);
     const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex);
     ALOGD("%s inputsSize %lu", __func__, inputsSize);

@@ -37,6 +23,9 @@ std::shared_ptr<ngraph::Node> MaxPool2d::createNode() {
         isExplicit = true;
     } else if (inputsSize >= 7 && inputsSize <= 8) {
         isImplicit = true;
+    } else {
+        ALOGE("%s inputsSize %lu NOT SUPPORTED", __func__, inputsSize);
+        return inputNode;
     }

     int32_t padding_left, padding_right;
@@ -77,13 +66,6 @@ std::shared_ptr<ngraph::Node> MaxPool2d::createNode() {
         if (layout) useNchw = true;

         auto_pad = ngraph::op::PadType::EXPLICIT;
-        if (useNchw) {
-            input_width = inputDimensions[3];
-            input_height = inputDimensions[2];
-        } else {
-            input_width = inputDimensions[2];
-            input_height = inputDimensions[1];
-        }
     }

     if (isImplicit) {
@@ -128,9 +110,6 @@ std::shared_ptr<ngraph::Node> MaxPool2d::createNode() {
         }
     }

-    std::shared_ptr<ngraph::Node> inputNode;
-    inputNode = getInputNode(0);
-
     if (!useNchw) {  // No conversion needed if useNchw set
         inputNode = transpose(NHWC_NCHW, inputNode);
     }
sModelInfo->getOperationInput(mNnapiOperationIndex, 0); - auto operandIndex2 = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); - const auto& elementType1 = sModelInfo->getOperandType(operandIndex1); - const auto& elementType2 = sModelInfo->getOperandType(operandIndex2); - - if ( !isValidInputTensor(0) || !isValidInputTensor(1) ) { - ALOGE("%s Empty or Invalid dimensions size for input", __func__); - return false; - } - - if(elementType1 != elementType2 ) { - ALOGE("%s Input type mismatch", __func__); - return false; - } else if ( elementType1 == OperandType::TENSOR_INT32 ) { - //In 1.3 For a {@link OperandType::TENSOR_INT32} tensor, - //the {@link FusedActivationFunc} must be "NONE". - auto activationFn = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2); - if (activationFn != 0) { - ALOGE("%s Activation type must be none for TENSOR_INT32 type", __func__); - return false; - } - } - - ALOGV("%s PASSED", __func__); - return true; -} - std::shared_ptr Mul::createNode() { // Creating input nodes std::shared_ptr input1, input2; diff --git a/ngraph_creator/operations/src/Quantize.cpp b/ngraph_creator/operations/src/Quantize.cpp index 02809b29b..24c09174d 100755 --- a/ngraph_creator/operations/src/Quantize.cpp +++ b/ngraph_creator/operations/src/Quantize.cpp @@ -11,16 +11,6 @@ Quantize::Quantize(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } -bool Quantize::validate() { - if ( !isValidInputTensor(0)) { - ALOGE("%s Empty or Invalid dimensions size for input", __func__); - return false; - } - - ALOGV("%s PASSED", __func__); - return true; -} - void Quantize::connectOperationToGraph() { createNode(); } std::shared_ptr Quantize::createNode() { diff --git a/ngraph_creator/operations/src/ReduceMin.cpp b/ngraph_creator/operations/src/ReduceMin.cpp index 6a20937c9..4eec8b34f 100644 --- a/ngraph_creator/operations/src/ReduceMin.cpp +++ b/ngraph_creator/operations/src/ReduceMin.cpp @@ -11,25 +11,6 @@ ReduceMin::ReduceMin(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } -bool ReduceMin::validate() { - // Check input rank - const auto inputRank = getInputOperandDimensions(0).size(); - - if (inputRank > 4) - return false; - - if ( !isValidInputTensor(0) || !isValidInputTensor(1)) { - ALOGE("%s Empty or Invalid dimensions size for input", __func__); - return false; - } - - auto& input_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 0); - auto& dim_reduce_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1); - - ALOGV("%s PASSED", __func__); - return true; -} - std::shared_ptr ReduceMin::createNode() { // Creating input nodes std::shared_ptr input; diff --git a/ngraph_creator/operations/src/ReduceSum.cpp b/ngraph_creator/operations/src/ReduceSum.cpp index 937d1bb69..e60859c12 100644 --- a/ngraph_creator/operations/src/ReduceSum.cpp +++ b/ngraph_creator/operations/src/ReduceSum.cpp @@ -11,25 +11,6 @@ ReduceSum::ReduceSum(int operationIndex) : OperationsBase(operationIndex) { mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0); } -bool ReduceSum::validate() { - // Check input rank - const auto inputRank = getInputOperandDimensions(0).size(); - - if (inputRank > 4) - return false; - - if ( !isValidInputTensor(0) || !isValidInputTensor(1)) { - ALOGE("%s Empty or Invalid dimensions size for input", __func__); - return false; - } - - auto& input_OperandIndex = 
diff --git a/ngraph_creator/operations/src/ReduceSum.cpp b/ngraph_creator/operations/src/ReduceSum.cpp
index 937d1bb69..e60859c12 100644
--- a/ngraph_creator/operations/src/ReduceSum.cpp
+++ b/ngraph_creator/operations/src/ReduceSum.cpp
@@ -11,25 +11,6 @@ ReduceSum::ReduceSum(int operationIndex) : OperationsBase(operationIndex) {
     mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
 }
 
-bool ReduceSum::validate() {
-    // Check input rank
-    const auto inputRank = getInputOperandDimensions(0).size();
-
-    if (inputRank > 4)
-        return false;
-
-    if ( !isValidInputTensor(0) || !isValidInputTensor(1)) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    auto& input_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 0);
-    auto& dim_reduce_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1);
-
-    ALOGV("%s PASSED", __func__);
-    return true;
-}
-
 std::shared_ptr<ngraph::Node> ReduceSum::createNode() {
     // Creating input nodes
     auto input = getInputNode(0);
diff --git a/ngraph_creator/operations/src/Relu.cpp b/ngraph_creator/operations/src/Relu.cpp
index e8dce05c0..815dd7914 100644
--- a/ngraph_creator/operations/src/Relu.cpp
+++ b/ngraph_creator/operations/src/Relu.cpp
@@ -11,16 +11,6 @@ Relu::Relu(int operationIndex) : OperationsBase(operationIndex) {
     mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
 }
 
-bool Relu::validate() {
-    if ( !isValidInputTensor(0)) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    ALOGV("%s PASSED", __func__);
-    return true;
-}
-
 std::shared_ptr<ngraph::Node> Relu::createNode() {
     // Creating input nodes
     std::shared_ptr<ngraph::Node> input;
diff --git a/ngraph_creator/operations/src/Relu1.cpp b/ngraph_creator/operations/src/Relu1.cpp
index 2d8e648a3..4c5a40799 100644
--- a/ngraph_creator/operations/src/Relu1.cpp
+++ b/ngraph_creator/operations/src/Relu1.cpp
@@ -11,16 +11,6 @@ Relu1::Relu1(int operationIndex) : OperationsBase(operationIndex) {
     mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
 }
 
-bool Relu1::validate() {
-    if ( !isValidInputTensor(0)) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    ALOGV("%s PASSED", __func__);
-    return true;
-}
-
 std::shared_ptr<ngraph::Node> Relu1::createNode() {
     // Creating input nodes
     std::shared_ptr<ngraph::Node> input;
diff --git a/ngraph_creator/operations/src/Relu6.cpp b/ngraph_creator/operations/src/Relu6.cpp
index 68533e9ca..3f16afe5d 100644
--- a/ngraph_creator/operations/src/Relu6.cpp
+++ b/ngraph_creator/operations/src/Relu6.cpp
@@ -11,16 +11,6 @@ Relu6::Relu6(int operationIndex) : OperationsBase(operationIndex) {
     mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
 }
 
-bool Relu6::validate() {
-    if ( !isValidInputTensor(0)) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    ALOGV("%s PASSED", __func__);
-    return true;
-}
-
 std::shared_ptr<ngraph::Node> Relu6::createNode() {
     // Creating input nodes
     std::shared_ptr<ngraph::Node> input;
diff --git a/ngraph_creator/operations/src/Reshape.cpp b/ngraph_creator/operations/src/Reshape.cpp
index 1cfe737b7..39556276a 100644
--- a/ngraph_creator/operations/src/Reshape.cpp
+++ b/ngraph_creator/operations/src/Reshape.cpp
@@ -12,17 +12,12 @@ Reshape::Reshape(int operationIndex) : OperationsBase(operationIndex) {
 }
 
 bool Reshape::validate() {
-    const auto inputRank = getInputOperandDimensions(0).size();
-    if (!isValidInputTensor(0)) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
+    const auto& dimsOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1);
+    if (!sModelInfo->isOperandLifeTimeConst(dimsOperandIndex) || !isValidInputTensor(1)) {
+        // TODO: Support CPU_reshape_all_tensors_as_inputs
+        ALOGE("%s Only Constant non-zero dimensions supported now", __func__);
         return false;
     }
-
-    if (inputRank > 4) {
-        ALOGE("%s Invalid dimensions size for input", __func__);
-        return false;
-    }
-    ALOGV("%s PASSED", __func__);
     return true;
 }
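
`Reshape::validate()` now keys on the shape operand (input 1) rather than the data tensor: unless the requested dimensions are compile-time constants, the output shape is dynamic, which this backend cannot express yet (hence the CPU_reshape_all_tensors_as_inputs TODO). With a constant dims operand, even the single -1 wildcard that NNAPI RESHAPE permits can be folded away up front; a sketch of that folding, with the helper being illustrative and assuming a well-formed model (at most one -1, non-zero known extents):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative: resolve a constant NNAPI reshape spec (with optional -1
// wildcard) into a fully static output shape.
std::vector<uint32_t> resolveReshapeDims(const std::vector<uint32_t>& inputDims,
                                         const std::vector<int32_t>& dims) {
    size_t numElements = 1;
    for (auto d : inputDims) numElements *= d;

    size_t known = 1;
    int wildcard = -1;  // index of the single allowed -1 entry, if any
    for (size_t i = 0; i < dims.size(); ++i) {
        if (dims[i] == -1) wildcard = static_cast<int>(i);
        else known *= static_cast<size_t>(dims[i]);
    }

    std::vector<uint32_t> out(dims.begin(), dims.end());
    if (wildcard >= 0) out[wildcard] = static_cast<uint32_t>(numElements / known);
    return out;
}
```
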
diff --git a/ngraph_creator/operations/src/Softmax.cpp b/ngraph_creator/operations/src/Softmax.cpp
index 80ea14212..18f1bcdea 100644
--- a/ngraph_creator/operations/src/Softmax.cpp
+++ b/ngraph_creator/operations/src/Softmax.cpp
@@ -11,17 +11,6 @@ Softmax::Softmax(int operationIndex) : OperationsBase(operationIndex) {
     mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
 }
 
-bool Softmax::validate() {
-    const auto inputRank = getInputOperandDimensions(0).size();
-    if ( !isValidInputTensor(0) || inputRank > 4 ) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    ALOGV("%s PASSED", __func__);
-    return true;
-}
-
 std::shared_ptr<ngraph::Node> Softmax::createNode() {
     // Creating input nodes
     std::shared_ptr<ngraph::Node> input, outputNode;
diff --git a/ngraph_creator/operations/src/SpaceToBatch.cpp b/ngraph_creator/operations/src/SpaceToBatch.cpp
index f925e039a..976c16e7d 100644
--- a/ngraph_creator/operations/src/SpaceToBatch.cpp
+++ b/ngraph_creator/operations/src/SpaceToBatch.cpp
@@ -16,21 +16,17 @@ bool SpaceToBatch::validate() {
     const auto inputRank = getInputOperandDimensions(0).size();
     if (inputRank != 4) return false;
 
-    if ( !isValidInputTensor(0) || !isValidInputTensor(1) || !isValidInputTensor(2) ) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    auto& input_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 0);
     auto& block_shape_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1);
-    auto& pad_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2);
+    // TODO: Add Support for all_tensors_as_inputs
+    if (!sModelInfo->isOperandLifeTimeConst(block_shape_OperandIndex)) {
+        ALOGE("%s Only Constant dimensions supported now", __func__);
+        return false;
+    }
 
-    //check operand lifetime is const or not as for now only const operand lifetime is supported
+    auto pad_OperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2);
     // TODO: Add Support for all_tensors_as_inputs
-    if (!sModelInfo->isOperandLifeTimeConst(input_OperandIndex) ||
-        !sModelInfo->isOperandLifeTimeConst(block_shape_OperandIndex) ||
-        !sModelInfo->isOperandLifeTimeConst(pad_OperandIndex)) {
+    if (!sModelInfo->isOperandLifeTimeConst(pad_OperandIndex)) {
         ALOGE("%s Only Constant dimensions supported now", __func__);
         return false;
     }
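
`SpaceToBatch::validate()` drops the const-lifetime requirement on the data tensor and keeps it only for block_shape (input 1) and pad (input 2), which is what actually has to be known up front: with those two constant, the output shape is statically computable. A sketch of that computation for the NHWC case; the helper is illustrative, and it assumes the NNAPI paddings tensor flattened as [top, bottom, left, right]:

```cpp
#include <cstdint>
#include <vector>

// Illustrative: static SPACE_TO_BATCH_ND output shape for an NHWC input,
// given constant block_shape [bH, bW] and flattened paddings.
std::vector<uint32_t> spaceToBatchOutputShape(const std::vector<uint32_t>& in,
                                              const std::vector<int32_t>& block,
                                              const std::vector<int32_t>& pad) {
    return {in[0] * block[0] * block[1],           // batch grows by prod(block)
            (in[1] + pad[0] + pad[1]) / block[0],  // padded height / block height
            (in[2] + pad[2] + pad[3]) / block[1],  // padded width / block width
            in[3]};                                // depth unchanged
}
```
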
diff --git a/ngraph_creator/operations/src/Squeeze.cpp b/ngraph_creator/operations/src/Squeeze.cpp
index 0364fd875..bb466e793 100644
--- a/ngraph_creator/operations/src/Squeeze.cpp
+++ b/ngraph_creator/operations/src/Squeeze.cpp
@@ -12,32 +12,17 @@ Squeeze::Squeeze(int operationIndex) : OperationsBase(operationIndex) {
 }
 
 bool Squeeze::validate() {
-    const auto inputRank = getInputOperandDimensions(0).size();
-    if (inputRank > 4) return false;
-
-    if ( !isValidInputTensor(0)) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
     // TODO: Add Support for all_tensors_as_inputs
-    const auto& dimsOperandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0);
+    const auto& dimsOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1);
 
-    if (!sModelInfo->isOperandLifeTimeConst(dimsOperandIndex1)) {
+    // TODO: Support OmittedInput.
+    // The empty 2nd argument in Squeeze op causes dynamic output
+    // To add support, the dims will have to be calculated statically
+    if (sModelInfo->isOmittedInput(mNnapiOperationIndex, 1) ||
+        !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex)) {
         ALOGE("%s Only Constant dimensions supported now", __func__);
         return false;
     }
-    const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex);
-    if (inputsSize == 2) {
-        const auto& dimsOperandIndex2 = sModelInfo->getOperationInput(mNnapiOperationIndex, 1);
-        // TODO: Support OmittedInput.
-        // The empty 2nd argument in Squeeze op causes dynamic output
-        // To add support, the dims will have to be calculated statically
-        if (!isValidInputTensor(1) || !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex2) ||
-            sModelInfo->isOmittedInput(mNnapiOperationIndex, 1) ) {
-            ALOGE("%s Invalid operand type or operand lifetime", __func__);
-            return false;
-        }
-    }
 
     return true;
 }
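
The reworked `Squeeze::validate()` rejects an omitted axes operand outright, since an empty second argument makes the output shape dynamic; the TODO notes that the fix is to compute the squeezed dims statically. That computation is straightforward once the axes (or their default, every size-1 dimension) are known; a sketch, with the helper being illustrative rather than part of the patch:

```cpp
#include <cstdint>
#include <vector>

// Illustrative: static output shape of SQUEEZE given constant axes.
// An empty axes list defaults to dropping every size-1 dimension.
std::vector<uint32_t> squeezedShape(const std::vector<uint32_t>& dims,
                                    std::vector<int32_t> axes) {
    const int32_t rank = static_cast<int32_t>(dims.size());
    if (axes.empty())
        for (int32_t i = 0; i < rank; ++i)
            if (dims[i] == 1) axes.push_back(i);

    std::vector<bool> drop(dims.size(), false);
    for (int32_t a : axes) {
        if (a < 0) a += rank;  // negative axes wrap around
        if (a >= 0 && a < rank && dims[a] == 1) drop[a] = true;
    }

    std::vector<uint32_t> out;
    for (int32_t i = 0; i < rank; ++i)
        if (!drop[i]) out.push_back(dims[i]);
    return out;
}
```
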
diff --git a/ngraph_creator/operations/src/Sub.cpp b/ngraph_creator/operations/src/Sub.cpp
index 51b3c8668..c90d7f7b5 100644
--- a/ngraph_creator/operations/src/Sub.cpp
+++ b/ngraph_creator/operations/src/Sub.cpp
@@ -11,33 +11,6 @@ Sub::Sub(int operationIndex) : OperationsBase(operationIndex) {
     mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
 }
 
-bool Sub::validate() {
-    auto operandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0);
-    auto operandIndex2 = sModelInfo->getOperationInput(mNnapiOperationIndex, 1);
-    const auto& elementType1 = sModelInfo->getOperandType(operandIndex1);
-    const auto& elementType2 = sModelInfo->getOperandType(operandIndex2);
-    if ( !isValidInputTensor(0) || !isValidInputTensor(1) ) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    // check if both tensors are of same type
-    if(elementType1 != elementType2 ) {
-        ALOGE("%s Input type mismatch", __func__);
-        return false;
-    } else if ( elementType1 == OperandType::TENSOR_INT32 ) {
-        //In 1.3 For a {@link OperandType::TENSOR_INT32} tensor,
-        //the {@link FusedActivationFunc} must be "NONE".
-        auto activationFn = sModelInfo->ParseOperationInput(mNnapiOperationIndex, 2);
-        if (activationFn != 0) {
-            ALOGE("%s Activation type must be none for TENSOR_INT32 type", __func__);
-            return false;
-        }
-    }
-    ALOGV("%s PASSED", __func__);
-    return true;
-}
-
 std::shared_ptr<ngraph::Node> Sub::createNode() {
     // Creating input nodes
     std::shared_ptr<ngraph::Node> input1, input2;
diff --git a/ngraph_creator/operations/src/Tanh.cpp b/ngraph_creator/operations/src/Tanh.cpp
index 7f3679489..65adaa342 100644
--- a/ngraph_creator/operations/src/Tanh.cpp
+++ b/ngraph_creator/operations/src/Tanh.cpp
@@ -11,17 +11,6 @@ Tanh::Tanh(int operationIndex) : OperationsBase(operationIndex) {
     mDefaultOutputIndex = sModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
 }
 
-bool Tanh::validate() {
-    const auto inputRank = getInputOperandDimensions(0).size();
-    if ( !isValidInputTensor(0) || inputRank > 4 ) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
-    }
-
-    ALOGV("%s PASSED", __func__);
-    return true;
-}
-
 std::shared_ptr<ngraph::Node> Tanh::createNode() {
     // Creating input nodes
     std::shared_ptr<ngraph::Node> input;
diff --git a/ngraph_creator/operations/src/Transpose.cpp b/ngraph_creator/operations/src/Transpose.cpp
index 641b68799..052453386 100644
--- a/ngraph_creator/operations/src/Transpose.cpp
+++ b/ngraph_creator/operations/src/Transpose.cpp
@@ -13,23 +13,13 @@ Transpose::Transpose(int operationIndex) : OperationsBase(operationIndex) {
 
 bool Transpose::validate() {
     // TODO: Add Support for all_tensors_as_inputs
-    const auto& dimsOperandIndex1 = sModelInfo->getOperationInput(mNnapiOperationIndex, 0);
-    const auto inputRank = getInputOperandDimensions(0).size();
-    if ( !isValidInputTensor(0) || inputRank > 4) {
-        ALOGE("%s Empty or Invalid dimensions size for input", __func__);
-        return false;
+    const auto& dimsOperandIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 1);
+    const auto& dims = getInputOperandDimensions(1);
+    if (!dims.empty() && dims[0] != 0 && !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex)) {
+        ALOGE("%s Only Constant dimensions supported now", __func__);
+        return false;
     }
-    const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex);
-    if (inputsSize == 2) {
-        const auto& dimsOperandIndex2 = sModelInfo->getOperationInput(mNnapiOperationIndex, 1);
-        if (!isValidInputTensor(1) || !sModelInfo->isOperandLifeTimeConst(dimsOperandIndex2)) {
-            ALOGE("%s Invalid operand type or operand lifetime", __func__);
-            return false;
-        }
-    }
-
-    ALOGV("%s PASSED", __func__);
     return true;
 }
 
@@ -40,12 +30,14 @@ std::shared_ptr<ngraph::Node> Transpose::createNode() {
     input = getInputNode(0);
 
     std::shared_ptr<ngraph::Node> order;
-    order = createConstNode(ngraph::element::i32, {0}, convertToVector(0));
-    const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex);
-    if (inputsSize == 2) {
+    const auto& dims = getInputOperandDimensions(1);
+    if (!dims.empty() && dims[0] != 0) {
         order = getInputNode(1);
+    } else {
+        order = createConstNode(ngraph::element::i32, {0}, convertToVector(0));
    }
+
     std::shared_ptr<ngraph::Node> outputNode;
 
     outputNode = std::make_shared(input, order);
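
`Transpose::createNode()` now distinguishes a real perm operand (input 1 present with a non-zero extent) from an omitted one, and feeds an empty i32 constant to the ngraph Transpose node in the omitted case; an empty order tensor is treated as axis reversal, which matches NNAPI's documented default of [rank-1, ..., 0] for an omitted perm. For reference, the equivalent explicit permutation (helper is illustrative):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative: the reversal an omitted TRANSPOSE perm implies.
std::vector<int32_t> defaultTransposeOrder(size_t rank) {
    std::vector<int32_t> order(rank);
    for (size_t i = 0; i < rank; ++i)
        order[i] = static_cast<int32_t>(rank - 1 - i);  // [rank-1, ..., 0]
    return order;
}
```
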
diff --git a/ngraph_creator/operations/src/TransposeConv2D.cpp b/ngraph_creator/operations/src/TransposeConv2D.cpp
index a9e9c2d85..1b0cc3c88 100644
--- a/ngraph_creator/operations/src/TransposeConv2D.cpp
+++ b/ngraph_creator/operations/src/TransposeConv2D.cpp
@@ -22,7 +22,7 @@ bool TransposeConv2D::validate() {
                   inputDimensionsSize, filterDimensionsSize);
         return false;
     }
-    if (!isValidInputTensor(0) || !isValidInputTensor(1) || !isValidInputTensor(2)) {
+    if (!isValidInputTensor(0) || !isValidInputTensor(1)) {
         ALOGE("%s Invalid dimensions for input or filter", __func__);
         return false;
     }
@@ -38,22 +38,16 @@ bool TransposeConv2D::validate() {
     // TODO: Issue from OV 2021.4, remove this check once CVS-61723 is resolved
     // Workaround to ignore VTS large input error test cases
     const auto& inputDimensions = getInputOperandDimensions(0);
-    const auto& filterDimensions = getInputOperandDimensions(1);
-    const auto& biasDimensions = getInputOperandDimensions(2);
-    if (inputDimensions[1] == 1 && inputDimensions[2] == 1 && inputDimensions[3] == 1) {
-        return false;
-    }
-    //check if the bias dimension == filter depth_out && filter depth_in == input depth_in
-    if(filterDimensions[0] != biasDimensions[0] && biasDimensions[3] != inputDimensions[3]) {
-        return false;
-    }
+    if (inputDimensions[1] == 1 && inputDimensions[2] == 1 && inputDimensions[3] == 1) return false;
 
     ALOGV("%s PASSED", __func__);
     return true;
 }
 
 std::shared_ptr<ngraph::Node> TransposeConv2D::createNode() {
+    std::shared_ptr<ngraph::Node> inputNode;
+    inputNode = getInputNode(0);
     const auto& inputsSize = sModelInfo->getOperationInputsSize(mNnapiOperationIndex);
     ALOGD("%s inputsSize %lu", __func__, inputsSize);
@@ -63,6 +57,9 @@ std::shared_ptr<ngraph::Node> TransposeConv2D::createNode() {
         isExplicit = true;
     } else if (inputsSize == 9) {
         isImplicit = true;
+    } else {
+        ALOGE("%s inputsSize %lu NOT SUPPORTED", __func__, inputsSize);
+        return inputNode;
     }
 
     int32_t padding_left, padding_right;
@@ -160,10 +157,9 @@ std::shared_ptr<ngraph::Node> TransposeConv2D::createNode() {
         padding_bottom = 0;
     }
 
-    std::shared_ptr<ngraph::Node> inputNode, filterNode, biasNode;
+    std::shared_ptr<ngraph::Node> filterNode, biasNode;
     const auto& biasIndex = sModelInfo->getOperationInput(mNnapiOperationIndex, 2);
 
-    inputNode = getInputNode(0);
     filterNode = getInputNode(1);
     biasNode = getInputNode(2);
diff --git a/ngraph_creator/src/NgraphNetworkCreator.cpp b/ngraph_creator/src/NgraphNetworkCreator.cpp
index 69209129a..c9980f7f4 100644
--- a/ngraph_creator/src/NgraphNetworkCreator.cpp
+++ b/ngraph_creator/src/NgraphNetworkCreator.cpp
@@ -159,6 +159,11 @@ const std::string& NgraphNetworkCreator::getNodeName(uint32_t index) {
     return mNgraphNodes->getNodeName(index);
 }
 
+std::vector<size_t> NgraphNetworkCreator::getOutputShape(uint32_t index) {
+
+    ALOGV("%s get node %d outputsize ", __func__, index);
+    return mNgraphNodes->getOutputShape(index);
+}
 std::shared_ptr<ngraph::Function> NgraphNetworkCreator::generateGraph() {
     ALOGV("%s Called", __func__);
     std::shared_ptr<ngraph::Function> ret;
diff --git a/ngraph_creator/src/NgraphNodes.cpp b/ngraph_creator/src/NgraphNodes.cpp
index ebdbc1788..2b97800a6 100644
--- a/ngraph_creator/src/NgraphNodes.cpp
+++ b/ngraph_creator/src/NgraphNodes.cpp
@@ -27,18 +27,24 @@ ngraph::Output<ngraph::Node> NgraphNodes::getOperationOutput(size_t index) {
 }
 
 void NgraphNodes::setResultNode(size_t outputIndex, std::shared_ptr<ngraph::Node> resultNode) {
-    ALOGD("setResultNode %zu", outputIndex);
+    ALOGV("setResultNode %zu", outputIndex);
     mResultNodes.push_back(resultNode);
 }
 
 const std::string& NgraphNodes::getNodeName(size_t index) {
     if (mNodeNames.find(index) == mNodeNames.end()) {
         mNodeNames[index] = mOutputAtOperandIndex[index].get_node_shared_ptr()->get_name();
-        ALOGD("%s index %zu, name %s", __func__, index, mNodeNames[index].c_str());
+        ALOGV("%s index %zu, name %s", __func__, index, mNodeNames[index].c_str());
     }
     ALOGV("%s index %zu, name %s", __func__, index, mNodeNames[index].c_str());
     return mNodeNames[index];
 }
+
+std::vector<size_t> NgraphNodes::getOutputShape(size_t index) {
+    ALOGV("%s outputshape of node %zu index ", __func__, index);
+    return mOutputAtOperandIndex[index].get_node_shared_ptr()->get_output_shape(0);
+}
+
 // remove null input node parameter
 void NgraphNodes::removeInputParameter(std::string name, size_t index) {
     for (size_t i = 0; i < mInputParams.size(); i++) {
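
The new `getOutputShape()` accessor plumbs a node's static output shape from NgraphNodes up through NgraphNetworkCreator, giving callers the tensor geometry behind an operand index; the remote-inference path added elsewhere in this patch consumes it to stage input buffers. A usage sketch for deriving a buffer size from the returned shape; the float32 element size is an assumption for illustration:

```cpp
#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

// Illustrative: byte size of the tensor whose shape getOutputShape() returns,
// e.g. tensorByteSize(creator.getOutputShape(index)) for an assumed caller.
size_t tensorByteSize(const std::vector<size_t>& shape) {
    const size_t elements = std::accumulate(shape.begin(), shape.end(),
                                            static_cast<size_t>(1),
                                            std::multiplies<size_t>());
    return elements * sizeof(float);  // assumes a float32 tensor
}
```
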
diff --git a/ngraph_creator/src/OperationsFactory.cpp b/ngraph_creator/src/OperationsFactory.cpp
index 06a3bb811..f31deb796 100755
--- a/ngraph_creator/src/OperationsFactory.cpp
+++ b/ngraph_creator/src/OperationsFactory.cpp
@@ -26,8 +26,8 @@ std::shared_ptr<OperationsBase> OperationsFactory::getOperation(
         return std::make_shared(operationIndex);
     case OperationType::ARGMIN:
         return std::make_shared(operationIndex);
-    // case OperationType::AVERAGE_POOL_2D:
-    //     return std::make_shared(operationIndex);
+    case OperationType::AVERAGE_POOL_2D:
+        return std::make_shared(operationIndex);
     case OperationType::BATCH_TO_SPACE_ND:
         return std::make_shared(operationIndex);
     case OperationType::BIDIRECTIONAL_SEQUENCE_RNN:
@@ -70,10 +70,10 @@ std::shared_ptr<OperationsBase> OperationsFactory::getOperation(
         return std::make_shared(operationIndex);
     case OperationType::INSTANCE_NORMALIZATION:
         return std::make_shared(operationIndex);
-    // case OperationType::L2_POOL_2D:
-    //     return std::make_shared(operationIndex);
-    // case OperationType::L2_NORMALIZATION:
-    //     return std::make_shared(operationIndex);
+    case OperationType::L2_POOL_2D:
+        return std::make_shared(operationIndex);
+    case OperationType::L2_NORMALIZATION:
+        return std::make_shared(operationIndex);
     case OperationType::LSTM:
         return std::make_shared(operationIndex);
     case OperationType::LESS:
@@ -144,10 +144,10 @@ std::shared_ptr<OperationsBase> OperationsFactory::getOperation(
         return std::make_shared(operationIndex);
     case OperationType::RSQRT:
         return std::make_shared(operationIndex);
-    // case OperationType::RESIZE_BILINEAR:
-    //     return std::make_shared(operationIndex);
-    // case OperationType::RESIZE_NEAREST_NEIGHBOR:
-    //     return std::make_shared(operationIndex);
+    case OperationType::RESIZE_BILINEAR:
+        return std::make_shared(operationIndex);
+    case OperationType::RESIZE_NEAREST_NEIGHBOR:
+        return std::make_shared(operationIndex);
     case OperationType::SELECT:
         return std::make_shared