Skip to content

Commit

Permalink
Fixed Coverity issues
Browse files Browse the repository at this point in the history
  • Loading branch information
jatinwadhwa921 committed Jan 31, 2025
1 parent 7179a0b commit 522c041
Show file tree
Hide file tree
Showing 8 changed files with 31 additions and 14 deletions.
27 changes: 21 additions & 6 deletions onnxruntime/core/providers/openvino/backend_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,22 @@ std::istream& operator>>(std::istream& stream, SharedContext::SharedWeights::Met
stream >> value.size;
size_t num_dimensions;
stream >> num_dimensions;
value.dimensions.resize(num_dimensions);

if (stream.fail()) {
ORT_THROW("Error: Failed to read num_dimensions from stream.");
}

constexpr size_t MAX_SAFE_DIMENSIONS = 1024;

if(num_dimensions == 0 || num_dimensions > MAX_SAFE_DIMENSIONS) {
ORT_THROW("Invalid number of dimensions: " + std::to_string(num_dimensions));
}
try {
value.dimensions.resize(num_dimensions);
} catch (const std::bad_alloc&) {
ORT_THROW("Error: Memory allocation failed while resizing dimensions.");
}

for (auto& dim : value.dimensions) {
stream >> dim;
}
Expand Down Expand Up @@ -236,23 +251,23 @@ int GetFirstAvailableDevice(SessionContext& session_context) {
void FillOutputsWithConstantData(std::shared_ptr<ov::Node> node, Ort::UnownedValue& out_tensor) {
switch (node->get_element_type()) {
case ov::element::Type_t::f32: {
FillOutputHelper<float>(out_tensor, node);
FillOutputHelper<float>(out_tensor, std::move(node));
break;
}
case ov::element::Type_t::boolean: {
FillOutputHelper<char>(out_tensor, node);
FillOutputHelper<char>(out_tensor, std::move(node));
break;
}
case ov::element::Type_t::i32: {
FillOutputHelper<int32_t>(out_tensor, node);
FillOutputHelper<int32_t>(out_tensor, std::move(node));
break;
}
case ov::element::Type_t::i64: {
FillOutputHelper<int64_t>(out_tensor, node);
FillOutputHelper<int64_t>(out_tensor, std::move(node));
break;
}
case ov::element::Type_t::f16: {
FillOutputHelper<float>(out_tensor, node);
FillOutputHelper<float>(out_tensor, std::move(node));
break;
}
default:
Expand Down
4 changes: 2 additions & 2 deletions onnxruntime/core/providers/openvino/backends/basic_backend.cc
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ BasicBackend::BasicBackend(std::unique_ptr<ONNX_NAMESPACE::ModelProto>& model_pr
if (!subgraph_context.has_dynamic_input_shape) {
delete model_proto.release();
}
ov_model = CreateOVModel(model, session_context_, const_outputs_map_);
ov_model = CreateOVModel(std::move(model), session_context_, const_outputs_map_);
}
exe_network_ = OVCore::CompileModel(
ov_model, hw_target, device_config, subgraph_context_.subgraph_name);
Expand All @@ -141,7 +141,7 @@ BasicBackend::BasicBackend(std::unique_ptr<ONNX_NAMESPACE::ModelProto>& model_pr
}
};
}
inferRequestsQueue_ = std::unique_ptr<InferRequestsQueue>(new InferRequestsQueue(exe_network_, num_infer_req, initializer));
inferRequestsQueue_ = std::unique_ptr<InferRequestsQueue>(new InferRequestsQueue(exe_network_, num_infer_req, std::move(initializer)));
}

bool BasicBackend::ValidateSubgraph(std::map<std::string, std::shared_ptr<ov::Node>>& const_outputs_map) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
namespace onnxruntime {
namespace openvino_ep {

EPCtxHandler::EPCtxHandler(std::string ov_sdk_version, const logging::Logger& logger) : openvino_sdk_version_(ov_sdk_version), logger_(logger) {
EPCtxHandler::EPCtxHandler(std::string ov_sdk_version, const logging::Logger& logger) : openvino_sdk_version_(std::move(ov_sdk_version)), logger_(logger) {
epctx_model_ = Model::Create("ovep_context_model", false, logger_);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -257,7 +257,7 @@ common::Status OpenVINOExecutionProvider::Compile(
}
};

node_compute_funcs.push_back(compute_info);
node_compute_funcs.push_back(std::move(compute_info));

if (!status.IsOK()) {
break;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,7 @@ void ParseProviderOptions([[maybe_unused]] ProviderInfo& result, [[maybe_unused]

struct OpenVINOProviderFactory : IExecutionProviderFactory {
OpenVINOProviderFactory(ProviderInfo provider_info, SharedContext& shared_context)
: provider_info_(provider_info), shared_context_(shared_context) {}
: provider_info_(std::move(provider_info)), shared_context_(shared_context) {}

~OpenVINOProviderFactory() override {}

Expand Down Expand Up @@ -333,7 +333,7 @@ struct OpenVINO_Provider : Provider {
if (pi.so_share_ep_contexts) {
ov::AnyMap map;
map["NPU_COMPILATION_MODE_PARAMS"] = "enable-wd-blockarg-input=true compute-layers-with-higher-precision=Sqrt,Power,ReduceSum";
pi.load_config["NPU"] = map;
pi.load_config["NPU"] = std::move(map);
}

return std::make_shared<OpenVINOProviderFactory>(pi, shared_context_);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ GetCapability::GetCapability(const EPCtxHandler& ep_ctx_handler,
const std::string device_type_param,
const bool enable_qdq_optimizer) : ep_ctx_handler_(ep_ctx_handler),
graph_viewer_(graph_viewer_param),
device_type_(device_type_param) {
device_type_(std::move(device_type_param)) {
bool npu_qdq_optimizer_enabled = false;
if (device_type_.find("NPU") != std::string::npos) {
device_type_ = "CPU";
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/core/providers/openvino/ov_versions/data_ops.h
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ class DataOps {
const std::string dev_id, const bool npu_qdq_optimizer_enabled)
: graph_viewer_(graph_viewer_param),
version_id_(ver),
device_id_(dev_id),
device_id_(std::move(dev_id)),
npu_qdq_optimizer_enabled_(npu_qdq_optimizer_enabled) {
populate_op_mode_supported();
populate_types_supported();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -712,6 +712,8 @@ Status CreateModelWithStrippedQDQNodes(const GraphViewer& src_graph,
// Will set inputs after deciding the fate of all internal and external initializers
// accumulated_inputs container will store the inputs of the original graph and initializers with external data
InlinedVector<const NodeArg*> accumulated_inputs;
accumulated_inputs.reserve(dst_graph_inputs.size());

// dst_graph.SetInputs(dst_graph_inputs);
dst_graph.SetOutputs(dst_graph_outputs);

Expand Down

0 comments on commit 522c041

Please sign in to comment.