diff --git a/.github/workflows/internal_ci.yml b/.github/workflows/internal_ci.yml new file mode 100644 index 0000000000000..c5a1c97aae0d7 --- /dev/null +++ b/.github/workflows/internal_ci.yml @@ -0,0 +1,34 @@ +name: Internal CI + +on: + pull_request: + branches: + - '**' # Triggers on a PR to any branch + +jobs: + build: + + runs-on: [self-hosted, Linux, X64] # Runs on a Lunar Lake self-hosted runner + env: + BUILD_SOURCESDIRECTORY: ${{ github.workspace }} + BUILD_BINARIESDIRECTORY: ${{ github.workspace }}/build + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.ref }} # checkout the PR branch + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Create build directory + run: | + mkdir -p ${{ env.BUILD_BINARIESDIRECTORY }} + chmod -R 777 ${{ env.BUILD_BINARIESDIRECTORY }} + + - name: Running Internal CI # Trigger Internal CI on the PR branch + run: | + cd tools/ci_build/github/linux/ + dir + ./run_dockerbuild.sh -o ubuntu22.04 -p 3.10 -d openvino -v 2024.5.0 -x "--config Release --use_openvino CPU --build_wheel --build_shared_lib --parallel " diff --git a/onnxruntime/core/providers/openvino/backend_manager.cc b/onnxruntime/core/providers/openvino/backend_manager.cc index 16a92b43adaf6..3efb6e7c32946 100644 --- a/onnxruntime/core/providers/openvino/backend_manager.cc +++ b/onnxruntime/core/providers/openvino/backend_manager.cc @@ -225,13 +225,11 @@ Status BackendManager::ExportCompiledBlobAsEPCtxNode(const onnxruntime::GraphVie // Build name by combining EpCtx model name (if available) and subgraph name. 
Model // name is not available in when creating a session from memory auto name = session_context_.so_context_file_path.stem().string(); - if (!name.empty() && !graph_body_viewer.ModelPath().empty()) { + if (name.empty() && !graph_body_viewer.ModelPath().empty()) { name = graph_body_viewer.ModelPath().stem().string(); } - if (!name.empty()) { - name += "_"; - } - name += subgraph_context_.subgraph_name; + ORT_ENFORCE(!name.empty()); + name += "_" + subgraph_context_.subgraph_name; std::filesystem::path blob_filename = session_context_.so_context_file_path; if (blob_filename.empty()) { diff --git a/onnxruntime/core/providers/openvino/openvino_provider_factory.cc b/onnxruntime/core/providers/openvino/openvino_provider_factory.cc index 1c2d857b6252d..2cf962c829afc 100644 --- a/onnxruntime/core/providers/openvino/openvino_provider_factory.cc +++ b/onnxruntime/core/providers/openvino/openvino_provider_factory.cc @@ -148,7 +148,7 @@ std::string ParsePrecision(const ProviderOptions& provider_options, std::string& << "Update the 'device_type' to specified types 'CPU', 'GPU', 'GPU.0', " << "'GPU.1', 'NPU' or from" << " HETERO/MULTI/AUTO options and set 'precision' separately. \n"; - int delimit = device_type.find("_"); + auto delimit = device_type.find("_"); device_type = device_type.substr(0, delimit); return device_type.substr(delimit + 1); } diff --git a/onnxruntime/core/providers/openvino/ov_interface.cc b/onnxruntime/core/providers/openvino/ov_interface.cc index 4c656bceff550..d8d0dbfec8c31 100644 --- a/onnxruntime/core/providers/openvino/ov_interface.cc +++ b/onnxruntime/core/providers/openvino/ov_interface.cc @@ -233,7 +233,7 @@ void OVInferRequest::SetTensor(const std::string& name, OVTensorPtr& blob) { } uint32_t OVInferRequest::GetNumInputs() { - return ovInfReq.get_compiled_model().inputs().size(); + return static_cast<uint32_t>(ovInfReq.get_compiled_model().inputs().size()); } void OVInferRequest::StartAsync() {