Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 28 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -59,4 +59,32 @@ model = DetectionModel.create_model("~/.cache/omz/public/ssdlite_mobilenet_v2/FP
model.save("serialized.xml")
```

## Usage with generic OpenVINO models

ModelAPI uses a custom field in the `rt_info/model_info` section of the OpenVINO IR to store the metadata required for preprocessing and postprocessing. If you have a generic OpenVINO model without such metadata, you can provide that metadata via the `configuration` argument of the `create_model()` method:

```python
from model_api.models import Model

# Create a model wrapper for a generic OpenVINO model, supplying the
# required metadata explicitly via the configuration argument
model = Model.create_model(
"model.xml",
configuration={
"model_type": "Segmentation",
"blur_strength": 1,
"labels": ["object"],
"soft_threshold": 0.5,
}
)

# Run synchronous inference locally
result = model(image) # image is numpy.ndarray

# Print results in model-specific format
print(f"Inference result: {result}")

# Save the model with metadata already embedded (passing configuration is not required anymore)
model.save("serialized_with_metadata.xml")
```

For more details please refer to the [examples](https://github.com/openvinotoolkit/model_api/tree/master/examples) of this project.
2 changes: 1 addition & 1 deletion src/model_api/models/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -234,7 +234,7 @@ def create_model(
["model_info", "model_type"],
).astype(str)
except RuntimeError:
model_type = cls.detect_model_type(inference_adapter)
model_type = configuration.get("model_type", cls.detect_model_type(inference_adapter))
Model = cls.get_model_class(model_type)
return Model(inference_adapter, configuration, preload)

Expand Down
18 changes: 18 additions & 0 deletions tests/accuracy/public_scope.json
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,24 @@
}
]
},
{
"name": "otx_models/Lite-hrnet-s_mod2-without-model-info.xml",
"type": "SegmentationModel",
"configuration": {
"blur_strength": 1,
"labels": ["object"],
"model_type": "Segmentation",
"soft_threshold": 0.5
},
"test_data": [
{
"image": "coco128/images/train2017/000000000074.jpg",
"reference": [
"0: 0.563, 1: 0.437, [426,640,2], [0], [0]; object: 0.520, 26, 0, object: 0.530, 42, 0, object: 0.501, 4, 0, object: 0.507, 27, 0, object: 0.503, 8, 0, object: 0.502, 6, 0, object: 0.505, 18, 0, object: 0.504, 13, 0, object: 0.524, 87, 0, object: 0.521, 89, 0, object: 0.757, 2706, 2, "
]
}
]
},
{
"name": "otx_models/Lite-hrnet-s_mod2.onnx",
"type": "SegmentationModel",
Expand Down
9 changes: 6 additions & 3 deletions tests/accuracy/test_accuracy.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ def read_config(fname):
return json.load(f)


def create_models(model_type, model_path, download_dir, force_onnx_adapter=False, device="CPU"):
def create_models(model_type, model_path, download_dir, force_onnx_adapter=False, device="CPU", configuration=None):
if model_path.endswith(".onnx") and force_onnx_adapter:
wrapper_type = model_type.get_model_class(
load_parameters_from_onnx(onnx.load(model_path))["model_info"]["model_type"],
Expand All @@ -92,16 +92,18 @@ def create_models(model_type, model_path, download_dir, force_onnx_adapter=False
model.load()
return [model]

configuration = configuration or {}

models = [
model_type.create_model(model_path, device=device, download_dir=download_dir),
model_type.create_model(model_path, device=device, download_dir=download_dir, configuration=configuration),
]
if model_path.endswith(".xml"):
model = create_core().read_model(model_path)
if model.has_rt_info(["model_info", "model_type"]):
wrapper_type = model_type.get_model_class(
model.get_rt_info(["model_info", "model_type"]).astype(str),
)
model = wrapper_type(OpenvinoAdapter(create_core(), model_path, device=device))
model = wrapper_type(OpenvinoAdapter(create_core(), model_path, device=device), configuration=configuration)
model.load()
models.append(model)
return models
Expand Down Expand Up @@ -287,6 +289,7 @@ def test_image_models(data, device, dump, result, model_data, results_dir): # n
data,
model_data.get("force_ort", False),
device=device,
configuration=model_data.get("configuration", None),
):
if "tiler" in model_data:
if "extra_model" in model_data:
Expand Down