Bug Fixes for PTQ and ACQ based OpenVINO Model Export and Added Test Cases #2594

Open · wants to merge 42 commits into main

Changes from all commits (42 commits)
b0fc113
Added helper functions for loading Metrics
srikesh-07 Mar 5, 2025
97e9fa2
Added AnomalibMetric as subclass argument
srikesh-07 Mar 5, 2025
9c744eb
Modified `model.export` to dynamically load metrics
srikesh-07 Mar 5, 2025
7f7cf8b
Modified ACQ method to support `AnomalibMetric` method
srikesh-07 Mar 5, 2025
05ab70d
Update test_cli.py
srikesh-07 Mar 5, 2025
8faa2c6
Modified docstring and added cache clearance after test
srikesh-07 Mar 5, 2025
c08eb45
Removed modified `convert_snake_to_pascal_case` as it's no longer used.
srikesh-07 Mar 5, 2025
cad5731
Updated docstring for `get_available_metrics()`
srikesh-07 Mar 5, 2025
9a0c7cd
Fixed circular import and added lazy import
srikesh-07 Mar 5, 2025
7cbf7e0
Modified the field name for ACQ export
srikesh-07 Mar 5, 2025
a02aa37
Improved code quality
srikesh-07 Mar 5, 2025
87dde1c
Improved code quality
srikesh-07 Mar 5, 2025
5d2a3bc
Removed debugging statement
srikesh-07 Mar 5, 2025
7888e12
Minor bug fix
srikesh-07 Mar 5, 2025
3bf7af8
Update __init__.py
srikesh-07 Mar 5, 2025
1fbf2fb
Update __init__.py
srikesh-07 Mar 5, 2025
1f4948d
Reformatted the code
srikesh-07 Mar 6, 2025
af8c734
Reformatted cli.py
srikesh-07 Mar 6, 2025
2233dbd
Reformatted engine.py
srikesh-07 Mar 6, 2025
96860c7
Reformatted export_mixin.py
srikesh-07 Mar 6, 2025
d8235c8
Update export_mixin.py
srikesh-07 Mar 6, 2025
42fc4d5
Added testcase for PTQ export
srikesh-07 Mar 6, 2025
e55b6ca
Update test_cli.py docstrings
srikesh-07 Mar 6, 2025
177baaf
Fixed typo in docstring
srikesh-07 Mar 6, 2025
e0c8cc5
Added testcase for FP16 and INT8 export
srikesh-07 Mar 6, 2025
ad3ac99
Reformatted test_cli.py
srikesh-07 Mar 6, 2025
65a58ad
Added `use_placeholder_fields` to `get_metric` method
srikesh-07 Mar 6, 2025
c9f971c
Modified 'export' method
srikesh-07 Mar 6, 2025
835bf32
Add configuration options to PostProcessor (#2547)
djdameln Mar 6, 2025
2dc5a19
Added functionality to modify metric fields only if placeholder is us…
srikesh-07 Mar 6, 2025
0bb9fd8
Added Unit Tests for helper functions of Metrics
srikesh-07 Mar 6, 2025
01c6c05
Modified Engine Export Docstrings
srikesh-07 Mar 6, 2025
7d27a29
Rename PostProcessor (#2589)
djdameln Mar 6, 2025
9293bd7
Rename MVTec dataset to MVTecAD dataset (#2557)
samet-akcay Mar 6, 2025
a30744e
Update test_cli.py
srikesh-07 Mar 7, 2025
9dae140
Added test case and changed MVTec to MVTecAD
srikesh-07 Mar 7, 2025
0737ddc
Fixed a bug in calculating the metric for Binary labels
srikesh-07 Mar 7, 2025
1425059
Reformatted the test_cli.py
srikesh-07 Mar 7, 2025
8a6ec1f
Merge branch 'main' into acq
srikesh-07 Mar 11, 2025
2ced95b
Merge branch 'main' into acq
samet-akcay Mar 21, 2025
10020b5
Fixed snake-case issue for single-word metrics
srikesh-07 Mar 22, 2025
2b06dd4
Merge pull request #4 from openvinotoolkit/main
srikesh-07 Mar 24, 2025
14 changes: 12 additions & 2 deletions src/anomalib/cli/cli.py
@@ -34,6 +34,7 @@

from anomalib.data import AnomalibDataModule
from anomalib.engine import Engine
from anomalib.metrics import AnomalibMetric
from anomalib.models import AnomalibModule
from anomalib.utils.config import update_config

@@ -253,10 +254,17 @@ def add_export_arguments(self, parser: ArgumentParser) -> None:
type=AnomalibDataModule,
required=False,
)
parser.add_subclass_arguments(
AnomalibMetric,
"metric",
fail_untyped=False,
required=False,
instantiate=False,
)
added = parser.add_method_arguments(
Engine,
"export",
skip={"ov_args", "model", "datamodule"},
skip={"ov_args", "model", "datamodule", "metric"},
)
self.subcommand_method_arguments["export"] = added
add_openvino_export_arguments(parser)
@@ -309,8 +317,8 @@ def instantiate_classes(self) -> None:
self._configure_optimizers_method_to_model()
self.instantiate_engine()
else:
self.config_init = self.parser.instantiate_classes(self.config)
subcommand = self.config["subcommand"]
self.config_init = self.parser.instantiate_classes(self.config)
if subcommand in {"train", "export"}:
self.instantiate_engine()
if "model" in self.config_init[subcommand]:
@@ -461,6 +469,8 @@ def _prepare_subcommand_kwargs(self, subcommand: str) -> dict[str, Any]:
fn_kwargs["dataloaders"] = self.datamodule
elif isinstance(self.datamodule, Path | str):
fn_kwargs["data_path"] = self.datamodule
if self.config[subcommand].get("metric"):
fn_kwargs["metric"] = self.config[subcommand]["metric"]
return fn_kwargs

def _parser(self, subcommand: str | None) -> ArgumentParser:
17 changes: 14 additions & 3 deletions src/anomalib/engine/engine.py
@@ -33,18 +33,19 @@
from pathlib import Path
from typing import Any

from jsonargparse import Namespace
from lightning.pytorch.callbacks import Callback
from lightning.pytorch.loggers import Logger
from lightning.pytorch.trainer import Trainer
from lightning.pytorch.utilities.types import _EVALUATE_OUTPUT, _PREDICT_OUTPUT, EVAL_DATALOADERS, TRAIN_DATALOADERS
from torch.utils.data import DataLoader, Dataset
from torchmetrics import Metric

from anomalib import LearningType
from anomalib.callbacks.checkpoint import ModelCheckpoint
from anomalib.callbacks.timer import TimerCallback
from anomalib.data import AnomalibDataModule, AnomalibDataset, PredictDataset
from anomalib.deploy import CompressionType, ExportType
from anomalib.metrics import AnomalibMetric, get_metric
from anomalib.models import AnomalibModule
from anomalib.utils.path import create_versioned_dir

@@ -732,7 +733,7 @@ def export(
input_size: tuple[int, int] | None = None,
compression_type: CompressionType | None = None,
datamodule: AnomalibDataModule | None = None,
metric: Metric | str | None = None,
metric: AnomalibMetric | str | dict | Namespace | None = None,
ov_args: dict[str, Any] | None = None,
ckpt_path: str | Path | None = None,
) -> Path | None:
@@ -753,7 +754,7 @@
Must be provided if ``CompressionType.INT8_PTQ`` or ``CompressionType.INT8_ACQ`` is selected
(OpenVINO export only).
Defaults to ``None``.
metric (Metric | str | None, optional): Metric to measure quality loss when quantizing.
metric (AnomalibMetric | str | dict | Namespace | None, optional): Metric to measure quality loss when quantizing.
Must be provided if ``CompressionType.INT8_ACQ`` is selected and must return a higher value for better
performance of the model (OpenVINO export only).
Defaults to ``None``.
@@ -787,6 +788,13 @@
anomalib export --model Padim --export_type openvino --ckpt_path <PATH_TO_CHECKPOINT> \
--input_size "[256,256]" --compression_type INT8_PTQ --data MVTec
```
5. You can also quantize the OpenVINO model with the ACQ technique using the following command.
```python
anomalib export --model Padim --export_type openvino --ckpt_path <PATH_TO_CHECKPOINT> \
--input_size "[256,256]" --compression_type INT8_ACQ --data MVTec --metric min_max
```
If the metric fields need to be defined manually, add the following command-line argument:
`--metric.fields "['pred_scores', 'gt_labels']"`
"""
export_type = ExportType(export_type)
self._setup_trainer(model)
@@ -797,6 +805,9 @@
if export_root is None:
export_root = Path(self.trainer.default_root_dir)

if metric is not None and not isinstance(metric, AnomalibMetric):
metric = get_metric(metric, use_placeholder_fields=True)

exported_model_path: Path | None = None
if export_type == ExportType.TORCH:
exported_model_path = model.to_torch(
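For readers using the Python API instead of the CLI, here is a minimal sketch of the equivalent ACQ export call. It is not part of the diff; the model, datamodule, and checkpoint path are placeholder choices, and only the argument names visible in the signature above are assumed.

```python
# Illustrative only: ACQ export through Engine.export, mirroring CLI example 5 above.
from anomalib.data import MVTecAD
from anomalib.deploy import CompressionType, ExportType
from anomalib.engine import Engine
from anomalib.models import Padim

engine = Engine()
model = Padim()
datamodule = MVTecAD()  # required for INT8_PTQ / INT8_ACQ compression

# Passing a plain string goes through get_metric(..., use_placeholder_fields=True),
# so the metric fields are filled in later from the task type during export.
engine.export(
    model=model,
    export_type=ExportType.OPENVINO,
    compression_type=CompressionType.INT8_ACQ,
    datamodule=datamodule,
    metric="min_max",
    ckpt_path="path/to/model.ckpt",  # placeholder path
)
```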
189 changes: 189 additions & 0 deletions src/anomalib/metrics/__init__.py
@@ -40,6 +40,14 @@
# Copyright (C) 2022-2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import logging
from importlib import import_module

from jsonargparse import Namespace
from omegaconf import DictConfig, OmegaConf

from anomalib.utils.path import convert_to_snake_case

from .anomaly_score_distribution import AnomalyScoreDistribution
from .aupr import AUPR
from .aupro import AUPRO
@@ -71,3 +79,184 @@
"PIMO",
"AUPIMO",
]


class UnknownMetricError(ModuleNotFoundError):
    """Raised when no metric matching the requested name or class path can be found."""


logger = logging.getLogger(__name__)


def get_available_metrics() -> set[str]:
"""Get set of available anomaly detection metrics.

Returns a set of metric names in snake_case format from the Anomalib library.
However, fully uppercase metric names, such as ``'AUPRO'``, are returned in lowercase.

Returns:
set[str]: Set of available metric names in snake_case format (e.g.
``'min_max'``, ``'auroc'``, etc.)

Example:
Get all available metrics:

>>> from anomalib.metrics import get_available_metrics
>>> metrics = get_available_metrics()
>>> print(sorted(list(metrics))) # doctest: +NORMALIZE_WHITESPACE
['aupimo', 'aupr', 'aupro', 'auroc',
'f1_adaptive_threshold', 'f1_max', 'f1_score',
'min_max', 'pimo', 'pro']

Note:
The returned metric names can be used with :func:`get_metric` to instantiate
the corresponding metrics class.
"""
return {
cls.__name__.lower() if cls.__name__.isupper() else convert_to_snake_case(cls.__name__)
for cls in AnomalibMetric.__subclasses__()
if cls.__name__ != "AnomalibMetric"
}


def _get_metric_class_by_name(name: str) -> type[AnomalibMetric]:
"""Retrieve an anomaly metric class based on its name.

This internal function takes a metric name and returns the corresponding metric class.
The name matching is case-insensitive and supports both snake_case and PascalCase
formats.

Args:
name (str): Name of the metric to retrieve. Can be in snake_case (e.g.
``"min_max"``) or PascalCase (e.g. ``"MinMax"``). The name is
case-insensitive.

Raises:
UnknownMetricError: If no metric is found matching the provided name. The error
message includes the list of available metrics.

Returns:
type[AnomalibMetric]: Metric class that inherits from ``AnomalibMetric``.

Examples:
>>> from anomalib.metrics import _get_metric_class_by_name
>>> metric_class = _get_metric_class_by_name("auroc")
>>> metric_class.__name__
'AUROC'
>>> metric_class = _get_metric_class_by_name("min_max")
>>> metric_class.__name__
'MinMax'
"""
from anomalib.models import convert_snake_to_pascal_case

logger.info("Loading the metric..")
metric_class: type[AnomalibMetric] | None = None

name = convert_snake_to_pascal_case(name).lower() if "_" in name else name.lower()
for metric in AnomalibMetric.__subclasses__():
if name == metric.__name__.lower():
metric_class = metric
if metric_class is None:
logger.exception(f"Could not find the metric {name}. Available metric are {get_available_metrics()}")
raise UnknownMetricError

return metric_class


def get_metric(
metric: DictConfig | str | dict | Namespace,
use_placeholder_fields: bool = False,
*args,
**kwdargs,
) -> AnomalibMetric:
"""Get an anomaly detection metric instance.

This function instantiates an anomaly detection metric based on the provided
configuration or metric name. It supports multiple ways of metric specification
including string names, dictionaries and OmegaConf configurations.

Args:
metric (DictConfig | str | dict | Namespace): Metric specification that can be:
- A string with metric name (e.g. ``"min_max"``, ``"auroc"``)
- A dictionary with ``class_path`` and optional ``init_args``
- An OmegaConf DictConfig with similar structure as dict
- A Namespace object with similar structure as dict
use_placeholder_fields (bool): If `True` and `fields` is not provided
positionally in `args` or as a key in `kwdargs`, `fields` is initialized
with a list containing an empty string as a placeholder.
*args: Variable length argument list passed to metric initialization.
**kwdargs: Arbitrary keyword arguments passed to metric initialization.

Returns:
AnomalibMetric: Instantiated anomaly detection metric.

Raises:
TypeError: If ``metric`` argument is of unsupported type.
UnknownMetricError: If specified metric class cannot be found.

Examples:
Get metric by name:

>>> metric = get_metric("min_max", use_placeholder_fields=True)
>>> metric = get_metric("f1_score", use_placeholder_fields=True)
>>> metric = get_metric("auroc", fields=("pred_labels", "gt_labels"))

Get metric using dictionary config:

>>> metric = get_metric({"class_path": "AUPRO"})
>>> metric = get_metric(
... {"class_path": "MinMax"},
... fields=("pred_labels", "gt_labels")
... )
>>> metric = get_metric({
... "class_path": "F1Score",
... "init_args": {"fields": ("pred_labels", "gt_labels")}
... })

Get metric using fully qualified path:

>>> metric = get_metric({
... "class_path": "anomalib.metrics.F1Score",
... "init_args": {"fields": ("pred_labels", "gt_labels")}
... })
"""
_metric: AnomalibMetric
if isinstance(metric, str):
_metric_class = _get_metric_class_by_name(metric)
if use_placeholder_fields and not (len(args) or kwdargs.get("fields", None)):
logger.warning("Initializing the metrics with empty fields parameter which may raise exception.")
kwdargs["fields"] = [""]
_metric = _metric_class(*args, **kwdargs)
elif isinstance(metric, DictConfig | Namespace | dict):
if isinstance(metric, dict):
metric = OmegaConf.create(metric)
path_split = metric.class_path.rsplit(".", 1)
try:
module = import_module(path_split[0]) if len(path_split) > 1 else import_module("anomalib.metrics")
except ModuleNotFoundError as exception:
logger.exception(
f"Could not find the module {metric.class_path}. Available metrics are {get_available_metrics()}",
)
raise UnknownMetricError from exception
try:
metric_class = getattr(module, path_split[-1])
init_args = metric.get("init_args", {})
if isinstance(init_args, Namespace):
for key, value in kwdargs.items():
init_args.update(value, key)
else:
init_args.update(kwdargs)
if use_placeholder_fields and not (len(args) or init_args.get("fields", None)):
logger.warning("Initializing the metrics with empty fields parameter which may raise exception.")
init_args["fields"] = [""]
_metric = metric_class(*args, **init_args)
except AttributeError as exception:
logger.exception(
f"Could not find the metric {metric.class_path}. Available metrics are {get_available_metrics()}",
)
raise UnknownMetricError from exception
else:
logger.error(f"Unsupported type {type(metric)} for metric configuration.")
raise TypeError
return _metric
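
A small usage sketch of the error path (not part of the diff): unknown names raise `UnknownMetricError`, and `get_available_metrics()` lists the valid snake_case names.

```python
# Illustrative only; assumes the helpers defined in this module.
from anomalib.metrics import UnknownMetricError, get_available_metrics, get_metric

try:
    get_metric("not_a_metric", use_placeholder_fields=True)
except UnknownMetricError:
    # e.g. ['aupimo', 'aupr', 'aupro', 'auroc', 'f1_adaptive_threshold', ...]
    print(sorted(get_available_metrics()))
```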
20 changes: 15 additions & 5 deletions src/anomalib/models/components/base/export_mixin.py
@@ -321,7 +321,7 @@ def _post_training_quantization_ov(
f">300 images recommended for INT8 quantization, found only {len(dataloader.dataset)} images",
)

calibration_dataset = nncf.Dataset(dataloader, lambda x: x["image"])
calibration_dataset = nncf.Dataset(dataloader, lambda x: x.image)
return nncf.quantize(model, calibration_dataset)

@staticmethod
@@ -364,6 +364,11 @@ def _accuracy_control_quantization_ov(
msg = "Metric must be provided for OpenVINO INT8_ACQ compression"
raise ValueError(msg)

# Setting up the fields parameter in Metric if Metric is initialized with placeholder.
Contributor commented:

Did you mean without a placeholder?

srikesh-07 (Author) replied on Mar 12, 2025:

What I’m trying to convey is this:

Let's say the user passes a metric via the CLI using --metric f1_score but does not specify any field parameters (i.e., --metric.fields is not provided). In this case, F1Score is instantiated with a placeholder ([""]) in the model.export method. Later, the metric's fields are updated based on the model type in export_mixin.py.

I also noticed a couple of grammatical mistakes in an inline comment and a logger statement:

  • In L367:
    Change "if Metric is initialized with placeholder" to "if Metric was initialized with a placeholder".

  • In L370:
    Modify the logger statement from:

    "The fields of metric are initialized empty. Setting it to model fields {metric.fields}"
    to:
    "Since the fields of the metric were initialized as empty, they are now being set to the model fields: {metric.fields}."

I hope this clarifies your question. Let me know if you have any further doubts; I'd be happy to help!

Thank you.
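
A minimal sketch of the flow described in this reply (illustrative, not part of the diff; it relies only on the helpers added in this PR):

```python
# `--metric f1_score` without `--metric.fields` resolves to a metric whose fields
# hold the placeholder [""].
from anomalib.metrics import get_metric

metric = get_metric("f1_score", use_placeholder_fields=True)
print(metric.fields[0] == "")  # True: the placeholder is in place

# During INT8_ACQ export, _accuracy_control_quantization_ov later replaces the
# placeholder based on the task: ("anomaly_map", "gt_mask") for segmentation,
# ("pred_score", "gt_label") otherwise.
```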

if metric.fields[0] == "":
metric.fields = ("anomaly_map", "gt_mask") if task == TaskType.SEGMENTATION else ("pred_score", "gt_label")
logger.info(f"The fields of metric are initialized empty. Setting it to model fields {metric.fields}")

model_input = model.input(0)

if model_input.partial_shape[0].is_static:
@@ -376,15 +381,20 @@
f">300 images recommended for INT8 quantization, found only {len(dataloader.dataset)} images",
)

calibration_dataset = nncf.Dataset(dataloader, lambda x: x["image"])
calibration_dataset = nncf.Dataset(dataloader, lambda x: x.image)
validation_dataset = nncf.Dataset(datamodule.test_dataloader())

# validation function to evaluate the quality loss after quantization
def val_fn(nncf_model: "CompiledModel", validation_data: Iterable) -> float:
for batch in validation_data:
preds = torch.from_numpy(nncf_model(batch["image"])[0])
target = batch["label"] if task == TaskType.CLASSIFICATION else batch["mask"][:, None, :, :]
metric.update(preds, target)
preds = nncf_model(batch.image)
for key, pred in preds.items():
name = key.get_any_name()
setattr(batch, name, torch.from_numpy(pred))
if batch.gt_mask is not None:
batch.gt_mask = batch.gt_mask.unsqueeze(dim=1)
batch.pred_score = batch.pred_score.squeeze(dim=1) # Squeezing since it is binary. (B, 1) -> (B)
metric.update(batch)
return metric.compute()

return nncf.quantize_with_accuracy_control(model, calibration_dataset, validation_dataset, val_fn)