From ef4190d2d5e88578c54f017064c57bf5db6ca67b Mon Sep 17 00:00:00 2001
From: Dick Ameln
Date: Wed, 25 May 2022 16:58:55 +0200
Subject: [PATCH 01/13] Visualizer improvements Part II (#298)

* add methods for adding labels to image
* refactor visualizer
* move add_label to post processing helpers
* fix tests
* support single image in visualizer
* call generate
* revert default task type
* typing
* refactor inferencer and visualizer
* add torch inference entrypoint
* revert torch inferencer for backward compatibility
* add torch inferencer to init files
* pause visualization
* openvino.py -> openvino_inference.py
* address comments
* remove unused argument
* remove deprecation warning
---
 anomalib/deploy/inferencers/__init__.py      |   2 +-
 .../models/components/base/anomaly_module.py |   2 +-
 anomalib/post_processing/visualizer.py       |   1 +
 anomalib/utils/callbacks/model_loader.py     |  10 +-
 .../utils/callbacks/visualizer_callback.py   | 100 ++++++------
 .../dummy_lightning_model.py                 |   2 +
 tools/inference.py                           | 144 ++---------------
 tools/openvino_inference.py                  | 145 ++++++++++++++++++
 8 files changed, 232 insertions(+), 174 deletions(-)
 create mode 100644 tools/openvino_inference.py

diff --git a/anomalib/deploy/inferencers/__init__.py b/anomalib/deploy/inferencers/__init__.py
index 2f5391dec8..91894e821e 100644
--- a/anomalib/deploy/inferencers/__init__.py
+++ b/anomalib/deploy/inferencers/__init__.py
@@ -18,4 +18,4 @@
 from .openvino import OpenVINOInferencer
 from .torch import TorchInferencer

-__all__ = ["Inferencer", "TorchInferencer", "OpenVINOInferencer"]
+__all__ = ["Inferencer", "OpenVINOInferencer", "TorchInferencer"]
diff --git a/anomalib/models/components/base/anomaly_module.py b/anomalib/models/components/base/anomaly_module.py
index cff2985c6d..6c055acd89 100644
--- a/anomalib/models/components/base/anomaly_module.py
+++ b/anomalib/models/components/base/anomaly_module.py
@@ -103,7 +103,7 @@ def test_step(self, batch, _):  # pylint: disable=arguments-differ
             Dictionary containing images, features, true labels and masks.
             These are required in `validation_epoch_end` for feature concatenation.
         """
-        return self.validation_step(batch, _)
+        return self.predict_step(batch, _)

     def validation_step_end(self, val_step_outputs):  # pylint: disable=arguments-differ
         """Called at the end of each validation step."""
diff --git a/anomalib/post_processing/visualizer.py b/anomalib/post_processing/visualizer.py
index d4a980af51..fbc891479b 100644
--- a/anomalib/post_processing/visualizer.py
+++ b/anomalib/post_processing/visualizer.py
@@ -70,6 +70,7 @@ def show(self):
         """Show image on a matplotlib figure."""
         self.generate()
         self.figure.show()
+        plt.waitforbuttonpress()

     def save(self, filename: Path):
         """Save image.
diff --git a/anomalib/utils/callbacks/model_loader.py b/anomalib/utils/callbacks/model_loader.py
index 146b1663fe..07516489b4 100644
--- a/anomalib/utils/callbacks/model_loader.py
+++ b/anomalib/utils/callbacks/model_loader.py
@@ -30,10 +30,18 @@ class LoadModelCallback(Callback):
     def __init__(self, weights_path):
        self.weights_path = weights_path

-    def on_test_start(self, trainer, pl_module: AnomalyModule) -> None:  # pylint: disable=W0613
+    def on_test_start(self, _trainer, pl_module: AnomalyModule) -> None:  # pylint: disable=W0613
         """Call when the test begins.

         Loads the model weights from ``weights_path`` into the PyTorch module.
""" logger.info("Loading the model from %s", self.weights_path) pl_module.load_state_dict(torch.load(self.weights_path)["state_dict"]) + + def on_predict_start(self, _trainer, pl_module: AnomalyModule) -> None: + """Call when inferebce begins. + + Loads the model weights from ``weights_path`` into the PyTorch module. + """ + logger.info("Loading the model from %s", self.weights_path) + pl_module.load_state_dict(torch.load(self.weights_path)["state_dict"]) diff --git a/anomalib/utils/callbacks/visualizer_callback.py b/anomalib/utils/callbacks/visualizer_callback.py index bab411b030..5f37a48fb1 100644 --- a/anomalib/utils/callbacks/visualizer_callback.py +++ b/anomalib/utils/callbacks/visualizer_callback.py @@ -15,7 +15,7 @@ # and limitations under the License. from pathlib import Path -from typing import Any, List, Optional, cast +from typing import Any, Iterator, List, Optional, cast from warnings import warn import pytorch_lightning as pl @@ -28,7 +28,6 @@ Visualizer, add_anomalous_label, add_normal_label, - compute_mask, superimpose_anomaly_map, ) from anomalib.pre_processing.transforms import Denormalize @@ -97,6 +96,60 @@ def _add_images( if "local" in self.log_images_to: visualizer.save(Path(trainer.default_root_dir) / "images" / filename.parent.name / filename.name) + def generate_visualizer(self, outputs) -> Iterator[Visualizer]: + """Yields a visualizer object for each of the images in the output.""" + for i in range(outputs["image"].size(0)): + visualizer = Visualizer() + + image = Denormalize()(outputs["image"][i].cpu()) + anomaly_map = outputs["anomaly_maps"][i].cpu().numpy() + heat_map = superimpose_anomaly_map(anomaly_map, image, normalize=not self.inputs_are_normalized) + pred_score = outputs["pred_scores"][i].cpu().numpy() + pred_label = outputs["pred_labels"][i].cpu().numpy() + + if self.task == "segmentation": + pred_mask = outputs["pred_masks"][i].squeeze().int().cpu().numpy() * 255 + vis_img = mark_boundaries(image, pred_mask, color=(1, 0, 0), mode="thick") + visualizer.add_image(image=image, title="Image") + if "mask" in outputs: + true_mask = outputs["mask"][i].cpu().numpy() * 255 + visualizer.add_image(image=true_mask, color_map="gray", title="Ground Truth") + visualizer.add_image(image=heat_map, title="Predicted Heat Map") + visualizer.add_image(image=pred_mask, color_map="gray", title="Predicted Mask") + visualizer.add_image(image=vis_img, title="Segmentation Result") + elif self.task == "classification": + visualizer.add_image(image, title="Image") + if pred_label: + image_classified = add_anomalous_label(heat_map, pred_score) + else: + image_classified = add_normal_label(heat_map, 1 - pred_score) + visualizer.add_image(image=image_classified, title="Prediction") + + yield visualizer + + def on_predict_batch_end( + self, + _trainer: pl.Trainer, + _pl_module: AnomalyModule, + outputs: Optional[STEP_OUTPUT], + _batch: Any, + _batch_idx: int, + _dataloader_idx: int, + ) -> None: + """Show images at the end of every batch. + + Args: + _trainer (Trainer): Pytorch lightning trainer object (unused). + _pl_module (LightningModule): Lightning modules derived from BaseAnomalyLightning object as + currently only they support logging images. + outputs (Dict[str, Any]): Outputs of the current test step. + _batch (Any): Input batch of the current test step (unused). + _batch_idx (int): Index of the current test batch (unused). + _dataloader_idx (int): Index of the dataloader that yielded the current batch (unused). 
+ """ + for visualizer in self.generate_visualizer(outputs): + visualizer.show() + def on_test_batch_end( self, trainer: pl.Trainer, @@ -118,49 +171,10 @@ def on_test_batch_end( _dataloader_idx (int): Index of the dataloader that yielded the current batch (unused). """ assert outputs is not None - - if self.inputs_are_normalized: - normalize = False # anomaly maps are already normalized - else: - normalize = True # raw anomaly maps. Still need to normalize - - threshold = pl_module.pixel_metrics.threshold - for i, (filename, image, anomaly_map, pred_score, gt_label) in enumerate( - zip( - outputs["image_path"], - outputs["image"], - outputs["anomaly_maps"], - outputs["pred_scores"], - outputs["label"], - ) - ): - image = Denormalize()(image.cpu()) - anomaly_map = anomaly_map.cpu().numpy() - heat_map = superimpose_anomaly_map(anomaly_map, image, normalize=normalize) - pred_mask = compute_mask(anomaly_map, threshold) - vis_img = mark_boundaries(image, pred_mask, color=(1, 0, 0), mode="thick") - - visualizer = Visualizer() - - if self.task == "segmentation": - visualizer.add_image(image=image, title="Image") - if "mask" in outputs: - true_mask = outputs["mask"][i].cpu().numpy() * 255 - visualizer.add_image(image=true_mask, color_map="gray", title="Ground Truth") - visualizer.add_image(image=heat_map, title="Predicted Heat Map") - visualizer.add_image(image=pred_mask, color_map="gray", title="Predicted Mask") - visualizer.add_image(image=vis_img, title="Segmentation Result") - elif self.task == "classification": - gt_im = add_anomalous_label(image) if gt_label else add_normal_label(image) - visualizer.add_image(gt_im, title="Image/True label") - if pred_score >= threshold: - image_classified = add_anomalous_label(heat_map, pred_score) - else: - image_classified = add_normal_label(heat_map, 1 - pred_score) - visualizer.add_image(image=image_classified, title="Prediction") + for i, visualizer in enumerate(self.generate_visualizer(outputs)): visualizer.generate() - self._add_images(visualizer, pl_module, trainer, Path(filename)) + self._add_images(visualizer, pl_module, trainer, Path(outputs["image_path"][i])) visualizer.close() def on_test_end(self, _trainer: pl.Trainer, pl_module: AnomalyModule) -> None: diff --git a/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py b/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py index fe17b1d885..0a99977861 100644 --- a/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py +++ b/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py @@ -67,6 +67,8 @@ def test_step(self, batch, _): mask=torch.zeros((1, 100, 100)), anomaly_maps=torch.ones((1, 100, 100)), label=torch.Tensor([0]), + pred_labels=torch.Tensor([0]), + pred_masks=torch.zeros((1, 100, 100)), ) return outputs diff --git a/tools/inference.py b/tools/inference.py index 784f34fa8e..a087a1fb61 100644 --- a/tools/inference.py +++ b/tools/inference.py @@ -1,34 +1,16 @@ -"""Anomalib Inferencer Script. - -This script performs inference by reading a model config file from -command line, and show the visualization results. -""" - -# Copyright (C) 2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. +"""Inference Entrypoint script.""" import warnings from argparse import ArgumentParser, Namespace -from importlib import import_module from pathlib import Path -from typing import Optional -import cv2 -import numpy as np +from pytorch_lightning import Trainer +from torch.utils.data import DataLoader from anomalib.config import get_configurable_parameters -from anomalib.deploy.inferencers.base import Inferencer +from anomalib.data.inference import InferenceDataset +from anomalib.models import get_model +from anomalib.utils.callbacks import get_callbacks def get_args() -> Namespace: @@ -43,15 +25,6 @@ def get_args() -> Namespace: parser.add_argument("--config", type=Path, required=True, help="Path to a model config file") parser.add_argument("--weight_path", type=Path, required=True, help="Path to a model weights") parser.add_argument("--image_path", type=Path, required=True, help="Path to an image to infer.") - parser.add_argument("--save_path", type=Path, required=False, help="Path to save the output image.") - parser.add_argument("--meta_data", type=Path, required=False, help="Path to JSON file containing the metadata.") - parser.add_argument( - "--overlay_mask", - type=bool, - required=False, - default=False, - help="Overlay the segmentation mask on the image. It assumes that the task is segmentation.", - ) args = parser.parse_args() if args.model_config_path is not None: @@ -65,106 +38,21 @@ def get_args() -> Namespace: return args -def add_label(prediction: np.ndarray, scores: float, font: int = cv2.FONT_HERSHEY_PLAIN) -> np.ndarray: - """If the model outputs score, it adds the score to the output image. - - Args: - prediction (np.ndarray): Resized anomaly map. - scores (float): Confidence score. - - Returns: - np.ndarray: Image with score text. - """ - text = f"Confidence Score {scores:.0%}" - font_size = prediction.shape[1] // 1024 + 1 # Text scale is calculated based on the reference size of 1024 - (width, height), baseline = cv2.getTextSize(text, font, font_size, thickness=font_size // 2) - label_patch = np.zeros((height + baseline, width + baseline, 3), dtype=np.uint8) - label_patch[:, :] = (225, 252, 134) - cv2.putText(label_patch, text, (0, baseline // 2 + height), font, font_size, 0, lineType=cv2.LINE_AA) - prediction[: baseline + height, : baseline + width] = label_patch - return prediction - - -def stream() -> None: - """Stream predictions. - - Show/save the output if path is to an image. If the path is a directory, go over each image in the directory. - """ - # Get the command line arguments, and config from the config.yaml file. - # This config file is also used for training and contains all the relevant - # information regarding the data, model, train and inference details. +def infer(): + """Run inference.""" args = get_args() config = get_configurable_parameters(config_path=args.config) + config.model["weight_file"] = str(args.weight_path) - # Get the inferencer. We use .ckpt extension for Torch models and (onnx, bin) - # for the openvino models. 
- extension = args.weight_path.suffix - inferencer: Inferencer - if extension in (".ckpt"): - module = import_module("anomalib.deploy.inferencers.torch") - TorchInferencer = getattr(module, "TorchInferencer") # pylint: disable=invalid-name - inferencer = TorchInferencer(config=config, model_source=args.weight_path, meta_data_path=args.meta_data) - - elif extension in (".onnx", ".bin", ".xml"): - module = import_module("anomalib.deploy.inferencers.openvino") - OpenVINOInferencer = getattr(module, "OpenVINOInferencer") # pylint: disable=invalid-name - inferencer = OpenVINOInferencer(config=config, path=args.weight_path, meta_data_path=args.meta_data) - - else: - raise ValueError( - f"Model extension is not supported. Torch Inferencer exptects a .ckpt file," - f"OpenVINO Inferencer expects either .onnx, .bin or .xml file. Got {extension}" - ) - if args.image_path.is_dir(): - # Write the output to save_path in the same structure as the input directory. - for image in args.image_path.glob("**/*"): - if image.is_file() and image.suffix in (".jpg", ".png", ".jpeg"): - # Here save_path is assumed to be a directory. Image subdirectories are appended to the save_path. - save_path = Path(args.save_path / image.relative_to(args.image_path).parent) if args.save_path else None - infer(image, inferencer, save_path, args.overlay_mask) - elif args.image_path.suffix in (".jpg", ".png", ".jpeg"): - infer(args.image_path, inferencer, args.save_path, args.overlay_mask) - else: - raise ValueError( - f"Image extension is not supported. Supported extensions are .jpg, .png, .jpeg." - f" Got {args.image_path.suffix}" - ) - - -def infer(image_path: Path, inferencer: Inferencer, save_path: Optional[Path] = None, overlay: bool = False) -> None: - """Perform inference on a single image. - - Args: - image_path (Path): Path to image/directory containing images. - inferencer (Inferencer): Inferencer to use. - save_path (Path, optional): Path to save the output image. If this is None, the output is visualized. - overlay (bool, optional): Overlay the segmentation mask on the image. It assumes that the task is segmentation. - """ - # Perform inference for the given image or image path. if image - # path is provided, `predict` method will read the image from - # file for convenience. We set the superimpose flag to True - # to overlay the predicted anomaly map on top of the input image. - output = inferencer.predict(image=image_path, superimpose=True, overlay_mask=overlay) + model = get_model(config) + callbacks = get_callbacks(config) - # Incase both anomaly map and scores are returned add scores to the image. - if isinstance(output, tuple): - anomaly_map, score = output - output = add_label(anomaly_map, score) + trainer = Trainer(callbacks=callbacks, **config.trainer) - # Show or save the output image, depending on what's provided as - # the command line argument. - output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) - if save_path is None: - cv2.imshow("Anomaly Map", output) - cv2.waitKey(0) # wait for any key press - else: - # Create directory for parents if it doesn't exist. 
-        save_path.parent.mkdir(parents=True, exist_ok=True)
-        if save_path.suffix == "":  # This is a directory
-            save_path.mkdir(exist_ok=True)  # Create current directory
-            save_path = save_path / image_path.name
-        cv2.imwrite(filename=str(save_path), img=output)
+    dataset = InferenceDataset(args.image_path, image_size=tuple(config.dataset.image_size))
+    dataloader = DataLoader(dataset)
+    trainer.predict(model=model, dataloaders=[dataloader])


 if __name__ == "__main__":
-    stream()
+    infer()
diff --git a/tools/openvino_inference.py b/tools/openvino_inference.py
new file mode 100644
index 0000000000..206fa82bf1
--- /dev/null
+++ b/tools/openvino_inference.py
@@ -0,0 +1,145 @@
+"""Anomalib OpenVINO Inferencer Script.
+
+This script performs inference by reading a model config file from the
+command line, and shows the visualization results.
+"""
+
+# Copyright (C) 2020 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+
+from argparse import ArgumentParser, Namespace
+from pathlib import Path
+from typing import Optional
+
+import cv2
+import numpy as np
+from torchvision.datasets.folder import IMG_EXTENSIONS
+
+from anomalib.config import get_configurable_parameters
+from anomalib.deploy.inferencers import OpenVINOInferencer
+from anomalib.deploy.inferencers.base import Inferencer
+
+
+def get_args() -> Namespace:
+    """Get command line arguments.
+
+    Returns:
+        Namespace: Parsed command line arguments.
+    """
+    parser = ArgumentParser()
+    parser.add_argument("--config", type=Path, required=True, help="Path to a model config file")
+    parser.add_argument("--weight_path", type=Path, required=True, help="Path to the model weights")
+    parser.add_argument("--image_path", type=Path, required=True, help="Path to an image to infer.")
+    parser.add_argument("--save_path", type=Path, required=False, help="Path to save the output image.")
+    parser.add_argument("--meta_data", type=Path, required=True, help="Path to JSON file containing the metadata.")
+    parser.add_argument(
+        "--overlay_mask",
+        type=bool,
+        required=False,
+        default=False,
+        help="Overlay the segmentation mask on the image. It assumes that the task is segmentation.",
+    )
+
+    args = parser.parse_args()
+
+    return args
+
+
+def add_label(prediction: np.ndarray, scores: float, font: int = cv2.FONT_HERSHEY_PLAIN) -> np.ndarray:
+    """If the model outputs a score, add the score to the output image.
+
+    Args:
+        prediction (np.ndarray): Resized anomaly map.
+        scores (float): Confidence score.
+
+    Returns:
+        np.ndarray: Image with score text.
+    """
+    text = f"Confidence Score {scores:.0%}"
+    font_size = prediction.shape[1] // 1024 + 1  # Text scale is calculated based on the reference size of 1024
+    (width, height), baseline = cv2.getTextSize(text, font, font_size, thickness=font_size // 2)
+    label_patch = np.zeros((height + baseline, width + baseline, 3), dtype=np.uint8)
+    label_patch[:, :] = (225, 252, 134)
+    cv2.putText(label_patch, text, (0, baseline // 2 + height), font, font_size, 0, lineType=cv2.LINE_AA)
+    prediction[: baseline + height, : baseline + width] = label_patch
+    return prediction
+
+
+def stream() -> None:
+    """Stream predictions.
+
+    Show/save the output if the path is to an image. If the path is a directory, go over each image in the directory.
+    """
+    # Get the command line arguments, and config from the config.yaml file.
+    # This config file is also used for training and contains all the relevant
+    # information regarding the data, model, train and inference details.
+    args = get_args()
+    config = get_configurable_parameters(config_path=args.config)
+
+    # Get the inferencer.
+    inferencer = OpenVINOInferencer(config=config, path=args.weight_path, meta_data_path=args.meta_data)
+
+    if args.image_path.is_dir():
+        # Write the output to save_path in the same structure as the input directory.
+        for image in args.image_path.glob("**/*"):
+            if image.is_file() and image.suffix in IMG_EXTENSIONS:
+                # Here save_path is assumed to be a directory. Image subdirectories are appended to the save_path.
+                save_path = Path(args.save_path / image.relative_to(args.image_path).parent) if args.save_path else None
+                infer(image, inferencer, save_path, args.overlay_mask)
+    elif args.image_path.suffix in IMG_EXTENSIONS:
+        infer(args.image_path, inferencer, args.save_path, args.overlay_mask)
+    else:
+        raise ValueError(
+            f"Image extension is not supported. Supported extensions are {IMG_EXTENSIONS}."
+            f" Got {args.image_path.suffix}"
+        )
+
+
+def infer(image_path: Path, inferencer: Inferencer, save_path: Optional[Path] = None, overlay: bool = False) -> None:
+    """Perform inference on a single image.
+
+    Args:
+        image_path (Path): Path to image/directory containing images.
+        inferencer (Inferencer): Inferencer to use.
+        save_path (Path, optional): Path to save the output image. If this is None, the output is visualized.
+        overlay (bool, optional): Overlay the segmentation mask on the image. It assumes that the task is segmentation.
+    """
+    # Perform inference for the given image or image path. If an image
+    # path is provided, the `predict` method will read the image from
+    # file for convenience. We set the superimpose flag to True
+    # to overlay the predicted anomaly map on top of the input image.
+    output = inferencer.predict(image=image_path, superimpose=True, overlay_mask=overlay)
+
+    # In case both the anomaly map and scores are returned, add the scores to the image.
+    if isinstance(output, tuple):
+        anomaly_map, score = output
+        output = add_label(anomaly_map, score)
+
+    # Show or save the output image, depending on what's provided as
+    # the command line argument.
+    output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
+    if save_path is None:
+        cv2.imshow("Anomaly Map", output)
+        cv2.waitKey(0)  # wait for any key press
+    else:
+        # Create the parent directory if it doesn't exist.
+        save_path.parent.mkdir(parents=True, exist_ok=True)
+        if save_path.suffix == "":  # This is a directory
+            save_path.mkdir(exist_ok=True)  # Create current directory
+            save_path = save_path / image_path.name
+        cv2.imwrite(filename=str(save_path), img=output)
+
+
+if __name__ == "__main__":
+    stream()

From 1ed2f1e573cfdc163e49cf7ccd4e3bb441e17e80 Mon Sep 17 00:00:00 2001
From: Dick Ameln
Date: Wed, 8 Jun 2022 13:26:53 +0200
Subject: [PATCH 02/13] =?UTF-8?q?=F0=9F=93=9C=20Add=20Inference=20Document?=
 =?UTF-8?q?ation=20(#308)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* add inference and export guides
* finish sentence
* fix typo
* add to index, change required field
---
 docs/source/guides/export.rst    | 34 +++++++++++++++++++
 docs/source/guides/inference.rst | 57 ++++++++++++++++++++++++++++++++
 docs/source/index.rst            |  2 ++
 3 files changed, 93 insertions(+)
 create mode 100644 docs/source/guides/export.rst
 create mode 100644 docs/source/guides/inference.rst

diff --git a/docs/source/guides/export.rst b/docs/source/guides/export.rst
new file mode 100644
index 0000000000..c699dd0b07
--- /dev/null
+++ b/docs/source/guides/export.rst
@@ -0,0 +1,34 @@
+Export & Optimization
+---------------------
+This page explains how to export your trained models to OpenVINO format, and how the performance of the exported OpenVINO models can be optimized. For an explanation of how the exported models can be deployed, please refer to the inference guide: :ref:`inference_documentation`.
+
+Export
+======
+Anomalib models are fully compatible with the OpenVINO framework for accelerating inference on Intel hardware. To export a model to OpenVINO format, simply set the openvino optimization flag to ``true`` in the model config as shown below, and trigger a training run. When the training finishes, the trained model weights will be converted to OpenVINO Intermediate Representation (IR) format, and written to the file system in the chosen results folder.
+
+.. code-block:: none
+   :caption: Add this configuration to your config.yaml file to export your model to OpenVINO IR after training.
+
+   optimization:
+     openvino:
+       apply: true
+
+As a prerequisite, make sure that all required packages listed in ``requirements/openvino.txt`` are installed in your environment.
+
+Optimization
+============
+Anomalib supports OpenVINO's Neural Network Compression Framework (NNCF) to further improve the performance of the exported OpenVINO models. NNCF optimizes the neural network components of the anomaly models during the training process, and can therefore achieve a better performance-accuracy trade-off than post-training approaches.
+
+.. note::
+   NNCF support is in an experimental stage, and is currently only available for the STFPM model.
+
+To enable NNCF, add the following configuration to your ``config.yaml``:
+
+.. code-block:: none
+   :caption: Add this configuration to your config.yaml file to enable NNCF.
+
+   optimization:
+     nncf:
+       apply: true
+
+The compressed model will be stored in the OpenVINO IR format in the specified results directory.
diff --git a/docs/source/guides/inference.rst b/docs/source/guides/inference.rst
new file mode 100644
index 0000000000..78ac21b40e
--- /dev/null
+++ b/docs/source/guides/inference.rst
@@ -0,0 +1,57 @@
+.. _inference_documentation:
+
+Inference
+---------
+Anomalib provides entrypoint scripts for using a trained model to generate predictions from a source of image data.
+This guide explains how to run inference with the standard PyTorch model and the exported OpenVINO model.
+
+
+Torch Inference
+===============
+The entrypoint script in ``tools/inference.py`` can be used to run inference with a trained PyTorch model. The entrypoint script has several command line arguments that can be used to configure inference:
+
++-------------+----------+-------------------------------------------------------------------------------------+
+| Parameter   | Required | Description                                                                         |
++=============+==========+=====================================================================================+
+| config      | True     | Path to the model config file.                                                      |
++-------------+----------+-------------------------------------------------------------------------------------+
+| weight_path | True     | Path to the ``.ckpt`` model checkpoint file.                                        |
++-------------+----------+-------------------------------------------------------------------------------------+
+| image_path  | True     | Path to the image source. This can be a single image or a folder of images.         |
++-------------+----------+-------------------------------------------------------------------------------------+
+| save_path   | False    | Path to which the output images should be saved. Leave empty for live visualization.|
++-------------+----------+-------------------------------------------------------------------------------------+
+
+To run inference, call the script from the command line with the following parameters, e.g.:
+
+``python tools/inference.py --config padim.yaml --weight_path results/weights/model.ckpt --image_path image.png``
+
+This will run inference on the specified image file or all images in the folder. A visualization of the inference results, including the predicted heatmap and segmentation results (if applicable), will be displayed on the screen.
+
+
+OpenVINO Inference
+==================
+To run OpenVINO inference, first make sure that your model has been exported to the OpenVINO IR format. Once the model has been exported, OpenVINO inference can be triggered by running the OpenVINO entrypoint script in ``tools/openvino_inference.py``. The command line arguments are very similar to those of the PyTorch inference entrypoint script:
+
++-------------+----------+-------------------------------------------------------------------------------------+
+| Parameter   | Required | Description                                                                         |
++=============+==========+=====================================================================================+
+| config      | True     | Path to the model config file.                                                      |
++-------------+----------+-------------------------------------------------------------------------------------+
+| weight_path | True     | Path to the OpenVINO IR model file (either ``.xml`` or ``.bin``)                    |
++-------------+----------+-------------------------------------------------------------------------------------+
+| image_path  | True     | Path to the image source. This can be a single image or a folder of images.         |
++-------------+----------+-------------------------------------------------------------------------------------+
+| save_path   | False    | Path to which the output images should be saved. Leave empty for live visualization.|
++-------------+----------+-------------------------------------------------------------------------------------+
+| meta_data   | True     | Path to the JSON file containing the model's meta data (e.g. normalization          |
+|             |          | parameters and anomaly score threshold).                                            |
++-------------+----------+-------------------------------------------------------------------------------------+
+
+For correct inference results, the ``meta_data`` argument should be specified and point to the ``meta_data.json`` file that was generated when exporting the OpenVINO IR model. The file is stored in the same folder as the ``.xml`` and ``.bin`` files of the model.
+
+As an example, OpenVINO inference can be triggered by the following command:
+
+``python tools/openvino_inference.py --config padim.yaml --weight_path results/openvino/model.xml --image_path image.png --meta_data results/openvino/meta_data.json``
+
+Similar to PyTorch inference, the visualization results will be displayed on the screen, and optionally saved to the file system location specified by the ``save_path`` parameter.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index e61e29a998..22aabaddb4 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -50,6 +50,8 @@ This repository as been tested on
    guides/structure_of_documentation
    guides/using_tox
    guides/using_pre_commit
+   guides/inference
+   guides/export

 .. toctree::
    :maxdepth: 1

From 46dc93cbede2f1cb872ae3940e1f8f9cd9529d1d Mon Sep 17 00:00:00 2001
From: Dick Ameln
Date: Fri, 17 Jun 2022 09:53:04 +0200
Subject: [PATCH 03/13] =?UTF-8?q?=F0=9F=9A=9C=20Visualizer=20refactor=20pt?=
 =?UTF-8?q?3=20(#374)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* make saving and showing of visualizations configurable
* fix visualizer callback tests
* remove unused imports
* fix openvino test
* add comments
---
 anomalib/data/inference.py                    |  1 +
 anomalib/models/cflow/config.yaml             |  7 ++-
 anomalib/models/dfkde/config.yaml             |  7 ++-
 anomalib/models/dfm/config.yaml               |  7 ++-
 anomalib/models/draem/config.yaml             |  7 ++-
 anomalib/models/fastflow/config.yaml          |  7 ++-
 anomalib/models/ganomaly/config.yaml          |  7 ++-
 anomalib/models/padim/config.yaml             |  7 ++-
 anomalib/models/patchcore/config.yaml         |  7 ++-
 .../models/reverse_distillation/config.yaml   |  7 ++-
 anomalib/models/stfpm/config.yaml             |  7 ++-
 anomalib/post_processing/visualizer.py        | 18 ++++--
 anomalib/utils/callbacks/__init__.py          | 61 +++++++++++++-----
 .../utils/callbacks/visualizer_callback.py    | 63 ++++++++++---------
 .../dummy_lightning_model.py                  |  9 ++-
 .../dummy_lightning_model.py                  |  6 +-
 .../visualizer_callback/test_visualizer.py    |  3 +-
 tools/inference.py                            | 13 ++++
 18 files changed, 180 insertions(+), 64 deletions(-)

diff --git a/anomalib/data/inference.py b/anomalib/data/inference.py
index b775a17fff..e471f1b14a 100644
--- a/anomalib/data/inference.py
+++ b/anomalib/data/inference.py
@@ -63,5 +63,6 @@ def __getitem__(self, index: int) -> Any:
         image_filename = self.image_filenames[index]
         image = read_image(path=image_filename)
         pre_processed = self.pre_process(image=image)
+        pre_processed["image_path"] = image_filename

         return pre_processed
diff --git a/anomalib/models/cflow/config.yaml b/anomalib/models/cflow/config.yaml
index 1065219bf6..4a9c793ef8 100644
--- a/anomalib/models/cflow/config.yaml
+++ b/anomalib/models/cflow/config.yaml
@@ -46,12 +46,17 @@ metrics:
     pixel_default: 0
     adaptive: true

+visualization:
+  show_images: False # show images on the screen
+  save_images: True # save images to the file system
+  log_images: True # log images to the available loggers (if any)
+  image_save_path: null # path to which images will be saved
+
 project:
   seed: 0
   path: ./results

 logging:
-  log_images_to: ["local"] # options: [wandb,
tensorboard, local]. Make sure you also set logger with using wandb or tensorboard. logger: [] # options: [tensorboard, wandb, csv] or combinations. log_graph: false # Logs the model graph to respective logger. diff --git a/anomalib/models/dfkde/config.yaml b/anomalib/models/dfkde/config.yaml index d9abecd18b..38d23022f2 100644 --- a/anomalib/models/dfkde/config.yaml +++ b/anomalib/models/dfkde/config.yaml @@ -32,12 +32,17 @@ metrics: image_default: 0 adaptive: true +visualization: + show_images: False # show images on the screen + save_images: True # save images to the file system + log_images: True # log images to the available loggers (if any) + image_save_path: null # path to which images will be saved + project: seed: 42 path: ./results logging: - log_images_to: ["local"] # options: [wandb, tensorboard, local]. Make sure you also set logger with using wandb or tensorboard. logger: [] # options: [tensorboard, wandb, csv] or combinations. log_graph: false # Logs the model graph to respective logger. diff --git a/anomalib/models/dfm/config.yaml b/anomalib/models/dfm/config.yaml index 9bb3935c79..a57efda4b5 100755 --- a/anomalib/models/dfm/config.yaml +++ b/anomalib/models/dfm/config.yaml @@ -34,12 +34,17 @@ metrics: image_default: 0 adaptive: true +visualization: + show_images: False # show images on the screen + save_images: True # save images to the file system + log_images: True # log images to the available loggers (if any) + image_save_path: null # path to which images will be saved + project: seed: 42 path: ./results logging: - log_images_to: ["local"] # options: [wandb, tensorboard, local]. Make sure you also set logger with using wandb or tensorboard. logger: [] # options: [tensorboard, wandb, csv] or combinations. log_graph: false # Logs the model graph to respective logger. diff --git a/anomalib/models/draem/config.yaml b/anomalib/models/draem/config.yaml index 0d18c237f0..779ad72fd0 100644 --- a/anomalib/models/draem/config.yaml +++ b/anomalib/models/draem/config.yaml @@ -42,12 +42,17 @@ metrics: pixel_default: 3 adaptive: true +visualization: + show_images: False # show images on the screen + save_images: True # save images to the file system + log_images: True # log images to the available loggers (if any) + image_save_path: null # path to which images will be saved + project: seed: 42 path: ./results logging: - log_images_to: ["local"] # options: [wandb, tensorboard, local]. Make sure you also set logger with using wandb or tensorboard. logger: [] # options: [tensorboard, wandb, csv] or combinations. log_graph: false # Logs the model graph to respective logger. diff --git a/anomalib/models/fastflow/config.yaml b/anomalib/models/fastflow/config.yaml index 253cf5f0ff..375e78f558 100644 --- a/anomalib/models/fastflow/config.yaml +++ b/anomalib/models/fastflow/config.yaml @@ -46,12 +46,17 @@ metrics: pixel_default: 0 adaptive: true +visualization: + show_images: False # show images on the screen + save_images: True # save images to the file system + log_images: True # log images to the available loggers (if any) + image_save_path: null # path to which images will be saved + project: seed: 42 path: ./results logging: - log_images_to: ["local"] # options: [wandb, tensorboard, local]. Make sure you also set logger with using wandb or tensorboard. logger: [] # options: [tensorboard, wandb, csv] or combinations. log_graph: false # Logs the model graph to respective logger. 
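
[Note] The config hunks above and below all add the same ``visualization`` section. A minimal sketch of how these keys can be read once such a config has been loaded (assuming OmegaConf, which anomalib already uses for its configs; the padim path is just an example):

```python
from omegaconf import OmegaConf

# Load any model config that now carries the `visualization` section.
config = OmegaConf.load("anomalib/models/padim/config.yaml")  # example path

vis = config.visualization
print(vis.show_images)      # False -> do not open a window for each prediction
print(vis.save_images)      # True  -> write visualizations to the file system
print(vis.log_images)       # True  -> forward images to any configured loggers
print(vis.image_save_path)  # None  -> the callback falls back to "<project.path>/images"
```

The ``<project.path>/images`` fallback matches the behaviour of the ``add_visualizer_callback`` helper introduced later in this series.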
diff --git a/anomalib/models/ganomaly/config.yaml b/anomalib/models/ganomaly/config.yaml index 9e56a73d40..51c1aea005 100644 --- a/anomalib/models/ganomaly/config.yaml +++ b/anomalib/models/ganomaly/config.yaml @@ -50,12 +50,17 @@ metrics: image_default: 0 adaptive: true +visualization: + show_images: False # show images on the screen + save_images: True # save images to the file system + log_images: True # log images to the available loggers (if any) + image_save_path: null # path to which images will be saved + project: seed: 42 path: ./results logging: - log_images_to: ["local"] # options: [wandb, tensorboard, local]. Make sure you also set logger with using wandb or tensorboard. logger: [] # options: [tensorboard, wandb, csv] or combinations. log_graph: false # Logs the model graph to respective logger. diff --git a/anomalib/models/padim/config.yaml b/anomalib/models/padim/config.yaml index 27ebbc0032..b14d0c01e0 100644 --- a/anomalib/models/padim/config.yaml +++ b/anomalib/models/padim/config.yaml @@ -41,12 +41,17 @@ metrics: pixel_default: 3 adaptive: true +visualization: + show_images: False # show images on the screen + save_images: True # save images to the file system + log_images: True # log images to the available loggers (if any) + image_save_path: null # path to which images will be saved + project: seed: 42 path: ./results logging: - log_images_to: ["local"] # options: [wandb, tensorboard, local]. Make sure you also set logger with using wandb or tensorboard. logger: [] # options: [tensorboard, wandb, csv] or combinations. log_graph: false # Logs the model graph to respective logger. diff --git a/anomalib/models/patchcore/config.yaml b/anomalib/models/patchcore/config.yaml index 05b4ce008e..dd791f132e 100644 --- a/anomalib/models/patchcore/config.yaml +++ b/anomalib/models/patchcore/config.yaml @@ -43,12 +43,17 @@ metrics: pixel_default: 0 adaptive: true +visualization: + show_images: False # show images on the screen + save_images: True # save images to the file system + log_images: True # log images to the available loggers (if any) + image_save_path: null # path to which images will be saved + project: seed: 0 path: ./results logging: - log_images_to: ["local"] # options: [wandb, tensorboard, local]. Make sure you also set logger with using wandb or tensorboard. logger: [] # options: [tensorboard, wandb, csv] or combinations. log_graph: false # Logs the model graph to respective logger. diff --git a/anomalib/models/reverse_distillation/config.yaml b/anomalib/models/reverse_distillation/config.yaml index 12d5defb0e..c18c7be896 100644 --- a/anomalib/models/reverse_distillation/config.yaml +++ b/anomalib/models/reverse_distillation/config.yaml @@ -50,12 +50,17 @@ metrics: pixel_default: 0 adaptive: true +visualization: + show_images: False # show images on the screen + save_images: True # save images to the file system + log_images: True # log images to the available loggers (if any) + image_save_path: null # path to which images will be saved + project: seed: 42 path: ./results logging: - log_images_to: ["local"] # options: [wandb, tensorboard, local]. Make sure you also set logger with using wandb or tensorboard. logger: [] # options: [tensorboard, wandb, csv] or combinations. log_graph: false # Logs the model graph to respective logger. 
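
[Note] For configs that still carry the removed ``logging.log_images_to`` list, the mapping onto the new flags is roughly the following (a hedged sketch mirroring the deprecation shim that ``get_callbacks`` gains below; the helper name is illustrative, not part of anomalib):

```python
from typing import Dict, List


def migrate_log_images_to(log_images_to: List[str]) -> Dict[str, bool]:
    """Translate the deprecated `log_images_to` list into the new
    `visualization` booleans. Illustrative helper only."""
    return {
        # "local" used to mean: write the visualizations to disk.
        "save_images": "local" in log_images_to,
        # Any non-"local" entry ("tensorboard", "wandb", ...) meant:
        # forward the images to the corresponding logger.
        "log_images": any(target != "local" for target in log_images_to),
    }


print(migrate_log_images_to(["local"]))           # {'save_images': True, 'log_images': False}
print(migrate_log_images_to(["local", "wandb"]))  # {'save_images': True, 'log_images': True}
```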
diff --git a/anomalib/models/stfpm/config.yaml b/anomalib/models/stfpm/config.yaml index b76d815fee..5621c2b2ed 100644 --- a/anomalib/models/stfpm/config.yaml +++ b/anomalib/models/stfpm/config.yaml @@ -49,12 +49,17 @@ metrics: pixel_default: 0 adaptive: true +visualization: + show_images: False # show images on the screen + save_images: True # save images to the file system + log_images: True # log images to the available loggers (if any) + image_save_path: null # path to which images will be saved + project: seed: 0 path: ./results logging: - log_images_to: ["local"] # options: [wandb, tensorboard, local]. Make sure you also set logger with using wandb or tensorboard. logger: [] # options: [tensorboard, wandb, csv] or combinations. log_graph: false # Logs the model graph to respective logger. diff --git a/anomalib/post_processing/visualizer.py b/anomalib/post_processing/visualizer.py index 2a5500be58..b316f73292 100644 --- a/anomalib/post_processing/visualizer.py +++ b/anomalib/post_processing/visualizer.py @@ -17,6 +17,7 @@ from pathlib import Path from typing import Dict, List, Optional +import cv2 import matplotlib.figure import matplotlib.pyplot as plt import numpy as np @@ -66,10 +67,19 @@ def generate(self): axis.imshow(image_dict["image"], image_dict["color_map"], vmin=0, vmax=255) axis.title.set_text(image_dict["title"]) - def show(self): - """Show image on a matplotlib figure.""" - self.figure.show() - plt.waitforbuttonpress() + def show(self, filename: Optional[str] = None): + """Convert figure to array and show with opencv.""" + self.figure.canvas.draw() + # convert canvas to numpy array + img = np.frombuffer(self.figure.canvas.tostring_rgb(), dtype=np.uint8) + img = img.reshape(self.figure.canvas.get_width_height()[::-1] + (3,)) + # show image with opencv + img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) + title = "Prediction" + title = title + f" for {filename}" if filename else title + cv2.imshow(title, img) + cv2.waitKey(0) + cv2.destroyAllWindows() def save(self, filename: Path): """Save image. diff --git a/anomalib/utils/callbacks/__init__.py b/anomalib/utils/callbacks/__init__.py index 868a4f5018..ab44650b7c 100644 --- a/anomalib/utils/callbacks/__init__.py +++ b/anomalib/utils/callbacks/__init__.py @@ -106,23 +106,7 @@ def get_callbacks(config: Union[ListConfig, DictConfig]) -> List[Callback]: else: raise ValueError(f"Normalization method not recognized: {config.model.normalization_method}") - # TODO Modify when logger is deprecated from project - if "log_images_to" in config.project.keys(): - warnings.warn( - "'log_images_to' key will be deprecated from 'project' section of the config file." - " Please use the logging section in config file", - DeprecationWarning, - ) - config.logging.log_images_to = config.project.log_images_to - - if not config.logging.log_images_to == []: - callbacks.append( - VisualizerCallback( - task=config.dataset.task, - log_images_to=config.logging.log_images_to, - inputs_are_normalized=not config.model.normalization_method == "none", - ) - ) + add_visualizer_callback(callbacks, config) if "optimization" in config.keys(): if "nncf" in config.optimization and config.optimization.nncf.apply: @@ -155,3 +139,46 @@ def get_callbacks(config: Union[ListConfig, DictConfig]) -> List[Callback]: callbacks.append(GraphLogger()) return callbacks + + +def add_visualizer_callback(callbacks: List[Callback], config: Union[DictConfig, ListConfig]): + """Configure the visualizer callback based on the config and add it to the list of callbacks. 
+
+    Args:
+        callbacks (List[Callback]): Current list of callbacks.
+        config (Union[DictConfig, ListConfig]): The config object.
+    """
+    # visualization settings
+    assert isinstance(config, DictConfig)
+    if (
+        "log_images_to" in config.project.keys()
+        and len(config.project.log_images_to) > 0
+        or "log_images_to" in config.logging.keys()
+        and len(config.logging.log_images_to) > 0
+    ):
+        warnings.warn(
+            "log_images_to parameter is deprecated and will be removed in version 0.3.4. Please use "
+            "the visualization.log_images and visualization.save_images parameters instead."
+        )
+        if "visualization" not in config.keys():
+            config["visualization"] = dict(log_images=False, save_images=False, show_images=False, image_save_path=None)
+        if "local" in config.project.log_images_to:
+            config.visualization["save_images"] = True
+        if "local" not in config.project.log_images_to or len(config.project.log_images_to) > 1:
+            config.visualization["log_images"] = True
+    if config.visualization.log_images or config.visualization.save_images or config.visualization.show_images:
+        image_save_path = (
+            config.visualization.image_save_path
+            if config.visualization.image_save_path
+            else config.project.path + "/images"
+        )
+        callbacks.append(
+            VisualizerCallback(
+                task=config.dataset.task,
+                image_save_path=image_save_path,
+                inputs_are_normalized=not config.model.normalization_method == "none",
+                show_images=config.visualization.show_images,
+                log_images=config.visualization.log_images,
+                save_images=config.visualization.save_images,
+            )
+        )
diff --git a/anomalib/utils/callbacks/visualizer_callback.py b/anomalib/utils/callbacks/visualizer_callback.py
index 5f2046f54f..211a248ef6 100644
--- a/anomalib/utils/callbacks/visualizer_callback.py
+++ b/anomalib/utils/callbacks/visualizer_callback.py
@@ -15,8 +15,7 @@
 # and limitations under the License.

 from pathlib import Path
-from typing import Any, Iterator, List, Optional, cast
-from warnings import warn
+from typing import Any, Iterator, Optional, cast

 import pytorch_lightning as pl
 from pytorch_lightning import Callback
@@ -31,7 +30,6 @@
     superimpose_anomaly_map,
 )
 from anomalib.pre_processing.transforms import Denormalize
-from anomalib.utils import loggers
 from anomalib.utils.loggers import AnomalibWandbLogger
 from anomalib.utils.loggers.base import ImageLoggerBase

@@ -46,26 +44,34 @@ class VisualizerCallback(Callback):
         config.yaml file.
     """

-    def __init__(self, task: str, log_images_to: Optional[List[str]] = None, inputs_are_normalized: bool = True):
+    def __init__(
+        self,
+        task: str,
+        image_save_path: str,
+        inputs_are_normalized: bool = True,
+        show_images: bool = False,
+        log_images: bool = True,
+        save_images: bool = True,
+    ):
         """Visualizer callback."""
         self.task = task
-        self.log_images_to = [] if log_images_to is None else log_images_to
         self.inputs_are_normalized = inputs_are_normalized
+        self.show_images = show_images
+        self.log_images = log_images
+        self.save_images = save_images
+        self.image_save_path = Path(image_save_path)

-    def _add_images(
+    def _add_to_logger(
         self,
         visualizer: Visualizer,
         module: AnomalyModule,
         trainer: pl.Trainer,
         filename: Path,
     ):
-        """Save image to logger/local storage.
-
-        Saves the image in `visualizer.figure` to the respective loggers and local storage if specified in
-        `log_images_to` in `config.yaml` of the models.
+        """Log image from a visualizer to each of the available loggers in the project.

         Args:
-            visualizer (Visualizer): Visualizer object from which the `figure` is saved/logged.
+ visualizer (Visualizer): Visualizer object from which the `figure` is logged. module (AnomalyModule): Anomaly module. trainer (Trainer): Pytorch Lightning trainer which holds reference to `logger` filename (Path): Path of the input image. This name is used as name for the generated image. @@ -75,26 +81,16 @@ def _add_images( type(logger).__name__.lower().rstrip("logger").lstrip("anomalib"): logger for logger in trainer.loggers } # save image to respective logger - for log_to in self.log_images_to: - if log_to in loggers.AVAILABLE_LOGGERS: + if self.log_images: + for log_to in available_loggers: # check if logger object is same as the requested object - if log_to in available_loggers and isinstance(available_loggers[log_to], ImageLoggerBase): + if isinstance(available_loggers[log_to], ImageLoggerBase): logger: ImageLoggerBase = cast(ImageLoggerBase, available_loggers[log_to]) # placate mypy logger.add_image( image=visualizer.figure, name=filename.parent.name + "_" + filename.name, global_step=module.global_step, ) - else: - warn( - f"Requested {log_to} logging but logger object is of type: {type(module.logger)}." - f" Skipping logging to {log_to}" - ) - elif log_to not in ["local"]: - warn(f"{log_to} not in the list of supported image loggers.") - - if "local" in self.log_images_to: - visualizer.save(Path(trainer.default_root_dir) / "images" / filename.parent.name / filename.name) def generate_visualizer(self, outputs) -> Iterator[Visualizer]: """Yields a visualizer object for each of the images in the output.""" @@ -147,8 +143,14 @@ def on_predict_batch_end( _batch_idx (int): Index of the current test batch (unused). _dataloader_idx (int): Index of the dataloader that yielded the current batch (unused). """ - for visualizer in self.generate_visualizer(outputs): - visualizer.show() + assert outputs is not None + for i, visualizer in enumerate(self.generate_visualizer(outputs)): + filename = Path(outputs["image_path"][i]) + visualizer.generate() + if self.save_images: + visualizer.save(self.image_save_path / filename.parent.name / filename.name) + if self.show_images: + visualizer.show(str(filename)) def on_test_batch_end( self, @@ -172,9 +174,14 @@ def on_test_batch_end( """ assert outputs is not None for i, visualizer in enumerate(self.generate_visualizer(outputs)): - + filename = Path(outputs["image_path"][i]) visualizer.generate() - self._add_images(visualizer, pl_module, trainer, Path(outputs["image_path"][i])) + if self.save_images: + visualizer.save(self.image_save_path / filename.parent.name / filename.name) + if self.log_images: + self._add_to_logger(visualizer, pl_module, trainer, Path(outputs["image_path"][i])) + if self.show_images: + visualizer.show(str(filename)) visualizer.close() def on_test_end(self, _trainer: pl.Trainer, pl_module: AnomalyModule) -> None: diff --git a/tests/pre_merge/utils/callbacks/openvino_callback/dummy_lightning_model.py b/tests/pre_merge/utils/callbacks/openvino_callback/dummy_lightning_model.py index 4f6d0ee5c8..d305c58aa0 100644 --- a/tests/pre_merge/utils/callbacks/openvino_callback/dummy_lightning_model.py +++ b/tests/pre_merge/utils/callbacks/openvino_callback/dummy_lightning_model.py @@ -74,7 +74,14 @@ def __init__(self, hparams: Union[DictConfig, ListConfig]): super().__init__() self.save_hyperparameters(hparams) self.loss_fn = nn.NLLLoss() - self.callbacks = [VisualizerCallback(task="segmentation")] # test if this is removed + self.callbacks = [ + VisualizerCallback( + task="segmentation", + image_save_path=hparams.project.path + "/images", + 
log_images=False, + save_images=True, + ) + ] # test if this is removed self.image_threshold = AdaptiveThreshold(hparams.model.threshold.image_default).cpu() self.pixel_threshold = AdaptiveThreshold(hparams.model.threshold.pixel_default).cpu() diff --git a/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py b/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py index 2f4237b307..910271bc21 100644 --- a/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py +++ b/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py @@ -5,12 +5,10 @@ import torch from omegaconf.dictconfig import DictConfig from omegaconf.listconfig import ListConfig -from pytorch_lightning.utilities.types import STEP_OUTPUT from torch import nn from torch.utils.data import DataLoader, Dataset from anomalib.models.components import AnomalyModule -from anomalib.utils.callbacks.metrics_configuration import MetricsConfigurationCallback from anomalib.utils.callbacks.visualizer_callback import VisualizerCallback from anomalib.utils.metrics import get_metrics @@ -51,7 +49,9 @@ def __init__(self, hparams: Union[DictConfig, ListConfig]): self.model = DummyModel() self.task = "segmentation" self.callbacks = [ - VisualizerCallback(task=self.task, log_images_to=hparams.logging.log_images_to), + VisualizerCallback( + task=self.task, image_save_path=hparams.project.path + "/images", log_images=True, save_images=True + ) ] # test if this is removed self.image_metrics, self.pixel_metrics = get_metrics(hparams) diff --git a/tests/pre_merge/utils/callbacks/visualizer_callback/test_visualizer.py b/tests/pre_merge/utils/callbacks/visualizer_callback/test_visualizer.py index e375deb266..6ecd92c6f0 100644 --- a/tests/pre_merge/utils/callbacks/visualizer_callback/test_visualizer.py +++ b/tests/pre_merge/utils/callbacks/visualizer_callback/test_visualizer.py @@ -30,7 +30,8 @@ def test_add_images(dataset): "dataset": {"task": dataset}, "model": {"threshold": {"image_default": 0.5, "pixel_default": 0.5, "adaptive": True}}, "project": {"path": dir_loc}, - "logging": {"log_images_to": ["tensorboard", "local"]}, + "logging": {"logger": ["tensorboard"]}, + "visualization": {"log_images": True, "save_images": True}, "metrics": {}, } ) diff --git a/tools/inference.py b/tools/inference.py index a087a1fb61..ea85f42347 100644 --- a/tools/inference.py +++ b/tools/inference.py @@ -25,6 +25,13 @@ def get_args() -> Namespace: parser.add_argument("--config", type=Path, required=True, help="Path to a model config file") parser.add_argument("--weight_path", type=Path, required=True, help="Path to a model weights") parser.add_argument("--image_path", type=Path, required=True, help="Path to an image to infer.") + parser.add_argument( + "--disable_show_images", + action="store_true", + required=False, + help="Do not show the visualized predictions on the screen.", + ) + parser.add_argument("--save_path", type=str, required=False, help="Path to save the output images.") args = parser.parse_args() if args.model_config_path is not None: @@ -43,6 +50,12 @@ def infer(): args = get_args() config = get_configurable_parameters(config_path=args.config) config.model["weight_file"] = str(args.weight_path) + config.visualization.show_images = not args.disable_show_images + if args.save_path: # overwrite save path + config.visualization.save_images = True + config.visualization.image_save_path = args.save_path + else: + config.visualization.save_images = False model = get_model(config) 
callbacks = get_callbacks(config) From 6cad624a0b4d33c6d35d50e9fd4c93272f65c507 Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Tue, 28 Jun 2022 11:15:00 +0200 Subject: [PATCH 04/13] Visualizer refactor pt4 (#379) * make visualization mode configurable * change default visualization mode * value checks * check mask range, move save and show to visualizer * fix visualizer tests * change pred_mask value check * address circular import * address mypy issue * fix visualizer test and mypy * fix openvino tests --- anomalib/models/cflow/config.yaml | 1 + anomalib/models/dfkde/config.yaml | 1 + anomalib/models/dfm/config.yaml | 1 + anomalib/models/draem/config.yaml | 1 + anomalib/models/fastflow/config.yaml | 1 + anomalib/models/ganomaly/config.yaml | 1 + anomalib/models/padim/config.yaml | 1 + anomalib/models/patchcore/config.yaml | 1 + .../models/reverse_distillation/config.yaml | 1 + anomalib/models/stfpm/config.yaml | 1 + anomalib/post_processing/visualizer.py | 220 ++++++++++++++---- anomalib/utils/callbacks/__init__.py | 1 + .../utils/callbacks/visualizer_callback.py | 76 ++---- anomalib/utils/loggers/tensorboard.py | 2 +- .../post_processing/test_visualizer.py | 4 +- .../dummy_lightning_model.py | 1 + .../dummy_lightning_model.py | 7 +- tools/inference.py | 9 + 18 files changed, 234 insertions(+), 96 deletions(-) diff --git a/anomalib/models/cflow/config.yaml b/anomalib/models/cflow/config.yaml index 4a9c793ef8..21079390f0 100644 --- a/anomalib/models/cflow/config.yaml +++ b/anomalib/models/cflow/config.yaml @@ -51,6 +51,7 @@ visualization: save_images: True # save images to the file system log_images: True # log images to the available loggers (if any) image_save_path: null # path to which images will be saved + mode: full # options: ["full", "simple"] project: seed: 0 diff --git a/anomalib/models/dfkde/config.yaml b/anomalib/models/dfkde/config.yaml index 38d23022f2..62ccc1e31d 100644 --- a/anomalib/models/dfkde/config.yaml +++ b/anomalib/models/dfkde/config.yaml @@ -37,6 +37,7 @@ visualization: save_images: True # save images to the file system log_images: True # log images to the available loggers (if any) image_save_path: null # path to which images will be saved + mode: full # options: ["full", "simple"] project: seed: 42 diff --git a/anomalib/models/dfm/config.yaml b/anomalib/models/dfm/config.yaml index a57efda4b5..e09359f53a 100755 --- a/anomalib/models/dfm/config.yaml +++ b/anomalib/models/dfm/config.yaml @@ -39,6 +39,7 @@ visualization: save_images: True # save images to the file system log_images: True # log images to the available loggers (if any) image_save_path: null # path to which images will be saved + mode: full # options: ["full", "simple"] project: seed: 42 diff --git a/anomalib/models/draem/config.yaml b/anomalib/models/draem/config.yaml index 779ad72fd0..dbb02f12ab 100644 --- a/anomalib/models/draem/config.yaml +++ b/anomalib/models/draem/config.yaml @@ -47,6 +47,7 @@ visualization: save_images: True # save images to the file system log_images: True # log images to the available loggers (if any) image_save_path: null # path to which images will be saved + mode: full # options: ["full", "simple"] project: seed: 42 diff --git a/anomalib/models/fastflow/config.yaml b/anomalib/models/fastflow/config.yaml index 375e78f558..04fa9f1060 100644 --- a/anomalib/models/fastflow/config.yaml +++ b/anomalib/models/fastflow/config.yaml @@ -51,6 +51,7 @@ visualization: save_images: True # save images to the file system log_images: True # log images to the available loggers (if any) 
image_save_path: null # path to which images will be saved + mode: full # options: ["full", "simple"] project: seed: 42 diff --git a/anomalib/models/ganomaly/config.yaml b/anomalib/models/ganomaly/config.yaml index 51c1aea005..500cdf0b24 100644 --- a/anomalib/models/ganomaly/config.yaml +++ b/anomalib/models/ganomaly/config.yaml @@ -55,6 +55,7 @@ visualization: save_images: True # save images to the file system log_images: True # log images to the available loggers (if any) image_save_path: null # path to which images will be saved + mode: full # options: ["full", "simple"] project: seed: 42 diff --git a/anomalib/models/padim/config.yaml b/anomalib/models/padim/config.yaml index b14d0c01e0..51748e4520 100644 --- a/anomalib/models/padim/config.yaml +++ b/anomalib/models/padim/config.yaml @@ -46,6 +46,7 @@ visualization: save_images: True # save images to the file system log_images: True # log images to the available loggers (if any) image_save_path: null # path to which images will be saved + mode: full # options: ["full", "simple"] project: seed: 42 diff --git a/anomalib/models/patchcore/config.yaml b/anomalib/models/patchcore/config.yaml index dd791f132e..ab66dd1da9 100644 --- a/anomalib/models/patchcore/config.yaml +++ b/anomalib/models/patchcore/config.yaml @@ -48,6 +48,7 @@ visualization: save_images: True # save images to the file system log_images: True # log images to the available loggers (if any) image_save_path: null # path to which images will be saved + mode: full # options: ["full", "simple"] project: seed: 0 diff --git a/anomalib/models/reverse_distillation/config.yaml b/anomalib/models/reverse_distillation/config.yaml index c18c7be896..97925cc9d1 100644 --- a/anomalib/models/reverse_distillation/config.yaml +++ b/anomalib/models/reverse_distillation/config.yaml @@ -55,6 +55,7 @@ visualization: save_images: True # save images to the file system log_images: True # log images to the available loggers (if any) image_save_path: null # path to which images will be saved + mode: full # options: ["full", "simple"] project: seed: 42 diff --git a/anomalib/models/stfpm/config.yaml b/anomalib/models/stfpm/config.yaml index 5621c2b2ed..3bdf915188 100644 --- a/anomalib/models/stfpm/config.yaml +++ b/anomalib/models/stfpm/config.yaml @@ -54,6 +54,7 @@ visualization: save_images: True # save images to the file system log_images: True # log images to the available loggers (if any) image_save_path: null # path to which images will be saved + mode: full # options: ["full", "simple"] project: seed: 0 diff --git a/anomalib/post_processing/visualizer.py b/anomalib/post_processing/visualizer.py index b316f73292..e162b4a5af 100644 --- a/anomalib/post_processing/visualizer.py +++ b/anomalib/post_processing/visualizer.py @@ -14,36 +14,194 @@ # See the License for the specific language governing permissions # and limitations under the License. 
+from dataclasses import dataclass, field
 from pathlib import Path
-from typing import Dict, List, Optional
+from typing import Dict, Iterator, List, Optional
 
 import cv2
 import matplotlib.figure
 import matplotlib.pyplot as plt
 import numpy as np
+from skimage.segmentation import mark_boundaries
+
+from anomalib.post_processing.post_process import (
+    add_anomalous_label,
+    add_normal_label,
+    superimpose_anomaly_map,
+)
+from anomalib.pre_processing.transforms import Denormalize
+
+
+@dataclass
+class ImageResult:
+    """Collection of data needed to visualize the predictions for an image."""
+
+    image: np.ndarray
+    pred_score: float
+    pred_label: str
+    anomaly_map: np.ndarray
+    gt_mask: Optional[np.ndarray] = None
+    pred_mask: Optional[np.ndarray] = None
+
+    heat_map: np.ndarray = field(init=False)
+    segmentations: np.ndarray = field(init=False)
+
+    def __post_init__(self):
+        """Generate heatmap overlay and segmentations, convert masks to images."""
+        self.heat_map = superimpose_anomaly_map(self.anomaly_map, self.image, normalize=False)
+        if self.pred_mask is not None:
+            if np.max(self.pred_mask) <= 1.0:
+                self.pred_mask *= 255
+            self.segmentations = mark_boundaries(self.image, self.pred_mask, color=(1, 0, 0), mode="thick")
+        if self.gt_mask is not None and np.max(self.gt_mask) <= 1.0:
+            self.gt_mask *= 255
 
 
 class Visualizer:
-    """Anomaly Visualization.
+    """Class that handles the logic of composing the visualizations.
+
+    Args:
+        mode (str): visualization mode, either "full" or "simple"
+        task (str): task type, either "segmentation" or "classification"
+    """
+
+    def __init__(self, mode: str, task: str):
+        if mode not in ["full", "simple"]:
+            raise ValueError(f"Unknown visualization mode: {mode}. Please choose one of ['full', 'simple']")
+        self.mode = mode
+        if task not in ["classification", "segmentation"]:
+            raise ValueError(f"Unknown task type: {task}. Please choose one of ['classification', 'segmentation']")
+        self.task = task
+
+    def visualize_batch(self, batch: Dict) -> Iterator[np.ndarray]:
+        """Generator that yields a visualization result for each item in the batch.
+
+        Args:
+            batch (Dict): Dictionary containing the ground truth and predictions of a batch of images.
+
+        Returns:
+            Generator that yields a display-ready visualization for each image.
+        """
+        for i in range(batch["image"].size(0)):
+            image_result = ImageResult(
+                image=Denormalize()(batch["image"][i].cpu()),
+                pred_score=batch["pred_scores"][i].cpu().numpy().item(),
+                pred_label=batch["pred_labels"][i].cpu().numpy().item(),
+                anomaly_map=batch["anomaly_maps"][i].cpu().numpy(),
+                pred_mask=batch["pred_masks"][i].squeeze().int().cpu().numpy() if "pred_masks" in batch else None,
+                gt_mask=batch["mask"][i].squeeze().int().cpu().numpy() if "mask" in batch else None,
+            )
+            yield self.visualize_image(image_result)
+
+    def visualize_image(self, image_result: ImageResult) -> np.ndarray:
+        """Generate the visualization for an image.
+
+        Args:
+            image_result (ImageResult): GT and Prediction data for a single image.
+
+        Returns:
+            The full or simple visualization for the image, depending on the specified mode.
+        """
+        if self.mode == "full":
+            return self._visualize_full(image_result)
+        if self.mode == "simple":
+            return self._visualize_simple(image_result)
+        raise ValueError(f"Unknown visualization mode: {self.mode}")
+
+    def _visualize_full(self, image_result: ImageResult):
+        """Generate the full set of visualizations for an image.
+ + The full visualization mode shows a grid with subplots that contain the original image, the GT mask (if + available), the predicted heat map, the predicted segmentation mask (if available), and the predicted + segmentations (if available). + + Args: + image_result (ImageResult): GT and Prediction data for a single image. + + Returns: + An image showing the full set of visualizations for the input image. + """ + visualization = ImageGrid() + if self.task == "segmentation": + assert image_result.pred_mask is not None + visualization.add_image(image_result.image, "Image") + if image_result.gt_mask is not None: + visualization.add_image(image=image_result.gt_mask, color_map="gray", title="Ground Truth") + visualization.add_image(image_result.heat_map, "Predicted Heat Map") + visualization.add_image(image=image_result.pred_mask, color_map="gray", title="Predicted Mask") + visualization.add_image(image=image_result.segmentations, title="Segmentation Result") + elif self.task == "classification": + visualization.add_image(image_result.image, title="Image") + if image_result.pred_label: + image_classified = add_anomalous_label(image_result.heat_map, image_result.pred_score) + else: + image_classified = add_normal_label(image_result.heat_map, 1 - image_result.pred_score) + visualization.add_image(image=image_classified, title="Prediction") + + return visualization.generate() + + def _visualize_simple(self, image_result): + """Generate a simple visualization for an image. + + The simple visualization mode only shows the model's predictions in a single image. + + Args: + image_result (ImageResult): GT and Prediction data for a single image. + + Returns: + An image showing the simple visualization for the input image. + """ + if self.task == "segmentation": + visualization = mark_boundaries( + image_result.heat_map, image_result.pred_mask, color=(1, 0, 0), mode="thick" + ) + return cv2.cvtColor((visualization * 255).astype(np.uint8), cv2.COLOR_RGB2BGR) + if self.task == "classification": + if image_result.pred_label: + image_classified = add_anomalous_label(image_result.heat_map, image_result.pred_score) + else: + image_classified = add_normal_label(image_result.heat_map, 1 - image_result.pred_score) + return cv2.cvtColor(image_classified, cv2.COLOR_RGB2BGR) + raise ValueError(f"Unknown task type: {self.task}") + + @staticmethod + def show(title: str, image: np.ndarray, delay: int = 0): + """Show an image on the screen. + + Args: + title (str): Title that will be given to the window showing the image. + image (np.ndarray): Image that will be shown in the window. + delay (int): Delay in milliseconds to wait for keystroke. 0 for infinite. + """ + cv2.imshow(title, image) + cv2.waitKey(delay) + cv2.destroyAllWindows() + + @staticmethod + def save(file_path: Path, image: np.ndarray): + """Save an image to the file system. + + Args: + file_path (Path): Path to which the image will be saved. + image (np.ndarray): Image that will be saved to the file system. + """ + file_path.parent.mkdir(parents=True, exist_ok=True) + cv2.imwrite(str(file_path), image) - The visualizer object is responsible for collating all the images passed to it into a single image. This can then - either be logged by accessing the `figure` attribute or can be saved directly by calling `save()` method. - Example: - >>> visualizer = Visualizer() - >>> visualizer.add_image(image=image, title="Image") - >>> visualizer.close() +class ImageGrid: + """Helper class that compiles multiple images into a grid using subplots. 
+ + Individual images can be added with the `add_image` method. When all images have been added, the `generate` method + must be called to compile the image grid and obtain the final visualization. """ def __init__(self): - self.images: List[Dict] = [] - self.figure: matplotlib.figure.Figure self.axis: np.ndarray - def add_image(self, image: np.ndarray, title: str, color_map: Optional[str] = None): - """Add image to figure. + def add_image(self, image: np.ndarray, title: Optional[str] = None, color_map: Optional[str] = None): + """Add an image to the grid. Args: image (np.ndarray): Image which should be added to the figure. @@ -53,43 +211,27 @@ def add_image(self, image: np.ndarray, title: str, color_map: Optional[str] = No image_data = dict(image=image, title=title, color_map=color_map) self.images.append(image_data) - def generate(self): - """Generate the image.""" + def generate(self) -> np.ndarray: + """Generate the image. + + Returns: + Image consisting of a grid of added images and their title. + """ num_cols = len(self.images) figure_size = (num_cols * 3, 3) self.figure, self.axis = plt.subplots(1, num_cols, figsize=figure_size) self.figure.subplots_adjust(right=0.9) - axes = self.axis if len(self.images) > 1 else [self.axis] + axes = self.axis if isinstance(self.axis, np.ndarray) else np.array([self.axis]) for axis, image_dict in zip(axes, self.images): axis.axes.xaxis.set_visible(False) axis.axes.yaxis.set_visible(False) axis.imshow(image_dict["image"], image_dict["color_map"], vmin=0, vmax=255) - axis.title.set_text(image_dict["title"]) - - def show(self, filename: Optional[str] = None): - """Convert figure to array and show with opencv.""" + if image_dict["title"] is not None: + axis.title.set_text(image_dict["title"]) self.figure.canvas.draw() - # convert canvas to numpy array + # convert canvas to numpy array to prepare for visualization with opencv img = np.frombuffer(self.figure.canvas.tostring_rgb(), dtype=np.uint8) img = img.reshape(self.figure.canvas.get_width_height()[::-1] + (3,)) - # show image with opencv img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) - title = "Prediction" - title = title + f" for {filename}" if filename else title - cv2.imshow(title, img) - cv2.waitKey(0) - cv2.destroyAllWindows() - - def save(self, filename: Path): - """Save image. - - Args: - filename (Path): Filename to save image - """ - filename.parent.mkdir(parents=True, exist_ok=True) - self.figure.savefig(filename, dpi=100) - - def close(self): - """Close figure.""" - plt.close(self.figure) + return img diff --git a/anomalib/utils/callbacks/__init__.py b/anomalib/utils/callbacks/__init__.py index ab44650b7c..6878b8451c 100644 --- a/anomalib/utils/callbacks/__init__.py +++ b/anomalib/utils/callbacks/__init__.py @@ -175,6 +175,7 @@ def add_visualizer_callback(callbacks: List[Callback], config: Union[DictConfig, callbacks.append( VisualizerCallback( task=config.dataset.task, + mode=config.visualization.mode, image_save_path=image_save_path, inputs_are_normalized=not config.model.normalization_method == "none", show_images=config.visualization.show_images, diff --git a/anomalib/utils/callbacks/visualizer_callback.py b/anomalib/utils/callbacks/visualizer_callback.py index 211a248ef6..b64b9e962d 100644 --- a/anomalib/utils/callbacks/visualizer_callback.py +++ b/anomalib/utils/callbacks/visualizer_callback.py @@ -15,21 +15,15 @@ # and limitations under the License. 
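As a quick orientation before the callback changes below: the new `ImageGrid` helper from `visualizer.py` above can be exercised on its own. A minimal usage sketch (the dummy arrays are purely illustrative):

```python
import numpy as np

from anomalib.post_processing.visualizer import ImageGrid

# Compose a two-panel grid; generate() compiles the subplots into a single
# BGR numpy array suitable for cv2.imwrite or cv2.imshow.
grid = ImageGrid()
grid.add_image(image=np.zeros((256, 256), dtype=np.uint8), title="Mask", color_map="gray")
grid.add_image(image=np.full((256, 256), 255, dtype=np.uint8), title="Prediction", color_map="gray")
result = grid.generate()
print(result.shape)  # (height, width, 3), sized by the matplotlib canvas
```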
 from pathlib import Path
-from typing import Any, Iterator, Optional, cast
+from typing import Any, Optional, cast
 
+import numpy as np
 import pytorch_lightning as pl
 from pytorch_lightning import Callback
 from pytorch_lightning.utilities.types import STEP_OUTPUT
-from skimage.segmentation import mark_boundaries
 
 from anomalib.models.components import AnomalyModule
-from anomalib.post_processing import (
-    Visualizer,
-    add_anomalous_label,
-    add_normal_label,
-    superimpose_anomaly_map,
-)
-from anomalib.pre_processing.transforms import Denormalize
+from anomalib.post_processing import Visualizer
 from anomalib.utils.loggers import AnomalibWandbLogger
 from anomalib.utils.loggers.base import ImageLoggerBase
 
@@ -47,6 +41,7 @@ class VisualizerCallback(Callback):
     def __init__(
         self,
         task: str,
+        mode: str,
         image_save_path: str,
         inputs_are_normalized: bool = True,
         show_images: bool = False,
@@ -54,6 +49,11 @@ def __init__(
         save_images: bool = True,
     ):
         """Visualizer callback."""
+        if mode not in ["full", "simple"]:
+            raise ValueError(f"Unknown visualization mode: {mode}. Please choose one of ['full', 'simple']")
+        self.mode = mode
+        if task not in ["classification", "segmentation"]:
+            raise ValueError(f"Unknown task type: {task}. Please choose one of ['classification', 'segmentation']")
         self.task = task
         self.inputs_are_normalized = inputs_are_normalized
         self.show_images = show_images
@@ -61,9 +61,11 @@ def __init__(
         self.save_images = save_images
         self.image_save_path = Path(image_save_path)
+
+        self.visualizer = Visualizer(mode, task)
+
     def _add_to_logger(
         self,
-        visualizer: Visualizer,
+        image: np.ndarray,
         module: AnomalyModule,
         trainer: pl.Trainer,
         filename: Path,
     ) -> None:
         """Log image from a visualizer to each of the available loggers in the project.
 
         Args:
-            visualizer (Visualizer): Visualizer object from which the `figure` is logged.
+            image (np.ndarray): Image that should be added to the loggers.
             module (AnomalyModule): Anomaly module.
             trainer (Trainer): Pytorch Lightning trainer which holds reference to `logger`
             filename (Path): Path of the input image. This name is used as the name for the generated image.
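To make the new signature concrete, here is a minimal construction sketch (the save path is illustrative; the argument names and validation behaviour are taken from the `__init__` above):

```python
from anomalib.utils.callbacks.visualizer_callback import VisualizerCallback

# "mode" and "task" are validated against ["full", "simple"] and
# ["classification", "segmentation"]; anything else raises a ValueError.
callback = VisualizerCallback(
    task="segmentation",
    mode="full",
    image_save_path="./results/images",  # illustrative location
    show_images=False,  # skip the blocking cv2.imshow window
    log_images=True,    # forward visualizations to the available loggers
    save_images=True,   # write visualizations below image_save_path
)
```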
@@ -87,42 +89,11 @@ def _add_to_logger( if isinstance(available_loggers[log_to], ImageLoggerBase): logger: ImageLoggerBase = cast(ImageLoggerBase, available_loggers[log_to]) # placate mypy logger.add_image( - image=visualizer.figure, + image=image, name=filename.parent.name + "_" + filename.name, global_step=module.global_step, ) - def generate_visualizer(self, outputs) -> Iterator[Visualizer]: - """Yields a visualizer object for each of the images in the output.""" - for i in range(outputs["image"].size(0)): - visualizer = Visualizer() - - image = Denormalize()(outputs["image"][i].cpu()) - anomaly_map = outputs["anomaly_maps"][i].cpu().numpy() - heat_map = superimpose_anomaly_map(anomaly_map, image, normalize=not self.inputs_are_normalized) - pred_score = outputs["pred_scores"][i].cpu().numpy() - pred_label = outputs["pred_labels"][i].cpu().numpy() - - if self.task == "segmentation": - pred_mask = outputs["pred_masks"][i].squeeze().int().cpu().numpy() * 255 - vis_img = mark_boundaries(image, pred_mask, color=(1, 0, 0), mode="thick") - visualizer.add_image(image=image, title="Image") - if "mask" in outputs: - true_mask = outputs["mask"][i].cpu().numpy() * 255 - visualizer.add_image(image=true_mask, color_map="gray", title="Ground Truth") - visualizer.add_image(image=heat_map, title="Predicted Heat Map") - visualizer.add_image(image=pred_mask, color_map="gray", title="Predicted Mask") - visualizer.add_image(image=vis_img, title="Segmentation Result") - elif self.task == "classification": - visualizer.add_image(image, title="Image") - if pred_label: - image_classified = add_anomalous_label(heat_map, pred_score) - else: - image_classified = add_normal_label(heat_map, 1 - pred_score) - visualizer.add_image(image=image_classified, title="Prediction") - - yield visualizer - def on_predict_batch_end( self, _trainer: pl.Trainer, @@ -144,13 +115,13 @@ def on_predict_batch_end( _dataloader_idx (int): Index of the dataloader that yielded the current batch (unused). """ assert outputs is not None - for i, visualizer in enumerate(self.generate_visualizer(outputs)): + for i, image in enumerate(self.visualizer.visualize_batch(outputs)): filename = Path(outputs["image_path"][i]) - visualizer.generate() if self.save_images: - visualizer.save(self.image_save_path / filename.parent.name / filename.name) + file_path = self.image_save_path / filename.parent.name / filename.name + self.visualizer.save(file_path, image) if self.show_images: - visualizer.show(str(filename)) + self.visualizer.show(str(filename), image) def on_test_batch_end( self, @@ -173,16 +144,15 @@ def on_test_batch_end( _dataloader_idx (int): Index of the dataloader that yielded the current batch (unused). """ assert outputs is not None - for i, visualizer in enumerate(self.generate_visualizer(outputs)): + for i, image in enumerate(self.visualizer.visualize_batch(outputs)): filename = Path(outputs["image_path"][i]) - visualizer.generate() if self.save_images: - visualizer.save(self.image_save_path / filename.parent.name / filename.name) + file_path = self.image_save_path / filename.parent.name / filename.name + self.visualizer.save(file_path, image) if self.log_images: - self._add_to_logger(visualizer, pl_module, trainer, Path(outputs["image_path"][i])) + self._add_to_logger(image, pl_module, trainer, filename) if self.show_images: - visualizer.show(str(filename)) - visualizer.close() + self.visualizer.show(str(filename), image) def on_test_end(self, _trainer: pl.Trainer, pl_module: AnomalyModule) -> None: """Sync logs. 
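With the composition logic moved out of the callback, the `Visualizer` object can also be driven on its own; a rough standalone sketch follows (the batch below is fabricated purely to mimic the keys `visualize_batch` reads):

```python
from pathlib import Path

import torch

from anomalib.post_processing.visualizer import Visualizer

# Fabricated classification-style batch with the keys visualize_batch expects.
batch = {
    "image": torch.rand(1, 3, 256, 256),
    "anomaly_maps": torch.rand(1, 256, 256),
    "pred_scores": torch.tensor([0.88]),
    "pred_labels": torch.tensor([True]),
}

visualizer = Visualizer(mode="simple", task="classification")
for image in visualizer.visualize_batch(batch):
    Visualizer.save(Path("./results/images/sample.png"), image)  # illustrative path
    Visualizer.show("Prediction", image)  # blocks until a keypress, like the callback
```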
diff --git a/anomalib/utils/loggers/tensorboard.py b/anomalib/utils/loggers/tensorboard.py index 56695babd6..1cf3e9da54 100644 --- a/anomalib/utils/loggers/tensorboard.py +++ b/anomalib/utils/loggers/tensorboard.py @@ -102,4 +102,4 @@ def add_image(self, image: Union[np.ndarray, Figure], name: Optional[str] = None image = buffer.reshape(image.canvas.get_width_height()[::-1] + (3,)) kwargs["dataformats"] = "HWC" - self.experiment.add_image(img_tensor=image, tag=name, **kwargs) + self.experiment.add_image(img_tensor=image, tag=name, dataformats="HWC", **kwargs) diff --git a/tests/pre_merge/post_processing/test_visualizer.py b/tests/pre_merge/post_processing/test_visualizer.py index 4e8fc3df04..2db5eeab74 100644 --- a/tests/pre_merge/post_processing/test_visualizer.py +++ b/tests/pre_merge/post_processing/test_visualizer.py @@ -17,14 +17,14 @@ import numpy as np from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas -from anomalib.post_processing.visualizer import Visualizer +from anomalib.post_processing.visualizer import ImageGrid def test_visualize_fully_defected_masks(): """Test if a fully defected anomaly mask results in a completely white image.""" # create visualizer and add fully defected mask - visualizer = Visualizer() + visualizer = ImageGrid() mask = np.ones((256, 256)) * 255 visualizer.add_image(image=mask, color_map="gray", title="fully defected mask") visualizer.generate() diff --git a/tests/pre_merge/utils/callbacks/openvino_callback/dummy_lightning_model.py b/tests/pre_merge/utils/callbacks/openvino_callback/dummy_lightning_model.py index d305c58aa0..02eae0a5a6 100644 --- a/tests/pre_merge/utils/callbacks/openvino_callback/dummy_lightning_model.py +++ b/tests/pre_merge/utils/callbacks/openvino_callback/dummy_lightning_model.py @@ -76,6 +76,7 @@ def __init__(self, hparams: Union[DictConfig, ListConfig]): self.loss_fn = nn.NLLLoss() self.callbacks = [ VisualizerCallback( + mode="full", task="segmentation", image_save_path=hparams.project.path + "/images", log_images=False, diff --git a/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py b/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py index 910271bc21..d3978228f3 100644 --- a/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py +++ b/tests/pre_merge/utils/callbacks/visualizer_callback/dummy_lightning_model.py @@ -48,9 +48,14 @@ def __init__(self, hparams: Union[DictConfig, ListConfig]): super().__init__() self.model = DummyModel() self.task = "segmentation" + self.mode = "full" self.callbacks = [ VisualizerCallback( - task=self.task, image_save_path=hparams.project.path + "/images", log_images=True, save_images=True + task=self.task, + mode=self.mode, + image_save_path=hparams.project.path + "/images", + log_images=True, + save_images=True, ) ] # test if this is removed diff --git a/tools/inference.py b/tools/inference.py index ea85f42347..2c1688fc25 100644 --- a/tools/inference.py +++ b/tools/inference.py @@ -25,6 +25,14 @@ def get_args() -> Namespace: parser.add_argument("--config", type=Path, required=True, help="Path to a model config file") parser.add_argument("--weight_path", type=Path, required=True, help="Path to a model weights") parser.add_argument("--image_path", type=Path, required=True, help="Path to an image to infer.") + parser.add_argument( + "--visualization_mode", + type=str, + required=False, + default="simple", + help="Visualization mode. 
'full' or 'simple'", + choices=["full", "simple"], + ) parser.add_argument( "--disable_show_images", action="store_true", @@ -51,6 +59,7 @@ def infer(): config = get_configurable_parameters(config_path=args.config) config.model["weight_file"] = str(args.weight_path) config.visualization.show_images = not args.disable_show_images + config.visualization.mode = args.visualization_mode if args.save_path: # overwrite save path config.visualization.save_images = True config.visualization.image_save_path = args.save_path From c114a9f052665829fbd262c7553c7db83933eb2c Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Wed, 29 Jun 2022 12:42:03 +0200 Subject: [PATCH 05/13] fix typo --- anomalib/utils/callbacks/model_loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/anomalib/utils/callbacks/model_loader.py b/anomalib/utils/callbacks/model_loader.py index 2790cf9a5f..2dfc1a183f 100644 --- a/anomalib/utils/callbacks/model_loader.py +++ b/anomalib/utils/callbacks/model_loader.py @@ -41,7 +41,7 @@ def on_test_start(self, _trainer, pl_module: AnomalyModule) -> None: # pylint: pl_module.load_state_dict(torch.load(self.weights_path)["state_dict"]) def on_predict_start(self, _trainer, pl_module: AnomalyModule) -> None: - """Call when inferebce begins. + """Call when inference begins. Loads the model weights from ``weights_path`` into the PyTorch module. """ From e1e4751ceb15a8aa54f100a4d74adcc631d57716 Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Wed, 29 Jun 2022 12:45:43 +0200 Subject: [PATCH 06/13] add license header --- tools/inference.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/inference.py b/tools/inference.py index 2c1688fc25..a3f097c6c2 100644 --- a/tools/inference.py +++ b/tools/inference.py @@ -1,5 +1,8 @@ """Inference Entrypoint script.""" +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + import warnings from argparse import ArgumentParser, Namespace from pathlib import Path From 3745a91ca22d96937a70cee0fcf86cb2c0427397 Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Wed, 29 Jun 2022 14:46:52 +0200 Subject: [PATCH 07/13] add new inference parameters to guide --- docs/source/guides/inference.rst | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/docs/source/guides/inference.rst b/docs/source/guides/inference.rst index 78ac21b40e..feeb6cbb56 100644 --- a/docs/source/guides/inference.rst +++ b/docs/source/guides/inference.rst @@ -9,17 +9,21 @@ Torch Inference ============== The entrypoint script in ``tools/inference.py`` can be used to run inference with a trained PyTorch model. The entrypoint script has several command line arguments that can be used to configure inference: -+-------------+----------+-------------------------------------------------------------------------------------+ -| Parameter | Required | Description | -+=============+==========+=====================================================================================+ -| config | True | Path to the model config file. | -+-------------+----------+-------------------------------------------------------------------------------------+ -| weight_path | True | Path to the ``.ckpt`` model checkpoint file. | -+-------------+----------+-------------------------------------------------------------------------------------+ -| image_path | True | Path to the image source. This can be a single image or a folder of images. 
|
++---------------------+----------+-------------------------------------------------------------------------------------+
+| save_path           | False    | Path to which the output images should be saved.                                    |
++---------------------+----------+-------------------------------------------------------------------------------------+
+| visualization_mode  | False    | Determines how the inference results are visualized. Options: "full", "simple".     |
++---------------------+----------+-------------------------------------------------------------------------------------+
+| disable_show_images | False    | When this flag is passed, visualizations will not be shown on the screen.           |
++---------------------+----------+-------------------------------------------------------------------------------------+
 
 To run inference, call the script from the command line with the following parameters, e.g.:

From 3ccd5bea608a7e6978cc0d55442737a601e425b6 Mon Sep 17 00:00:00 2001
From: Dick Ameln
Date: Wed, 29 Jun 2022 15:11:19 +0200
Subject: [PATCH 08/13] refactor inference entrypoint structure

---
 README.md                                              |  2 +-
 docs/source/guides/inference.rst                       | 10 +++++-----
 .../gradio_inference.py}                               |  8 ++++----
 tools/{inference.py => inference/lightning.py}         |  0
 tools/{openvino_inference.py => inference/openvino.py} |  0
 5 files changed, 10 insertions(+), 10 deletions(-)
 rename tools/{inference_gradio.py => inference/gradio_inference.py} (93%)
 rename tools/{inference.py => inference/lightning.py} (100%)
 rename tools/{openvino_inference.py => inference/openvino.py} (100%)

diff --git a/README.md b/README.md
index 7385728f2d..84a7df6424 100644
--- a/README.md
+++ b/README.md
@@ -159,7 +159,7 @@ The new CLI approach offers a lot more flexibility, details of which are explain
 
 ## Inference
 ### ⚠️ Anomalib < v.0.4.0
-Anomalib contains several tools that can be used to perform inference with a trained model. The script in [`tools/inference`](tools/inference.py) contains an example of how the inference tools can be used to generate a prediction for an input image.
+Anomalib contains several tools that can be used to perform inference with a trained model. The script in [`tools/inference`](tools/inference/lightning.py) contains an example of how the inference tools can be used to generate a prediction for an input image.
 
 If the specified weight path points to a PyTorch Lightning checkpoint file (`.ckpt`), inference will run in PyTorch.
If the path points to an ONNX graph (`.onnx`) or OpenVINO IR (`.bin` or `.xml`), inference will run in OpenVINO.

diff --git a/docs/source/guides/inference.rst b/docs/source/guides/inference.rst
index feeb6cbb56..6ce218401e 100644
--- a/docs/source/guides/inference.rst
+++ b/docs/source/guides/inference.rst
@@ -5,9 +5,9 @@ Inference
 Anomalib provides entrypoint scripts for using a trained model to generate predictions from a source of image data. This guide explains how to run inference with the standard PyTorch model and the exported OpenVINO model.
 
-Torch Inference
+PyTorch (Lightning) Inference
 ==============
-The entrypoint script in ``tools/inference.py`` can be used to run inference with a trained PyTorch model. The entrypoint script has several command line arguments that can be used to configure inference:
+The entrypoint script in ``tools/inference/lightning.py`` can be used to run inference with a trained PyTorch model. The script runs inference by loading a previously trained model into a PyTorch Lightning trainer and running the ``predict`` sequence. The entrypoint script has several command line arguments that can be used to configure inference:
 
 +---------------------+----------+-------------------------------------------------------------------------------------+
 | Parameter           | Required | Description                                                                         |
@@ -27,7 +27,7 @@ The entrypoint script in ``tools/inference.py`` can be used to run inference wit
 
 To run inference, call the script from the command line with the following parameters, e.g.:
 
-``python tools/inference.py --config padim.yaml --weight_path results/weights/model.ckpt --image_path image.png``
+``python tools/inference/lightning.py --config padim.yaml --weight_path results/weights/model.ckpt --image_path image.png``
 
 This will run inference on the specified image file or all images in the folder. A visualization of the inference results, including the predicted heatmap and segmentation results (if applicable), will be displayed on the screen, like the example below.
 
@@ -35,7 +35,7 @@ This will run inference on the specified image file or all images in the folder.
 
 OpenVINO Inference
 ==============
-To run OpenVINO inference, first make sure that your model has been exported to the OpenVINO IR format. Once the model has been exported, OpenVINO inference can be triggered by running the OpenVINO entrypoint script in ``tools/openvino.py``. The command line arguments are very similar to PyTorch inference entrypoint script:
+To run OpenVINO inference, first make sure that your model has been exported to the OpenVINO IR format. Once the model has been exported, OpenVINO inference can be triggered by running the OpenVINO entrypoint script in ``tools/inference/openvino.py``.
The command line arguments are very similar to PyTorch inference entrypoint script: +-------------+----------+-------------------------------------------------------------------------------------+ | Parameter | Required | Description | @@ -56,6 +56,6 @@ For correct inference results, the ``meta_data`` argument should be specified an As an example, OpenVINO inference can be triggered by the following command: -``python tools/openvino.py --config padim.yaml --weight_path results/openvino/model.xml --image_path image.png --meta_data results/openvino/meta_data.json`` +``python tools/inference/openvino.py --config padim.yaml --weight_path results/openvino/model.xml --image_path image.png --meta_data results/openvino/meta_data.json`` Similar to PyTorch inference, the visualization results will be displayed on the screen, and optionally saved to the file system location specified by the ``save_data`` parameter. diff --git a/tools/inference_gradio.py b/tools/inference/gradio_inference.py similarity index 93% rename from tools/inference_gradio.py rename to tools/inference/gradio_inference.py index 8f17b6eb86..cafe6b1c29 100644 --- a/tools/inference_gradio.py +++ b/tools/inference/gradio_inference.py @@ -98,13 +98,13 @@ def get_inferencer(config_path: Path, weight_path: Path, meta_data_path: Optiona inferencer: Inferencer if extension in (".ckpt"): module = import_module("anomalib.deploy.inferencers.torch") - TorchInferencer = getattr(module, "TorchInferencer") - inferencer = TorchInferencer(config=config, model_source=weight_path, meta_data_path=meta_data_path) + torch_inferencer = getattr(module, "TorchInferencer") + inferencer = torch_inferencer(config=config, model_source=weight_path, meta_data_path=meta_data_path) elif extension in (".onnx", ".bin", ".xml"): module = import_module("anomalib.deploy.inferencers.openvino") - OpenVINOInferencer = getattr(module, "OpenVINOInferencer") - inferencer = OpenVINOInferencer(config=config, path=weight_path, meta_data_path=meta_data_path) + openvino_inferencer = getattr(module, "OpenVINOInferencer") + inferencer = openvino_inferencer(config=config, path=weight_path, meta_data_path=meta_data_path) else: raise ValueError( diff --git a/tools/inference.py b/tools/inference/lightning.py similarity index 100% rename from tools/inference.py rename to tools/inference/lightning.py diff --git a/tools/openvino_inference.py b/tools/inference/openvino.py similarity index 100% rename from tools/openvino_inference.py rename to tools/inference/openvino.py From fe16cf7d322ae0cab0b31383b19be3967f66cae3 Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Wed, 29 Jun 2022 18:23:51 +0200 Subject: [PATCH 09/13] fix visualizer in cli --- anomalib/utils/cli/cli.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/anomalib/utils/cli/cli.py b/anomalib/utils/cli/cli.py index 1212c963df..98e3efb82e 100644 --- a/anomalib/utils/cli/cli.py +++ b/anomalib/utils/cli/cli.py @@ -118,9 +118,12 @@ def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: parser.add_lightning_class_args(VisualizerCallback, "visualization") # type: ignore parser.set_defaults( { + "visualization.mode": "full", "visualization.task": "segmentation", - "visualization.log_images_to": ["local"], - "visualization.inputs_are_normalized": True, + "visualization.image_save_path": "", + "visualization.save_images": False, + "visualization.show_images": False, + "visualization.log_images": False, } ) @@ -148,6 +151,8 @@ def __set_default_root_dir(self) -> None: # that is two-level up. 
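             # (for a checkpoint at <run_dir>/weights/model.ckpt this resolves to <run_dir>)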
default_root_dir = str(Path(config.trainer.resume_from_checkpoint).parent.parent) + if "image_save_path" not in config.visualization.keys() or config.visualization.image_save_path is None: + self.config[subcommand].visualization.image_save_path = config.project.path self.config[subcommand].trainer.default_root_dir = default_root_dir def __set_callbacks(self) -> None: From fd78b5680893dc0843d12d85e16b580fa59bd796 Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Wed, 29 Jun 2022 18:29:13 +0200 Subject: [PATCH 10/13] remove param from kwargs --- anomalib/utils/loggers/tensorboard.py | 1 - 1 file changed, 1 deletion(-) diff --git a/anomalib/utils/loggers/tensorboard.py b/anomalib/utils/loggers/tensorboard.py index 1cf3e9da54..a5b5c11cf2 100644 --- a/anomalib/utils/loggers/tensorboard.py +++ b/anomalib/utils/loggers/tensorboard.py @@ -100,6 +100,5 @@ def add_image(self, image: Union[np.ndarray, Figure], name: Optional[str] = None image.canvas.draw() # cache the renderer buffer = np.frombuffer(image.canvas.tostring_rgb(), dtype=np.uint8) image = buffer.reshape(image.canvas.get_width_height()[::-1] + (3,)) - kwargs["dataformats"] = "HWC" self.experiment.add_image(img_tensor=image, tag=name, dataformats="HWC", **kwargs) From 36494e3bdc10e2005a8e8108116290b441f2ea23 Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Wed, 29 Jun 2022 19:02:55 +0200 Subject: [PATCH 11/13] fix visualizer args in notebook --- notebooks/200_models/201_fastflow.ipynb | 272 +++++++++++++++++++----- 1 file changed, 224 insertions(+), 48 deletions(-) diff --git a/notebooks/200_models/201_fastflow.ipynb b/notebooks/200_models/201_fastflow.ipynb index 3786ce8b7b..5bb2fdf653 100644 --- a/notebooks/200_models/201_fastflow.ipynb +++ b/notebooks/200_models/201_fastflow.ipynb @@ -2,7 +2,11 @@ "cells": [ { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "# FastFlow\n", "\n", @@ -14,7 +18,11 @@ { "cell_type": "code", "execution_count": 1, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from functools import partial, update_wrapper\n", @@ -42,7 +50,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Data Module\n", "\n", @@ -52,7 +64,11 @@ { "cell_type": "code", "execution_count": 2, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -79,7 +95,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## FastFlow Model\n", "\n", @@ -89,7 +109,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "Fastflow??" @@ -98,7 +122,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "model = Fastflow(input_size=[256, 256], backbone=\"resnet18\", flow_steps=8)" @@ -106,7 +134,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Depending on the `training` mode, `model` returns two different outputs. If the model is in `training` mode, it returns the hidden variable and the log of the jacobian, based on the feature maps." 
] @@ -114,7 +146,11 @@ { "cell_type": "code", "execution_count": 5, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -133,7 +169,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "During the test/inference mode, the model returns an anomaly heatmap localizing the anomalous regions." ] @@ -141,7 +181,11 @@ { "cell_type": "code", "execution_count": 6, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -159,7 +203,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Optimizer\n", "\n", @@ -169,7 +217,11 @@ { "cell_type": "code", "execution_count": 7, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "def configure_optimizers(lightning_module: LightningModule, optimizer: Optimizer) -> Any: # pylint: disable=W0613,W0621\n", @@ -185,7 +237,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Callbacks\n", "\n", @@ -195,7 +251,11 @@ { "cell_type": "code", "execution_count": 8, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "callbacks = [\n", @@ -214,13 +274,17 @@ " patience=3,\n", " ),\n", " MinMaxNormalizationCallback(),\n", - " VisualizerCallback(task=\"segmentation\", log_images_to=[\"local\"]),\n", + " VisualizerCallback(mode=\"full\", task=\"segmentation\", image_save_path=\"./results/images\"),\n", "]" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Training\n", "\n", @@ -232,7 +296,11 @@ { "cell_type": "code", "execution_count": 9, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stderr", @@ -256,7 +324,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "`Trainer` object has number of options that suit all specific needs. For more details, refer to [Lightning Documentation](https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html) to see how it could be tweaked to your needs.\n", "\n", @@ -266,7 +338,11 @@ { "cell_type": "code", "execution_count": 10, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stderr", @@ -332,7 +408,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The training has finished after 12 epochs. This is because, we set the `EarlyStopping` criteria with a patience of 3, which terminated the training after `pixel_AUROC` stopped improving. 
If we increased the `patience`, the training would continue further.\n", "\n", @@ -344,7 +424,11 @@ { "cell_type": "code", "execution_count": 11, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stderr", @@ -373,10 +457,10 @@ ], "text/plain": [ "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", - "┃\u001b[1m \u001b[0m\u001b[1m Test metric \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m DataLoader 0 \u001b[0m\u001b[1m \u001b[0m┃\n", + "┃\u001B[1m \u001B[0m\u001B[1m Test metric \u001B[0m\u001B[1m \u001B[0m┃\u001B[1m \u001B[0m\u001B[1m DataLoader 0 \u001B[0m\u001B[1m \u001B[0m┃\n", "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", - "│\u001b[36m \u001b[0m\u001b[36m image_AUROC \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 1.0 \u001b[0m\u001b[35m \u001b[0m│\n", - "│\u001b[36m \u001b[0m\u001b[36m pixel_AUROC \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.9744887351989746 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001B[36m \u001B[0m\u001B[36m image_AUROC \u001B[0m\u001B[36m \u001B[0m│\u001B[35m \u001B[0m\u001B[35m 1.0 \u001B[0m\u001B[35m \u001B[0m│\n", + "│\u001B[36m \u001B[0m\u001B[36m pixel_AUROC \u001B[0m\u001B[36m \u001B[0m│\u001B[35m \u001B[0m\u001B[35m 0.9744887351989746 \u001B[0m\u001B[35m \u001B[0m│\n", "└───────────────────────────┴───────────────────────────┘\n" ] }, @@ -400,7 +484,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "`trainer.test` returns the `pixel_AUROC` and `image_AUROC` results. We could also find the saved output in `images` directory." ] @@ -408,7 +496,11 @@ { "cell_type": "code", "execution_count": 12, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -424,7 +516,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Inference\n", "\n", @@ -434,7 +530,11 @@ { "cell_type": "code", "execution_count": 13, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "inference_dataset = InferenceDataset(\n", @@ -445,7 +545,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We could utilize `Trainer`'s `predict` method to infer, and get the outputs to visualize" ] @@ -453,7 +557,11 @@ { "cell_type": "code", "execution_count": 14, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stderr", @@ -478,7 +586,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "`predictions` contain image, anomaly maps, predicted scores, labels and masks. These are all stored in a dictionary. We could check this by printing the `prediction` keys." 
] @@ -486,7 +598,11 @@ { "cell_type": "code", "execution_count": 15, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -503,7 +619,11 @@ { "cell_type": "code", "execution_count": 16, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -523,14 +643,22 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Visualization" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "To properly visualize the predictions, we will need to perform some post-processing operations.\n", "\n", @@ -540,7 +668,11 @@ { "cell_type": "code", "execution_count": 17, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -560,7 +692,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We could now see that `image` is of a numpy array and within the range of \\[0, 255\\]. It's ready to be visualized now." ] @@ -568,7 +704,11 @@ { "cell_type": "code", "execution_count": 18, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -599,7 +739,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The second output of the predictions is the anomaly map. As can be seen above, it's also a torch tensor and of size `torch.Size([1, 1, 256, 256])`. We therefore need to convert it to numpy and squeeze the dimensions to make it `256x256` output to visualize." ] @@ -607,7 +751,11 @@ { "cell_type": "code", "execution_count": 19, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -640,7 +788,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We could superimpose (overlay) the anomaly map on top of the original image to get a heat map. Anomalib has a built-in function to achieve this. Let's try it." ] @@ -648,7 +800,11 @@ { "cell_type": "code", "execution_count": 20, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -680,7 +836,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "`predictions` also contains prediction scores and labels." ] @@ -688,7 +848,11 @@ { "cell_type": "code", "execution_count": 21, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -706,7 +870,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The last part of the predictions is the mask that is predicted by the model. This is a boolean mask containing True/False for the abnormal/normal pixels, respectively." ] @@ -714,7 +882,11 @@ { "cell_type": "code", "execution_count": 22, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -746,7 +918,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "That wraps it! In this notebook, we show how we could train, test and finally infer a FastFlow model using Anomalib API." 
] From de63f1d6449b374208b822c33102f57138cd1946 Mon Sep 17 00:00:00 2001 From: Ashwin Vaidya Date: Fri, 1 Jul 2022 10:57:22 +0200 Subject: [PATCH 12/13] Fix torchvision version (#397) --- requirements/base.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/base.txt b/requirements/base.txt index b9687d9dca..00fa66971a 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -6,9 +6,9 @@ opencv-python>=4.5.3.56 pandas>=1.1.0 pytorch-lightning[extra]>=1.6.0 timm==0.5.4 -torchmetrics>=0.9.0 -torchvision>=0.9.1 -torchtext>=0.9.1 +torchmetrics==0.9.1 +torchvision==0.12.0 +torchtext==0.12.0 wandb==0.12.17 matplotlib>=3.4.3 gradio>=2.9.4 From 1807bd96a81902fbb549f963c91550113ed0bf8a Mon Sep 17 00:00:00 2001 From: Ashwin Vaidya Date: Fri, 1 Jul 2022 10:57:38 +0200 Subject: [PATCH 13/13] =?UTF-8?q?=F0=9F=9B=A0=20Fix=20torchvision=20versio?= =?UTF-8?q?n=20(#397)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit