From 56c601410f04bceeee1f21be5acdf2363a416f2a Mon Sep 17 00:00:00 2001 From: Samet Date: Thu, 30 Dec 2021 01:42:38 -0700 Subject: [PATCH 01/30] Bump up the pytorch lightning to master branch due to vulnerability issues. --- anomalib/models/dfkde/config.yaml | 5 ----- anomalib/models/dfm/config.yaml | 5 ----- anomalib/models/padim/config.yaml | 5 ----- anomalib/models/patchcore/config.yaml | 5 ----- anomalib/models/stfpm/config.yaml | 5 ----- requirements/base.txt | 2 +- 6 files changed, 1 insertion(+), 26 deletions(-) diff --git a/anomalib/models/dfkde/config.yaml b/anomalib/models/dfkde/config.yaml index 2569279f00..f7e31648c3 100644 --- a/anomalib/models/dfkde/config.yaml +++ b/anomalib/models/dfkde/config.yaml @@ -35,7 +35,6 @@ trainer: accelerator: null accumulate_grad_batches: 1 amp_backend: native - amp_level: O2 auto_lr_find: false auto_scale_batch_size: false auto_select_gpus: false @@ -44,9 +43,7 @@ trainer: checkpoint_callback: true default_root_dir: null deterministic: false - distributed_backend: null fast_dev_run: false - flush_logs_every_n_steps: 100 gpus: 1 gradient_clip_val: 0 limit_predict_batches: 1.0 @@ -71,14 +68,12 @@ trainer: process_position: 0 profiler: null progress_bar_refresh_rate: null - reload_dataloaders_every_epoch: false replace_sampler_ddp: true stochastic_weight_avg: false sync_batchnorm: false terminate_on_nan: false tpu_cores: null track_grad_norm: -1 - truncated_bptt_steps: null val_check_interval: 1.0 weights_save_path: null weights_summary: top diff --git a/anomalib/models/dfm/config.yaml b/anomalib/models/dfm/config.yaml index f2eb6cba2a..c38e704314 100755 --- a/anomalib/models/dfm/config.yaml +++ b/anomalib/models/dfm/config.yaml @@ -34,7 +34,6 @@ trainer: accelerator: null accumulate_grad_batches: 1 amp_backend: native - amp_level: O2 auto_lr_find: false auto_scale_batch_size: false auto_select_gpus: false @@ -43,9 +42,7 @@ trainer: checkpoint_callback: true default_root_dir: null deterministic: false - distributed_backend: null fast_dev_run: false - flush_logs_every_n_steps: 100 gpus: 1 gradient_clip_val: 0 limit_predict_batches: 1.0 @@ -70,14 +67,12 @@ trainer: process_position: 0 profiler: null progress_bar_refresh_rate: null - reload_dataloaders_every_epoch: false replace_sampler_ddp: true stochastic_weight_avg: false sync_batchnorm: false terminate_on_nan: false tpu_cores: null track_grad_norm: -1 - truncated_bptt_steps: null val_check_interval: 1.0 weights_save_path: null weights_summary: top diff --git a/anomalib/models/padim/config.yaml b/anomalib/models/padim/config.yaml index dffe81e606..46008f085d 100644 --- a/anomalib/models/padim/config.yaml +++ b/anomalib/models/padim/config.yaml @@ -60,7 +60,6 @@ trainer: accelerator: null accumulate_grad_batches: 1 amp_backend: native - amp_level: O2 auto_lr_find: false auto_scale_batch_size: false auto_select_gpus: false @@ -69,9 +68,7 @@ trainer: checkpoint_callback: true default_root_dir: null deterministic: true - distributed_backend: null fast_dev_run: false - flush_logs_every_n_steps: 100 gpus: 1 gradient_clip_val: 0 limit_predict_batches: 1.0 @@ -96,14 +93,12 @@ trainer: process_position: 0 profiler: null progress_bar_refresh_rate: null - reload_dataloaders_every_epoch: false replace_sampler_ddp: true stochastic_weight_avg: false sync_batchnorm: false terminate_on_nan: false tpu_cores: null track_grad_norm: -1 - truncated_bptt_steps: null val_check_interval: 1.0 weights_save_path: null weights_summary: top diff --git a/anomalib/models/patchcore/config.yaml
b/anomalib/models/patchcore/config.yaml index 1d6dc3a9d5..096055f8dd 100644 --- a/anomalib/models/patchcore/config.yaml +++ b/anomalib/models/patchcore/config.yaml @@ -46,7 +46,6 @@ trainer: accelerator: null accumulate_grad_batches: 1 amp_backend: native - amp_level: O2 auto_lr_find: false auto_scale_batch_size: false auto_select_gpus: false @@ -55,9 +54,7 @@ trainer: checkpoint_callback: true default_root_dir: null deterministic: true - distributed_backend: null fast_dev_run: false - flush_logs_every_n_steps: 100 gpus: 1 gradient_clip_val: 0 limit_predict_batches: 1.0 @@ -82,14 +79,12 @@ trainer: process_position: 0 profiler: null progress_bar_refresh_rate: null - reload_dataloaders_every_epoch: false replace_sampler_ddp: true stochastic_weight_avg: false sync_batchnorm: false terminate_on_nan: false tpu_cores: null track_grad_norm: -1 - truncated_bptt_steps: null val_check_interval: 1.0 weights_save_path: null weights_summary: top diff --git a/anomalib/models/stfpm/config.yaml b/anomalib/models/stfpm/config.yaml index e3d70eb562..c6ed9b6577 100644 --- a/anomalib/models/stfpm/config.yaml +++ b/anomalib/models/stfpm/config.yaml @@ -71,7 +71,6 @@ trainer: accelerator: null accumulate_grad_batches: 1 amp_backend: native - amp_level: O2 auto_lr_find: false auto_scale_batch_size: false auto_select_gpus: false @@ -80,9 +79,7 @@ trainer: checkpoint_callback: true default_root_dir: null deterministic: true - distributed_backend: null fast_dev_run: false - flush_logs_every_n_steps: 100 gpus: 1 gradient_clip_val: 0 limit_predict_batches: 1.0 @@ -107,14 +104,12 @@ trainer: process_position: 0 profiler: null progress_bar_refresh_rate: null - reload_dataloaders_every_epoch: false replace_sampler_ddp: true stochastic_weight_avg: false sync_batchnorm: false terminate_on_nan: false tpu_cores: null track_grad_norm: -1 - truncated_bptt_steps: null val_check_interval: 1.0 weights_save_path: null weights_summary: top diff --git a/requirements/base.txt b/requirements/base.txt index 623c3bb527..a5d0d05c84 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -9,7 +9,7 @@ nncf==2.0.0 numpy~=1.19.5 omegaconf==2.1.1 pillow==8.3.2 -pytorch-lightning==1.3.6 +pytorch-lightning @ git+https://github.com/PyTorchLightning/pytorch-lightning.git torch==1.8.1 torchvision==0.9.1 scikit-image>=0.17.2 From 1fef827a827a7d2286aeb8eca6d537f3627d8ece Mon Sep 17 00:00:00 2001 From: Samet Date: Fri, 7 Jan 2022 06:42:05 -0700 Subject: [PATCH 02/30] Updated config files --- anomalib/models/dfkde/config.yaml | 4 ++-- anomalib/models/dfm/config.yaml | 4 ++-- anomalib/models/padim/config.yaml | 6 +++--- anomalib/models/patchcore/config.yaml | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/anomalib/models/dfkde/config.yaml b/anomalib/models/dfkde/config.yaml index e5ac9983c6..62c835072c 100644 --- a/anomalib/models/dfkde/config.yaml +++ b/anomalib/models/dfkde/config.yaml @@ -39,7 +39,7 @@ trainer: auto_scale_batch_size: false auto_select_gpus: false benchmark: false - check_val_every_n_epoch: 1 + check_val_every_n_epoch: 2 # Don't validate before extracting features. checkpoint_callback: true default_root_dir: null deterministic: false @@ -74,6 +74,6 @@ trainer: terminate_on_nan: false tpu_cores: null track_grad_norm: -1 - val_check_interval: 1.0 + val_check_interval: 2.0 # Don't validate before extracting features. 
weights_save_path: null weights_summary: top diff --git a/anomalib/models/dfm/config.yaml b/anomalib/models/dfm/config.yaml index caf8dd6d3e..e2495dba13 100755 --- a/anomalib/models/dfm/config.yaml +++ b/anomalib/models/dfm/config.yaml @@ -38,7 +38,7 @@ trainer: auto_scale_batch_size: false auto_select_gpus: false benchmark: false - check_val_every_n_epoch: 1 + check_val_every_n_epoch: 2 # Don't validate before extracting features. checkpoint_callback: true default_root_dir: null deterministic: false @@ -73,6 +73,6 @@ trainer: terminate_on_nan: false tpu_cores: null track_grad_norm: -1 - val_check_interval: 1.0 + val_check_interval: 2.0 # Don't validate before extracting features. weights_save_path: null weights_summary: top diff --git a/anomalib/models/padim/config.yaml b/anomalib/models/padim/config.yaml index effbfb4299..0a5f2a658c 100644 --- a/anomalib/models/padim/config.yaml +++ b/anomalib/models/padim/config.yaml @@ -35,7 +35,7 @@ model: project: seed: 42 path: ./results - log_images_to: [] + log_images_to: ["local"] logger: false save_to_csv: false @@ -64,7 +64,7 @@ trainer: auto_scale_batch_size: false auto_select_gpus: false benchmark: false - check_val_every_n_epoch: 1 + check_val_every_n_epoch: 2 # Don't validate before extracting features. checkpoint_callback: true default_root_dir: null deterministic: true @@ -99,6 +99,6 @@ trainer: terminate_on_nan: false tpu_cores: null track_grad_norm: -1 - val_check_interval: 1.0 + val_check_interval: 2.0 # Don't validate before extracting features. weights_save_path: null weights_summary: top diff --git a/anomalib/models/patchcore/config.yaml b/anomalib/models/patchcore/config.yaml index e022ed97f1..63cf9bb4ab 100644 --- a/anomalib/models/patchcore/config.yaml +++ b/anomalib/models/patchcore/config.yaml @@ -50,7 +50,7 @@ trainer: auto_scale_batch_size: false auto_select_gpus: false benchmark: false - check_val_every_n_epoch: 1 + check_val_every_n_epoch: 2 # Don't validate before extracting features. checkpoint_callback: true default_root_dir: null deterministic: true @@ -85,6 +85,6 @@ trainer: terminate_on_nan: false tpu_cores: null track_grad_norm: -1 - val_check_interval: 1.0 + val_check_interval: 2.0 # Don't validate before extracting features. 
weights_save_path: null weights_summary: top From ec10d61d17d44a3325c393213164be97c5589451 Mon Sep 17 00:00:00 2001 From: Samet Date: Fri, 7 Jan 2022 06:42:45 -0700 Subject: [PATCH 03/30] Added trainer.validate method to tools/train.py --- tools/train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/train.py b/tools/train.py index d1139a8f60..bbe2077901 100644 --- a/tools/train.py +++ b/tools/train.py @@ -59,6 +59,8 @@ def train(): trainer = Trainer(**config.trainer, logger=logger, callbacks=callbacks) trainer.fit(model=model, datamodule=datamodule) + # TODO: https://github.com/openvinotoolkit/anomalib/issues/62 + trainer.validate(model=model, datamodule=datamodule) trainer.test(model=model, datamodule=datamodule) From 3927ee516884622b768184ca29b4693dbee51a61 Mon Sep 17 00:00:00 2001 From: Samet Date: Fri, 7 Jan 2022 07:24:57 -0700 Subject: [PATCH 04/30] address mypy issues --- anomalib/core/callbacks/cdf_normalization.py | 4 +--- anomalib/core/callbacks/nncf_callback.py | 22 ++++++++++---------- anomalib/core/model/anomaly_module.py | 3 ++- anomalib/data/mvtec.py | 14 +++++++++---- 4 files changed, 24 insertions(+), 19 deletions(-) diff --git a/anomalib/core/callbacks/cdf_normalization.py b/anomalib/core/callbacks/cdf_normalization.py index ac6fab98bb..01c92bfc61 100644 --- a/anomalib/core/callbacks/cdf_normalization.py +++ b/anomalib/core/callbacks/cdf_normalization.py @@ -22,9 +22,7 @@ def on_test_start(self, _trainer: pl.Trainer, pl_module: pl.LightningModule) -> pl_module.image_metrics.F1.threshold = 0.5 pl_module.pixel_metrics.F1.threshold = 0.5 - def on_train_epoch_end( - self, trainer: pl.Trainer, pl_module: pl.LightningModule, _unused: Optional[Any] = None - ) -> None: + def on_train_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: """Called when the train epoch ends. Use the current model to compute the anomaly score distributions diff --git a/anomalib/core/callbacks/nncf_callback.py b/anomalib/core/callbacks/nncf_callback.py index fa193c492f..d7e5fc5f65 100644 --- a/anomalib/core/callbacks/nncf_callback.py +++ b/anomalib/core/callbacks/nncf_callback.py @@ -13,8 +13,6 @@ from pytorch_lightning import Callback from torch.utils.data.dataloader import DataLoader -from anomalib.data import get_datamodule - def criterion_fn(outputs, criterion): """Calls the criterion function on outputs.""" @@ -76,21 +74,18 @@ def __init__(self, config: Union[ListConfig, DictConfig], dirpath: str, filename self.dirpath = dirpath self.filename = filename - # we need to create a datamodule here to obtain the init loader - datamodule = get_datamodule(config) - datamodule.setup() - self.train_loader = datamodule.train_dataloader() - self.comp_ctrl: Optional[CompressionAlgorithmController] = None self.compression_scheduler: CompressionScheduler - def setup(self, _: pl.Trainer, pl_module: pl.LightningModule, __: Optional[str] = None) -> None: + def setup(self, trainer: pl.Trainer, pl_module: pl.LightningModule, _stage: Optional[str] = None) -> None: """Call when fit or test begins. Takes the pytorch model and wraps it using the compression controller so that it is ready for nncf fine-tuning. 
""" if self.comp_ctrl is None: - init_loader = InitLoader(self.train_loader) + # NOTE: trainer.datamodule returns the following error + # "Trainer" has no attribute "datamodule" [attr-defined] + init_loader = InitLoader(trainer.datamodule.train_dataloader()) # type: ignore nncf_config = register_default_init_args( self.nncf_config, init_loader, pl_module.model.loss, criterion_fn=criterion_fn ) @@ -99,7 +94,12 @@ def setup(self, _: pl.Trainer, pl_module: pl.LightningModule, __: Optional[str] self.compression_scheduler = self.comp_ctrl.scheduler def on_train_batch_start( - self, trainer, _pl_module: pl.LightningModule, _batch: Any, _batch_idx: int, _dataloader_idx: int + self, + trainer: pl.Trainer, + _pl_module: pl.LightningModule, + _batch: Any, + _batch_idx: int, + _unused: Optional[int] = 0, ) -> None: """Call when the train batch begins. @@ -109,7 +109,7 @@ def on_train_batch_start( if self.comp_ctrl is not None: trainer.model.loss_val = self.comp_ctrl.loss() - def on_train_end(self, _trainer, _pl_module: pl.LightningModule) -> None: + def on_train_end(self, _trainer: pl.Trainer, _pl_module: pl.LightningModule) -> None: """Call when the train ends. Exports onnx model and if compression controller is not None, uses the onnx model to generate the OpenVINO IR. diff --git a/anomalib/core/model/anomaly_module.py b/anomalib/core/model/anomaly_module.py index 0709bc424a..02605da7bc 100644 --- a/anomalib/core/model/anomaly_module.py +++ b/anomalib/core/model/anomaly_module.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions # and limitations under the License. +from abc import ABC from typing import List, Union import pytorch_lightning as pl @@ -30,7 +31,7 @@ ) -class AnomalyModule(pl.LightningModule): +class AnomalyModule(pl.LightningModule, ABC): """AnomalyModule to train, validate, predict and test images. Acts as a base class for all the Anomaly Modules in the library. 
diff --git a/anomalib/data/mvtec.py b/anomalib/data/mvtec.py index 8f44558aa7..ebaae0072a 100644 --- a/anomalib/data/mvtec.py +++ b/anomalib/data/mvtec.py @@ -24,6 +24,7 @@ import logging import random import tarfile +from abc import ABC from pathlib import Path from typing import Dict, Optional, Tuple, Union from urllib.request import urlretrieve @@ -34,6 +35,7 @@ import pandas as pd from pandas.core.frame import DataFrame from pytorch_lightning.core.datamodule import LightningDataModule +from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS from torch import Tensor from torch.utils.data import DataLoader from torch.utils.data.dataset import Dataset @@ -358,7 +360,7 @@ def __getitem__(self, index: int) -> Dict[str, Union[str, Tensor]]: return item -class MVTecDataModule(LightningDataModule): +class MVTecDataModule(LightningDataModule, ABC): """MVTec Lightning Data Module.""" def __init__( @@ -465,15 +467,19 @@ def setup(self, stage: Optional[str] = None) -> None: create_validation_set=self.create_validation_set, ) - def train_dataloader(self) -> DataLoader: + def train_dataloader(self) -> TRAIN_DATALOADERS: """Get train dataloader.""" return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batch_size, num_workers=self.num_workers) - def val_dataloader(self) -> DataLoader: + def val_dataloader(self) -> EVAL_DATALOADERS: """Get validation dataloader.""" dataset = self.val_data if self.create_validation_set else self.test_data return DataLoader(dataset=dataset, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers) - def test_dataloader(self) -> DataLoader: + def test_dataloader(self) -> EVAL_DATALOADERS: """Get test dataloader.""" return DataLoader(self.test_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers) + + def predict_dataloader(self) -> EVAL_DATALOADERS: + """Get predict dataloader.""" + raise NotImplementedError From 2eebe2d6157613fd915894b4d9314e84c6fa5e2c Mon Sep 17 00:00:00 2001 From: Samet Date: Fri, 7 Jan 2022 13:58:47 -0700 Subject: [PATCH 05/30] added Inference Dataset --- anomalib/data/__init__.py | 4 ++ anomalib/data/inference.py | 81 ++++++++++++++++++++++++++++++++++++++ anomalib/data/mvtec.py | 34 +++++++++++----- 3 files changed, 109 insertions(+), 10 deletions(-) create mode 100644 anomalib/data/inference.py diff --git a/anomalib/data/__init__.py b/anomalib/data/__init__.py index a5f5d2d6ec..302475d3e8 100644 --- a/anomalib/data/__init__.py +++ b/anomalib/data/__init__.py @@ -19,6 +19,7 @@ from omegaconf import DictConfig, ListConfig from pytorch_lightning import LightningDataModule +from .inference import InferenceDataset from .mvtec import MVTecDataModule @@ -48,3 +49,6 @@ def get_datamodule(config: Union[DictConfig, ListConfig]) -> LightningDataModule raise ValueError("Unknown dataset!") return datamodule + + +__all__ = ["get_datamodule", "InferenceDataset"] diff --git a/anomalib/data/inference.py b/anomalib/data/inference.py new file mode 100644 index 0000000000..d8bfd1528d --- /dev/null +++ b/anomalib/data/inference.py @@ -0,0 +1,81 @@ +"""Test Inference Dataset.""" + +# Copyright (C) 2020 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +from pathlib import Path +from typing import Any, List, Optional, Tuple, Union + +import albumentations as A +from torch.utils.data.dataset import Dataset +from torchvision.datasets.folder import IMG_EXTENSIONS + +from anomalib.data.transforms import PreProcessor +from anomalib.data.utils import read_image + + +def get_image_filenames(path: Union[str, Path]) -> List[str]: + """Get image filenames. + + Args: + path (Union[str, Path]): Path to image or image-folder. + + Returns: + List[str]: List of image filenames + + """ + image_filenames: List[str] + + if isinstance(path, str): + path = Path(path) + + # If `path` is an image path. + if path.is_file() and path.suffix in IMG_EXTENSIONS: + image_filenames = [str(path)] + + # If it is a path to image folder + if path.is_dir(): + image_filenames = [str(p) for p in path.glob("**/*") if p.suffix in IMG_EXTENSIONS] + + if len(image_filenames) == 0: + raise ValueError(f"Found 0 images in {path}") + + return image_filenames + + +class InferenceDataset(Dataset): + """Inference Dataset to perform prediction.""" + + def __init__( + self, + path: Union[str, Path], + image_size: Optional[Union[int, Tuple[int, int]]] = None, + transform_config: Optional[Union[str, A.Compose]] = None, + ) -> None: + super().__init__() + + self.image_filenames = get_image_filenames(path) + self.pre_process = PreProcessor(config=transform_config, image_size=image_size) + + def __len__(self) -> int: + """Get the number of images in the given path.""" + return len(self.image_filenames) + + def __getitem__(self, index: int) -> Any: + """Get the image based on the `index`.""" + image_filename = self.image_filenames[index] + image = read_image(path=image_filename) + pre_processed = self.pre_process(image=image) + + return pre_processed diff --git a/anomalib/data/mvtec.py b/anomalib/data/mvtec.py index ebaae0072a..e8d3274a91 100644 --- a/anomalib/data/mvtec.py +++ b/anomalib/data/mvtec.py @@ -41,6 +41,7 @@ from torch.utils.data.dataset import Dataset from torchvision.datasets.folder import VisionDataset +from anomalib.data.inference import InferenceDataset from anomalib.data.transforms import PreProcessor from anomalib.data.utils import read_image from anomalib.utils.download_progress_bar import DownloadProgressBar @@ -419,8 +420,10 @@ def __init__( self.root = root if isinstance(root, Path) else Path(root) self.category = category self.dataset_path = self.root / self.category + self.transform_config = transform_config + self.image_size = image_size - self.pre_process = PreProcessor(config=transform_config, image_size=image_size) + self.pre_process = PreProcessor(config=self.transform_config, image_size=self.image_size) self.train_batch_size = train_batch_size self.test_batch_size = test_batch_size @@ -433,13 +436,25 @@ def __init__( self.test_data: Dataset if create_validation_set: self.val_data: Dataset + self.inference_data: Dataset def setup(self, stage: Optional[str] = None) -> None: """Setup train, validation and test data. Args: stage: Optional[str]: Train/Val/Test stages. 
(Default value = None) + """ + if stage in (None, "fit"): + self.train_data = MVTec( + root=self.root, + category=self.category, + pre_process=self.pre_process, + split="train", + seed=self.seed, + create_validation_set=self.create_validation_set, + ) + if self.create_validation_set: self.val_data = MVTec( root=self.root, @@ -449,6 +464,7 @@ def setup(self, stage: Optional[str] = None) -> None: seed=self.seed, create_validation_set=self.create_validation_set, ) + self.test_data = MVTec( root=self.root, category=self.category, @@ -457,14 +473,10 @@ def setup(self, stage: Optional[str] = None) -> None: seed=self.seed, create_validation_set=self.create_validation_set, ) - if stage in (None, "fit"): - self.train_data = MVTec( - root=self.root, - category=self.category, - pre_process=self.pre_process, - split="train", - seed=self.seed, - create_validation_set=self.create_validation_set, + + if stage == "predict": + self.inference_data = InferenceDataset( + path=self.root, image_size=self.image_size, transform_config=self.transform_config ) def train_dataloader(self) -> TRAIN_DATALOADERS: @@ -482,4 +494,6 @@ def test_dataloader(self) -> EVAL_DATALOADERS: def predict_dataloader(self) -> EVAL_DATALOADERS: """Get predict dataloader.""" - raise NotImplementedError + return DataLoader( + self.inference_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers + ) From 8634dcccbbccecbd3c955ce5cf3076a0b889de8e Mon Sep 17 00:00:00 2001 From: Samet Date: Fri, 7 Jan 2022 14:19:08 -0700 Subject: [PATCH 06/30] Cleanup and polish --- anomalib/core/model/anomaly_module.py | 3 +- anomalib/data/inference.py | 54 ++++++++++----------------- anomalib/data/mvtec.py | 2 +- anomalib/data/utils.py | 31 +++++++++++++++ 4 files changed, 53 insertions(+), 37 deletions(-) diff --git a/anomalib/core/model/anomaly_module.py b/anomalib/core/model/anomaly_module.py index 02605da7bc..0709bc424a 100644 --- a/anomalib/core/model/anomaly_module.py +++ b/anomalib/core/model/anomaly_module.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions # and limitations under the License. -from abc import ABC from typing import List, Union import pytorch_lightning as pl @@ -31,7 +30,7 @@ ) -class AnomalyModule(pl.LightningModule, ABC): +class AnomalyModule(pl.LightningModule): """AnomalyModule to train, validate, predict and test images. Acts as a base class for all the Anomaly Modules in the library. diff --git a/anomalib/data/inference.py b/anomalib/data/inference.py index d8bfd1528d..094d4d72bc 100644 --- a/anomalib/data/inference.py +++ b/anomalib/data/inference.py @@ -1,4 +1,4 @@ -"""Test Inference Dataset.""" +"""Inference Dataset.""" # Copyright (C) 2020 Intel Corporation # @@ -15,43 +15,13 @@ # and limitations under the License. from pathlib import Path -from typing import Any, List, Optional, Tuple, Union +from typing import Any, Optional, Tuple, Union import albumentations as A from torch.utils.data.dataset import Dataset -from torchvision.datasets.folder import IMG_EXTENSIONS from anomalib.data.transforms import PreProcessor -from anomalib.data.utils import read_image - - -def get_image_filenames(path: Union[str, Path]) -> List[str]: - """Get image filenames. - - Args: - path (Union[str, Path]): Path to image or image-folder. - - Returns: - List[str]: List of image filenames - - """ - image_filenames: List[str] - - if isinstance(path, str): - path = Path(path) - - # If `path` is an image path. 
- if path.is_file() and path.suffix in IMG_EXTENSIONS: - image_filenames = [str(path)] - - # If it is a path to image folder - if path.is_dir(): - image_filenames = [str(p) for p in path.glob("**/*") if p.suffix in IMG_EXTENSIONS] - - if len(image_filenames) == 0: - raise ValueError(f"Found 0 images in {path}") - - return image_filenames class InferenceDataset(Dataset): """Inference Dataset to perform prediction.""" @@ -60,13 +30,29 @@ class InferenceDataset(Dataset): def __init__( self, path: Union[str, Path], + pre_process: Optional[PreProcessor] = None, image_size: Optional[Union[int, Tuple[int, int]]] = None, transform_config: Optional[Union[str, A.Compose]] = None, ) -> None: + """Inference Dataset to perform prediction. + + Args: + path (Union[str, Path]): Path to an image or image-folder. + pre_process (Optional[PreProcessor], optional): Pre-Processing transforms to + pre-process the input dataset. Defaults to None. + image_size (Optional[Union[int, Tuple[int, int]]], optional): Target image size + to resize the original image. Defaults to None. + transform_config (Optional[Union[str, A.Compose]], optional): Configuration file + to parse the albumentations transforms. Defaults to None. + """ super().__init__() self.image_filenames = get_image_filenames(path) + + if pre_process is None: + self.pre_process = PreProcessor(transform_config, image_size) + else: + self.pre_process = pre_process def __len__(self) -> int: """Get the number of images in the given path.""" diff --git a/anomalib/data/mvtec.py b/anomalib/data/mvtec.py index e8d3274a91..a6dd4a161f 100644 --- a/anomalib/data/mvtec.py +++ b/anomalib/data/mvtec.py @@ -361,7 +361,7 @@ def __getitem__(self, index: int) -> Dict[str, Union[str, Tensor]]: return item -class MVTecDataModule(LightningDataModule, ABC): +class MVTecDataModule(LightningDataModule): """MVTec Lightning Data Module.""" def __init__( diff --git a/anomalib/data/utils.py b/anomalib/data/utils.py index 9456784495..54e49d7b5a 100644 --- a/anomalib/data/utils.py +++ b/anomalib/data/utils.py @@ -14,8 +14,39 @@ # See the License for the specific language governing permissions # and limitations under the License. +from pathlib import Path +from typing import List, Union + import cv2 import numpy as np +from torchvision.datasets.folder import IMG_EXTENSIONS + + +def get_image_filenames(path: Union[str, Path]) -> List[str]: + """Get image filenames. + + Args: + path (Union[str, Path]): Path to image or image-folder.
+ + Returns: + List[str]: List of image filenames + + """ + image_filenames: List[str] + + if isinstance(path, str): + path = Path(path) + + if path.is_file() and path.suffix in IMG_EXTENSIONS: + image_filenames = [str(path)] + + if path.is_dir(): + image_filenames = [str(p) for p in path.glob("**/*") if p.suffix in IMG_EXTENSIONS] + + if len(image_filenames) == 0: + raise ValueError(f"Found 0 images in {path}") + + return image_filenames def read_image(path: str) -> np.ndarray: From a699de971369f8ba09d6e7cce6abc8c07e4c44ab Mon Sep 17 00:00:00 2001 From: Samet Date: Fri, 7 Jan 2022 14:46:07 -0700 Subject: [PATCH 07/30] fix deterministic issue --- .github/workflows/tox.yml | 1 + anomalib/core/model/anomaly_module.py | 3 ++- anomalib/data/mvtec.py | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/tox.yml b/.github/workflows/tox.yml index 3a44b136cb..4e50f0e230 100644 --- a/.github/workflows/tox.yml +++ b/.github/workflows/tox.yml @@ -24,6 +24,7 @@ jobs: run: | export ANOMALIB_DATASET_PATH=/media/data1/datasets/MVTec export CUDA_VISIBLE_DEVICES=3 + export CUBLAS_WORKSPACE_CONFIG=:16:8 tox -e coverage - name: Upload coverage result uses: actions/upload-artifact@v2 diff --git a/anomalib/core/model/anomaly_module.py b/anomalib/core/model/anomaly_module.py index 0709bc424a..02605da7bc 100644 --- a/anomalib/core/model/anomaly_module.py +++ b/anomalib/core/model/anomaly_module.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions # and limitations under the License. +from abc import ABC from typing import List, Union import pytorch_lightning as pl @@ -30,7 +31,7 @@ ) -class AnomalyModule(pl.LightningModule): +class AnomalyModule(pl.LightningModule, ABC): """AnomalyModule to train, validate, predict and test images. Acts as a base class for all the Anomaly Modules in the library. diff --git a/anomalib/data/mvtec.py b/anomalib/data/mvtec.py index a6dd4a161f..9beecf727c 100644 --- a/anomalib/data/mvtec.py +++ b/anomalib/data/mvtec.py @@ -21,10 +21,10 @@ # See the License for the specific language governing permissions # and limitations under the License. +from abc import ABC import logging import random import tarfile -from abc import ABC from pathlib import Path from typing import Dict, Optional, Tuple, Union from urllib.request import urlretrieve @@ -361,7 +361,7 @@ def __getitem__(self, index: int) -> Dict[str, Union[str, Tensor]]: return item -class MVTecDataModule(LightningDataModule): +class MVTecDataModule(LightningDataModule, ABC): """MVTec Lightning Data Module.""" def __init__( From 9e02c2abf71f5fae32b5bfcfe67286cea2bf1396 Mon Sep 17 00:00:00 2001 From: Samet Date: Fri, 7 Jan 2022 14:58:14 -0700 Subject: [PATCH 08/30] isort --- anomalib/data/mvtec.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/anomalib/data/mvtec.py b/anomalib/data/mvtec.py index 9beecf727c..e8d3274a91 100644 --- a/anomalib/data/mvtec.py +++ b/anomalib/data/mvtec.py @@ -21,10 +21,10 @@ # See the License for the specific language governing permissions # and limitations under the License. 
-from abc import ABC import logging import random import tarfile +from abc import ABC from pathlib import Path from typing import Dict, Optional, Tuple, Union from urllib.request import urlretrieve From 8db78d14b0916fe95d0dd4684b62e9e8140184bf Mon Sep 17 00:00:00 2001 From: Samet Date: Thu, 13 Jan 2022 23:57:35 -0700 Subject: [PATCH 09/30] Modified config files --- anomalib/core/model/anomaly_module.py | 6 +++--- anomalib/models/cflow/config.yaml | 2 +- anomalib/models/dfkde/config.yaml | 4 ++-- anomalib/models/dfm/config.yaml | 4 ++-- anomalib/models/padim/config.yaml | 6 +++--- anomalib/models/patchcore/config.yaml | 2 +- anomalib/models/stfpm/config.yaml | 2 +- tests/models/test_model.py | 12 ++++++------ 8 files changed, 19 insertions(+), 19 deletions(-) diff --git a/anomalib/core/model/anomaly_module.py b/anomalib/core/model/anomaly_module.py index b8d7d079f6..7202f91ea1 100644 --- a/anomalib/core/model/anomaly_module.py +++ b/anomalib/core/model/anomaly_module.py @@ -15,7 +15,7 @@ # and limitations under the License. from abc import ABC -from typing import List, Union +from typing import Any, List, Optional, Union import pytorch_lightning as pl from omegaconf import DictConfig, ListConfig @@ -78,7 +78,7 @@ def validation_step(self, batch, batch_idx) -> dict: # type: ignore # pylint: """To be implemented in the subclasses.""" raise NotImplementedError - def predict_step(self, batch, batch_idx, _): # pylint: disable=arguments-differ, signature-differs + def predict_step(self, batch: Any, batch_idx: int, _dataloader_idx: Optional[int] = None) -> Any: """Step function called during :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`. By default, it calls :meth:`~pytorch_lightning.core.lightning.LightningModule.forward`. @@ -87,7 +87,7 @@ def predict_step(self, batch, batch_idx, _): # pylint: disable=arguments-differ Args: batch (Tensor): Current batch batch_idx (int): Index of current batch - dataloader_idx (int): Index of the current dataloader + _dataloader_idx (int): Index of the current dataloader Return: Predicted output diff --git a/anomalib/models/cflow/config.yaml b/anomalib/models/cflow/config.yaml index 0a17079f2c..e81fb36395 100644 --- a/anomalib/models/cflow/config.yaml +++ b/anomalib/models/cflow/config.yaml @@ -68,7 +68,7 @@ trainer: log_every_n_steps: 50 log_gpu_memory: null max_epochs: 50 - max_steps: null + max_steps: -1 min_epochs: null min_steps: null move_metrics_to_cpu: false diff --git a/anomalib/models/dfkde/config.yaml b/anomalib/models/dfkde/config.yaml index 62c835072c..25dde268a3 100644 --- a/anomalib/models/dfkde/config.yaml +++ b/anomalib/models/dfkde/config.yaml @@ -42,7 +42,7 @@ trainer: check_val_every_n_epoch: 2 # Don't validate before extracting features. checkpoint_callback: true default_root_dir: null - deterministic: false + deterministic: true fast_dev_run: false gpus: 1 gradient_clip_val: 0 @@ -53,7 +53,7 @@ trainer: log_every_n_steps: 50 log_gpu_memory: null max_epochs: 1 - max_steps: null + max_steps: -1 min_epochs: null min_steps: null move_metrics_to_cpu: false diff --git a/anomalib/models/dfm/config.yaml b/anomalib/models/dfm/config.yaml index e2495dba13..cda800e95a 100755 --- a/anomalib/models/dfm/config.yaml +++ b/anomalib/models/dfm/config.yaml @@ -41,7 +41,7 @@ trainer: check_val_every_n_epoch: 2 # Don't validate before extracting features. 
checkpoint_callback: true default_root_dir: null - deterministic: false + deterministic: true fast_dev_run: false gpus: 1 gradient_clip_val: 0 @@ -52,7 +52,7 @@ trainer: log_every_n_steps: 50 log_gpu_memory: null max_epochs: 1 - max_steps: null + max_steps: -1 min_epochs: null min_steps: null move_metrics_to_cpu: false diff --git a/anomalib/models/padim/config.yaml b/anomalib/models/padim/config.yaml index 0a5f2a658c..90c25df84c 100644 --- a/anomalib/models/padim/config.yaml +++ b/anomalib/models/padim/config.yaml @@ -64,7 +64,7 @@ trainer: auto_scale_batch_size: false auto_select_gpus: false benchmark: false - check_val_every_n_epoch: 2 # Don't validate before extracting features. + check_val_every_n_epoch: 2 # Don't validate before extracting features. checkpoint_callback: true default_root_dir: null deterministic: true @@ -78,7 +78,7 @@ trainer: log_every_n_steps: 50 log_gpu_memory: null max_epochs: 1 - max_steps: null + max_steps: -1 min_epochs: null min_steps: null move_metrics_to_cpu: false @@ -99,6 +99,6 @@ trainer: terminate_on_nan: false tpu_cores: null track_grad_norm: -1 - val_check_interval: 2.0 # Don't validate before extracting features. + val_check_interval: 2.0 # Don't validate before extracting features. weights_save_path: null weights_summary: top diff --git a/anomalib/models/patchcore/config.yaml b/anomalib/models/patchcore/config.yaml index 63cf9bb4ab..cd7a94721d 100644 --- a/anomalib/models/patchcore/config.yaml +++ b/anomalib/models/patchcore/config.yaml @@ -64,7 +64,7 @@ trainer: log_every_n_steps: 50 log_gpu_memory: null max_epochs: 1 - max_steps: null + max_steps: -1 min_epochs: null min_steps: null move_metrics_to_cpu: false diff --git a/anomalib/models/stfpm/config.yaml b/anomalib/models/stfpm/config.yaml index 4c516e3d49..1808113496 100644 --- a/anomalib/models/stfpm/config.yaml +++ b/anomalib/models/stfpm/config.yaml @@ -89,7 +89,7 @@ trainer: log_every_n_steps: 50 log_gpu_memory: null max_epochs: 100 - max_steps: null + max_steps: -1 min_epochs: null min_steps: null move_metrics_to_cpu: false diff --git a/tests/models/test_model.py b/tests/models/test_model.py index 9b11c57455..5fd4d4f28c 100644 --- a/tests/models/test_model.py +++ b/tests/models/test_model.py @@ -150,12 +150,12 @@ def _test_model_load(self, config, datamodule, results): ["model_name", "nncf"], [ ("padim", False), - ("dfkde", False), - # ("dfm", False), # skip dfm test - ("stfpm", False), - ("stfpm", True), - ("patchcore", False), - ("cflow", False), + # ("dfkde", False), + # # ("dfm", False), # skip dfm test + # ("stfpm", False), + # ("stfpm", True), + # ("patchcore", False), + # ("cflow", False), ], ) @pytest.mark.flaky(max_runs=3) From 5db96c3fcb8a7256e5ec29eee0831f0ec45f517f Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 25 Jan 2022 01:19:44 -0700 Subject: [PATCH 10/30] pl version to 1.5.9 --- requirements/base.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/base.txt b/requirements/base.txt index 7e6f523643..e3bf6d97a2 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -11,7 +11,7 @@ nncf==2.0.0 numpy~=1.19.5 omegaconf==2.1.1 pillow==8.3.2 -pytorch-lightning @ git+https://github.com/PyTorchLightning/pytorch-lightning.git +pytorch-lightning==1.5.9 torch==1.8.1 torchvision==0.9.1 scikit-image>=0.17.2 From 16210989fb66385ed209678e100f10edbbeaa453 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 25 Jan 2022 05:33:50 -0700 Subject: [PATCH 11/30] modified cflow configs --- anomalib/models/cflow/config.yaml | 5 ----- 1 file changed, 5 
deletions(-) diff --git a/anomalib/models/cflow/config.yaml b/anomalib/models/cflow/config.yaml index f131df4d20..01d1c9097e 100644 --- a/anomalib/models/cflow/config.yaml +++ b/anomalib/models/cflow/config.yaml @@ -47,7 +47,6 @@ trainer: accelerator: null accumulate_grad_batches: 1 amp_backend: native - amp_level: O2 auto_lr_find: false auto_scale_batch_size: false auto_select_gpus: false @@ -56,9 +55,7 @@ trainer: checkpoint_callback: true default_root_dir: null deterministic: true - distributed_backend: null fast_dev_run: false - flush_logs_every_n_steps: 100 gpus: 1 gradient_clip_val: 0 limit_predict_batches: 1.0 @@ -83,14 +80,12 @@ trainer: process_position: 0 profiler: null progress_bar_refresh_rate: null - reload_dataloaders_every_epoch: false replace_sampler_ddp: true stochastic_weight_avg: false sync_batchnorm: false terminate_on_nan: false tpu_cores: null track_grad_norm: -1 - truncated_bptt_steps: null val_check_interval: 1.0 weights_save_path: null weights_summary: top From 5b4abfcb8c8231befacecf71730b34b2cad89fe9 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 27 Jan 2022 00:06:54 -0700 Subject: [PATCH 12/30] set deterministic off --- anomalib/models/cflow/config.yaml | 2 +- anomalib/models/dfkde/config.yaml | 2 +- anomalib/models/dfm/config.yaml | 2 +- anomalib/models/padim/config.yaml | 2 +- anomalib/models/patchcore/config.yaml | 2 +- anomalib/models/stfpm/config.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/anomalib/models/cflow/config.yaml b/anomalib/models/cflow/config.yaml index 01d1c9097e..aa75b80b3e 100644 --- a/anomalib/models/cflow/config.yaml +++ b/anomalib/models/cflow/config.yaml @@ -54,7 +54,7 @@ trainer: check_val_every_n_epoch: 1 checkpoint_callback: true default_root_dir: null - deterministic: true + deterministic: false fast_dev_run: false gpus: 1 gradient_clip_val: 0 diff --git a/anomalib/models/dfkde/config.yaml b/anomalib/models/dfkde/config.yaml index 25dde268a3..23a06822f9 100644 --- a/anomalib/models/dfkde/config.yaml +++ b/anomalib/models/dfkde/config.yaml @@ -42,7 +42,7 @@ trainer: check_val_every_n_epoch: 2 # Don't validate before extracting features. checkpoint_callback: true default_root_dir: null - deterministic: true + deterministic: false fast_dev_run: false gpus: 1 gradient_clip_val: 0 diff --git a/anomalib/models/dfm/config.yaml b/anomalib/models/dfm/config.yaml index cda800e95a..a3da996855 100755 --- a/anomalib/models/dfm/config.yaml +++ b/anomalib/models/dfm/config.yaml @@ -41,7 +41,7 @@ trainer: check_val_every_n_epoch: 2 # Don't validate before extracting features. checkpoint_callback: true default_root_dir: null - deterministic: true + deterministic: false fast_dev_run: false gpus: 1 gradient_clip_val: 0 diff --git a/anomalib/models/padim/config.yaml b/anomalib/models/padim/config.yaml index 8cf590de3d..8cac201f3c 100644 --- a/anomalib/models/padim/config.yaml +++ b/anomalib/models/padim/config.yaml @@ -67,7 +67,7 @@ trainer: check_val_every_n_epoch: 2 # Don't validate before extracting features. checkpoint_callback: true default_root_dir: null - deterministic: true + deterministic: false fast_dev_run: false gpus: 1 gradient_clip_val: 0 diff --git a/anomalib/models/patchcore/config.yaml b/anomalib/models/patchcore/config.yaml index a30c065505..7003249190 100644 --- a/anomalib/models/patchcore/config.yaml +++ b/anomalib/models/patchcore/config.yaml @@ -53,7 +53,7 @@ trainer: check_val_every_n_epoch: 2 # Don't validate before extracting features. 
checkpoint_callback: true default_root_dir: null - deterministic: true + deterministic: false fast_dev_run: false gpus: 1 gradient_clip_val: 0 diff --git a/anomalib/models/stfpm/config.yaml b/anomalib/models/stfpm/config.yaml index 1d8861051e..a058c59e7a 100644 --- a/anomalib/models/stfpm/config.yaml +++ b/anomalib/models/stfpm/config.yaml @@ -78,7 +78,7 @@ trainer: check_val_every_n_epoch: 2 checkpoint_callback: true default_root_dir: null - deterministic: true + deterministic: false fast_dev_run: false gpus: 1 gradient_clip_val: 0 From 37b43a0d08e544279ebf6b6341cf96e60dbd1e8a Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 27 Jan 2022 00:07:38 -0700 Subject: [PATCH 13/30] do not log images when testing. --- .../normalization_callback/test_normalization_callback.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/core/callbacks/normalization_callback/test_normalization_callback.py b/tests/core/callbacks/normalization_callback/test_normalization_callback.py index 14237edfda..d2ef85bda3 100644 --- a/tests/core/callbacks/normalization_callback/test_normalization_callback.py +++ b/tests/core/callbacks/normalization_callback/test_normalization_callback.py @@ -21,6 +21,7 @@ def test_normalizer(): config = get_configurable_parameters(model_config_path="anomalib/models/padim/config.yaml") config.dataset.path = get_dataset_path(config.dataset.path) config.model.threshold.adaptive = True + config.project.log_images_to = [] # run without normalization config.model.normalization_method = "none" From e8a12b016b90bccb94878686110cc86993d7e7e6 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 27 Jan 2022 04:23:50 -0700 Subject: [PATCH 14/30] Bumped up version --- requirements/base.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/base.txt b/requirements/base.txt index e3bf6d97a2..45dab7a339 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -8,9 +8,9 @@ lxml==4.6.5 matplotlib==3.4.3 networkx~=2.5 nncf==2.0.0 -numpy~=1.19.5 +numpy==1.21.5 omegaconf==2.1.1 -pillow==8.3.2 +pillow==9.0.0 pytorch-lightning==1.5.9 torch==1.8.1 torchvision==0.9.1 From 06e6655a38c4812a6733d7c0c367d4f64311ec20 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 27 Jan 2022 11:32:39 +0000 Subject: [PATCH 15/30] Update tox.yml --- .github/workflows/tox.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/tox.yml b/.github/workflows/tox.yml index 4e50f0e230..3a44b136cb 100644 --- a/.github/workflows/tox.yml +++ b/.github/workflows/tox.yml @@ -24,7 +24,6 @@ jobs: run: | export ANOMALIB_DATASET_PATH=/media/data1/datasets/MVTec export CUDA_VISIBLE_DEVICES=3 - export CUBLAS_WORKSPACE_CONFIG=:16:8 tox -e coverage - name: Upload coverage result uses: actions/upload-artifact@v2 From fe8a8a82d7537667dd5164ebfcd3f4e3dcfc2a58 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 27 Jan 2022 05:10:00 -0700 Subject: [PATCH 16/30] Removed export cublas --- .github/workflows/tox.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/tox.yml b/.github/workflows/tox.yml index 4e50f0e230..3a44b136cb 100644 --- a/.github/workflows/tox.yml +++ b/.github/workflows/tox.yml @@ -24,7 +24,6 @@ jobs: run: | export ANOMALIB_DATASET_PATH=/media/data1/datasets/MVTec export CUDA_VISIBLE_DEVICES=3 - export CUBLAS_WORKSPACE_CONFIG=:16:8 tox -e coverage - name: Upload coverage result uses: actions/upload-artifact@v2 From 2042687f19f1fbbb7b11226af7f4dd2bdce157eb Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 27 Jan 2022 05:13:06 -0700 Subject: [PATCH 
17/30] removed abc class from mvtec. predict-dataloader implemented --- anomalib/data/mvtec.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/anomalib/data/mvtec.py b/anomalib/data/mvtec.py index e8d3274a91..ccf8ca9d2f 100644 --- a/anomalib/data/mvtec.py +++ b/anomalib/data/mvtec.py @@ -24,7 +24,6 @@ import logging import random import tarfile -from abc import ABC from pathlib import Path from typing import Dict, Optional, Tuple, Union from urllib.request import urlretrieve @@ -361,7 +360,7 @@ def __getitem__(self, index: int) -> Dict[str, Union[str, Tensor]]: return item -class MVTecDataModule(LightningDataModule, ABC): +class MVTecDataModule(LightningDataModule): """MVTec Lightning Data Module.""" def __init__( From aa40d0423c7c42d464af5bda3859a13a8eccfce3 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 27 Jan 2022 05:16:00 -0700 Subject: [PATCH 18/30] Add trainer.validate to `run_train_test` in normalization tests --- .../normalization_callback/test_normalization_callback.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/core/callbacks/normalization_callback/test_normalization_callback.py b/tests/core/callbacks/normalization_callback/test_normalization_callback.py index d2ef85bda3..8978efd590 100644 --- a/tests/core/callbacks/normalization_callback/test_normalization_callback.py +++ b/tests/core/callbacks/normalization_callback/test_normalization_callback.py @@ -11,8 +11,10 @@ def run_train_test(config): model = get_model(config) datamodule = get_datamodule(config) callbacks = get_callbacks(config) + trainer = Trainer(**config.trainer, callbacks=callbacks) trainer.fit(model=model, datamodule=datamodule) + trainer.validate(model=model, datamodule=datamodule) results = trainer.test(model=model, datamodule=datamodule) return results From a3e8daa68945d581bff81a9d12ec55667bfea28f Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 27 Jan 2022 09:52:12 -0700 Subject: [PATCH 19/30] revert numpy version --- requirements/base.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/base.txt b/requirements/base.txt index 45dab7a339..e0aa0cc813 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -8,7 +8,7 @@ lxml==4.6.5 matplotlib==3.4.3 networkx~=2.5 nncf==2.0.0 -numpy==1.21.5 +numpy~=1.19.2 omegaconf==2.1.1 pillow==9.0.0 pytorch-lightning==1.5.9 From 2ad990595781ce02dfbe9cf1e25c5a82f4623803 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Fri, 28 Jan 2022 06:22:32 -0700 Subject: [PATCH 20/30] Fixed padim training --- anomalib/models/padim/config.yaml | 4 ++-- anomalib/models/padim/model.py | 33 +++++++++++++++---------------- tools/train.py | 2 +- 3 files changed, 19 insertions(+), 20 deletions(-) diff --git a/anomalib/models/padim/config.yaml b/anomalib/models/padim/config.yaml index 8cac201f3c..8b5df3cc37 100644 --- a/anomalib/models/padim/config.yaml +++ b/anomalib/models/padim/config.yaml @@ -64,7 +64,7 @@ trainer: auto_scale_batch_size: false auto_select_gpus: false benchmark: false - check_val_every_n_epoch: 2 # Don't validate before extracting features. + check_val_every_n_epoch: 1 # Don't validate before extracting features. checkpoint_callback: true default_root_dir: null deterministic: false @@ -99,6 +99,6 @@ trainer: terminate_on_nan: false tpu_cores: null track_grad_norm: -1 - val_check_interval: 2.0 # Don't validate before extracting features. + val_check_interval: 1.0 # Don't validate before extracting features. 
weights_save_path: null weights_summary: top diff --git a/anomalib/models/padim/model.py b/anomalib/models/padim/model.py index d1edf24679..a082c2cb71 100644 --- a/anomalib/models/padim/model.py +++ b/anomalib/models/padim/model.py @@ -281,7 +281,7 @@ class PadimLightning(AnomalyModule): def __init__(self, hparams: Union[DictConfig, ListConfig]): super().__init__(hparams) self.layers = hparams.model.layers - self.model = PadimModel( + self.model: PadimModel = PadimModel( layers=hparams.model.layers, input_size=hparams.model.input_size, tile_size=hparams.dataset.tiling.tile_size, @@ -292,38 +292,38 @@ def __init__(self, hparams: Union[DictConfig, ListConfig]): self.stats: List[Tensor] = [] self.automatic_optimization = False + self.embeddings: List[Tensor] = [] @staticmethod def configure_optimizers(): """PADIM doesn't require optimization, therefore returns no optimizers.""" return None - def training_step(self, batch, _): # pylint: disable=arguments-differ + def training_step(self, batch, _batch_idx): # pylint: disable=arguments-differ """Training Step of PADIM. For each batch, hierarchical features are extracted from the CNN. Args: batch (Dict[str,Tensor]): Input batch - _: Index of the batch. + _batch_idx: Index of the batch. Returns: Hierarchical feature map """ - self.model.feature_extractor.eval() embeddings = self.model(batch["image"]) - return {"embeddings": embeddings.cpu()} - - def training_epoch_end(self, outputs: List[Dict[str, Tensor]]) -> None: - """Fit a multivariate gaussian model on an embedding extracted from deep hierarchical CNN features. - - Args: - outputs (List[Dict[str, Tensor]]): Batch of outputs from the training step - Returns: - None - """ - - embeddings = torch.vstack([x["embeddings"] for x in outputs]) + # NOTE: `self.embedding` appends each batch embedding to + # store the training set embedding. We manually append these + # values mainly due to the new order of hooks introduced after PL v1.4.0 + # https://github.com/PyTorchLightning/pytorch-lightning/pull/7357 + self.embeddings.append(embeddings.cpu()) + + def on_validation_start(self) -> None: + """Fit a Gaussian to the embedding collected from the training set.""" + # NOTE: Previous anomalib versions fit Gaussian at the end of the epoch. + # This is not possible anymore with PyTorch Lightning v1.4.0 since validation + # is run within train epoch. 
+ embeddings = torch.vstack(self.embeddings) self.stats = self.model.gaussian.fit(embeddings) def validation_step(self, batch, _): # pylint: disable=arguments-differ @@ -341,5 +341,4 @@ def validation_step(self, batch, _): # pylint: disable=arguments-differ """ batch["anomaly_maps"] = self.model(batch["image"]) - return batch diff --git a/tools/train.py b/tools/train.py index bbe2077901..4c41e9a47a 100644 --- a/tools/train.py +++ b/tools/train.py @@ -60,7 +60,7 @@ def train(): trainer = Trainer(**config.trainer, logger=logger, callbacks=callbacks) trainer.fit(model=model, datamodule=datamodule) # TODO: https://github.com/openvinotoolkit/anomalib/issues/62 - trainer.validate(model=model, datamodule=datamodule) + # trainer.validate(model=model, datamodule=datamodule) trainer.test(model=model, datamodule=datamodule) From 53cc1f173de51e9f8affe0736268e21914ffaaab Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Fri, 28 Jan 2022 07:31:24 -0700 Subject: [PATCH 21/30] Patchcore is now supported as well --- anomalib/models/padim/model.py | 6 ++--- anomalib/models/patchcore/config.yaml | 4 +-- anomalib/models/patchcore/model.py | 37 +++++++++++++++------------ 3 files changed, 26 insertions(+), 21 deletions(-) diff --git a/anomalib/models/padim/model.py b/anomalib/models/padim/model.py index a082c2cb71..ca0343a5c2 100644 --- a/anomalib/models/padim/model.py +++ b/anomalib/models/padim/model.py @@ -303,20 +303,20 @@ def training_step(self, batch, _batch_idx): # pylint: disable=arguments-differ """Training Step of PADIM. For each batch, hierarchical features are extracted from the CNN. Args: - batch (Dict[str,Tensor]): Input batch + batch (Dict[str, Any]): Batch containing image filename, image, label and mask _batch_idx: Index of the batch. Returns: Hierarchical feature map """ self.model.feature_extractor.eval() - embeddings = self.model(batch["image"]) + embedding = self.model(batch["image"]) # NOTE: `self.embedding` appends each batch embedding to # store the training set embedding. We manually append these # values mainly due to the new order of hooks introduced after PL v1.4.0 # https://github.com/PyTorchLightning/pytorch-lightning/pull/7357 - self.embeddings.append(embeddings.cpu()) + self.embeddings.append(embedding.cpu()) def on_validation_start(self) -> None: """Fit a Gaussian to the embedding collected from the training set.""" diff --git a/anomalib/models/patchcore/config.yaml b/anomalib/models/patchcore/config.yaml index 7003249190..a64e48e090 100644 --- a/anomalib/models/patchcore/config.yaml +++ b/anomalib/models/patchcore/config.yaml @@ -50,7 +50,7 @@ trainer: auto_scale_batch_size: false auto_select_gpus: false benchmark: false - check_val_every_n_epoch: 2 # Don't validate before extracting features. + check_val_every_n_epoch: 1 # Don't validate before extracting features. checkpoint_callback: true default_root_dir: null deterministic: false @@ -85,6 +85,6 @@ trainer: terminate_on_nan: false tpu_cores: null track_grad_norm: -1 - val_check_interval: 2.0 # Don't validate before extracting features. + val_check_interval: 1.0 # Don't validate before extracting features. 
weights_save_path: null weights_summary: top diff --git a/anomalib/models/patchcore/model.py b/anomalib/models/patchcore/model.py index e3dbe9c549..b497b831ec 100644 --- a/anomalib/models/patchcore/model.py +++ b/anomalib/models/patchcore/model.py @@ -216,8 +216,11 @@ def subsample_embedding(self, embedding: torch.Tensor, sampling_ratio: float) -> """ # Coreset Subsampling + print("Creating CoreSet Sampler via k-Center Greedy") sampler = KCenterGreedy(embedding=embedding, sampling_ratio=sampling_ratio) + print("Getting the coreset from the main embedding.") coreset = sampler.sample_coreset() + print("Assigning the coreset as the memory bank.") self.memory_bank = coreset def nearest_neighbors(self, embedding: Tensor, n_neighbors: int = 9) -> Tensor: @@ -250,7 +253,7 @@ class PatchcoreLightning(AnomalyModule): def __init__(self, hparams) -> None: super().__init__(hparams) - self.model = PatchcoreModel( + self.model: PatchcoreModel = PatchcoreModel( layers=hparams.model.layers, input_size=hparams.model.input_size, tile_size=hparams.dataset.tiling.tile_size, @@ -259,6 +262,7 @@ def __init__(self, hparams) -> None: apply_tiling=hparams.dataset.tiling.apply, ) self.automatic_optimization = False + self.embeddings: List[Tensor] = [] def configure_optimizers(self) -> None: """Configure optimizers. @@ -268,13 +272,12 @@ def configure_optimizers(self) -> None: """ return None - def training_step(self, batch, _): # pylint: disable=arguments-differ + def training_step(self, batch, _batch_idx): # pylint: disable=arguments-differ """Generate feature embedding of the batch. Args: - batch (Dict[str, Any]): Batch containing image filename, - image, label and mask - _ (int): Batch Index + batch (Dict[str, Any]): Batch containing image filename, image, label and mask + _batch_idx (int): Batch Index Returns: Dict[str, np.ndarray]: Embedding Vector @@ -282,20 +285,22 @@ def training_step(self, batch, _): # pylint: disable=arguments-differ self.model.feature_extractor.eval() embedding = self.model(batch["image"]) - return {"embedding": embedding} - - def training_epoch_end(self, outputs): - """Concatenate batch embeddings to generate normal embedding. + # NOTE: `self.embedding` appends each batch embedding to + # store the training set embedding. We manually append these + # values mainly due to the new order of hooks introduced after PL v1.4.0 + # https://github.com/PyTorchLightning/pytorch-lightning/pull/7357 + self.embeddings.append(embedding) - Apply coreset subsampling to the embedding set for dimensionality reduction. + def on_validation_start(self) -> None: + """Apply subsampling to the embedding collected from the training set.""" + # NOTE: Previous anomalib versions fit subsampling at the end of the epoch. + # This is not possible anymore with PyTorch Lightning v1.4.0 since validation + # is run within train epoch. + print("Aggregating the embedding extracted from the training set.") + embeddings = torch.vstack(self.embeddings) - Args: - outputs (List[Dict[str, np.ndarray]]): List of embedding vectors - """ - embedding = torch.vstack([output["embedding"] for output in outputs]) sampling_ratio = self.hparams.model.coreset_sampling_ratio - - self.model.subsample_embedding(embedding, sampling_ratio) + self.model.subsample_embedding(embeddings, sampling_ratio) def validation_step(self, batch, _): # pylint: disable=arguments-differ """Get batch of anomaly maps from input image batch. 
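Note on the recurring pattern: patch 20 above, patch 21 here, and patches 22-23 below all replace a model's `training_epoch_end` with the same collect-then-fit scheme described in the NOTE comments, because PyTorch Lightning >= 1.4 runs validation inside the train epoch (https://github.com/PyTorchLightning/pytorch-lightning/pull/7357), so `training_epoch_end` would fire only after the first validation pass. The sketch below distills that scheme; the class name, the flatten-based stand-in feature extractor, and the mean statistic are illustrative placeholders, not anomalib's actual implementation.

from typing import List

import pytorch_lightning as pl
import torch
from torch import Tensor


class CollectThenFitModule(pl.LightningModule):
    """Hypothetical module distilling the collect-then-fit pattern of patches 20-23."""

    def __init__(self) -> None:
        super().__init__()
        self.automatic_optimization = False  # these models need no gradient updates
        self.embeddings: List[Tensor] = []
        self.center: Tensor  # statistic fitted just before validation starts

    @staticmethod
    def configure_optimizers():
        """Nothing to optimize, so no optimizer is returned."""
        return None

    def training_step(self, batch, _batch_idx):
        # Only collect features here; fitting must wait until every training batch is seen.
        embedding = batch["image"].flatten(1)  # stand-in for a CNN feature extractor
        self.embeddings.append(embedding.cpu())

    def on_validation_start(self) -> None:
        # Fires after the last training batch and before the first validation
        # batch -- the point where `training_epoch_end` used to be safe to fit in.
        embeddings = torch.vstack(self.embeddings)
        self.center = embeddings.mean(dim=0)  # stand-in for Gaussian/KDE/coreset fitting

    def validation_step(self, batch, _batch_idx):
        # Score each image by its distance to the fitted statistic.
        scores = (batch["image"].flatten(1).cpu() - self.center).norm(dim=1)
        return {"pred_scores": scores}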
From 44767190595b13ee7d2bbae6142293c30f8ea3d3 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Fri, 28 Jan 2022 08:19:11 -0700 Subject: [PATCH 22/30] DFKDE is now supported as well --- anomalib/models/dfkde/config.yaml | 6 ++--- anomalib/models/dfkde/model.py | 39 +++++++++++++++++-------------- 2 files changed, 24 insertions(+), 21 deletions(-) diff --git a/anomalib/models/dfkde/config.yaml b/anomalib/models/dfkde/config.yaml index 23a06822f9..0364889035 100644 --- a/anomalib/models/dfkde/config.yaml +++ b/anomalib/models/dfkde/config.yaml @@ -3,7 +3,7 @@ dataset: format: mvtec path: ./datasets/MVTec url: ftp://guest:GU.205dldo@ftp.softronics.ch/mvtec_anomaly_detection/mvtec_anomaly_detection.tar.xz - category: leather + category: cable task: classification label_format: None image_size: 256 @@ -39,7 +39,7 @@ trainer: auto_scale_batch_size: false auto_select_gpus: false benchmark: false - check_val_every_n_epoch: 2 # Don't validate before extracting features. + check_val_every_n_epoch: 1 # Don't validate before extracting features. checkpoint_callback: true default_root_dir: null deterministic: false @@ -74,6 +74,6 @@ trainer: terminate_on_nan: false tpu_cores: null track_grad_norm: -1 - val_check_interval: 2.0 # Don't validate before extracting features. + val_check_interval: 1.0 # Don't validate before extracting features. weights_save_path: null weights_summary: top diff --git a/anomalib/models/dfkde/model.py b/anomalib/models/dfkde/model.py index 66cf71e3a8..e14b33c800 100644 --- a/anomalib/models/dfkde/model.py +++ b/anomalib/models/dfkde/model.py @@ -14,12 +14,13 @@ # See the License for the specific language governing permissions # and limitations under the License. -from typing import Any, Dict, List, Union +from typing import List, Union import torch import torchvision from omegaconf.dictconfig import DictConfig from omegaconf.listconfig import ListConfig +from torch import Tensor from anomalib.core.model import AnomalyModule from anomalib.core.model.feature_extractor import FeatureExtractor @@ -47,17 +48,19 @@ def __init__(self, hparams: Union[DictConfig, ListConfig]): threshold_offset=self.threshold_offset, ) self.automatic_optimization = False + self.embeddings: List[Tensor] = [] @staticmethod def configure_optimizers(): """DFKDE doesn't require optimization, therefore returns no optimizers.""" return None - def training_step(self, batch, _): # pylint: disable=arguments-differ + def training_step(self, batch, _batch_idx): # pylint: disable=arguments-differ """Training Step of DFKDE. For each batch, features are extracted from the CNN. Args: - batch (Tensor): Input batch + batch (Dict[str, Any]): Batch containing image filename, image, label and mask + _batch_idx: Index of the batch. Returns: Deep CNN features. @@ -65,21 +68,21 @@ def training_step(self, batch, _): # pylint: disable=arguments-differ self.feature_extractor.eval() layer_outputs = self.feature_extractor(batch["image"]) - feature_vector = torch.hstack(list(layer_outputs.values())).detach().squeeze() - return {"feature_vector": feature_vector} - - def training_epoch_end(self, outputs: List[Dict[str, Any]]) -> None: - """Fit a KDE model on deep CNN features. 
-
-        Args:
-            outputs (List[Dict[str, Any]]): Batch of outputs from the training step
-
-        Returns:
-            None
-        """
-
-        feature_stack = torch.vstack([output["feature_vector"] for output in outputs])
-        self.normality_model.fit(feature_stack)
+        embedding = torch.hstack(list(layer_outputs.values())).detach().squeeze()
+
+        # NOTE: `self.embeddings` appends each batch embedding to
+        # store the training set embedding. We manually append these
+        # values mainly due to the new order of hooks introduced after PL v1.4.0
+        # https://github.com/PyTorchLightning/pytorch-lightning/pull/7357
+        self.embeddings.append(embedding)
+
+    def on_validation_start(self) -> None:
+        """Fit a KDE model to the embedding collected from the training set."""
+        # NOTE: Previous anomalib versions fit the normality model at the end of
+        # the epoch. This is not possible anymore with PyTorch Lightning v1.4.0
+        # since validation is run within the train epoch.
+        embeddings = torch.vstack(self.embeddings)
+        self.normality_model.fit(embeddings)
 
     def validation_step(self, batch, _):  # pylint: disable=arguments-differ
         """Validation Step of DFKDE.

From 1cdfcd9555c846382406453c19fa16da03e86ff9 Mon Sep 17 00:00:00 2001
From: Samet Akcay
Date: Fri, 28 Jan 2022 08:28:58 -0700
Subject: [PATCH 23/30] DFM is now supported as well

---
 anomalib/models/cflow/config.yaml     |  2 +-
 anomalib/models/dfkde/config.yaml     |  2 +-
 anomalib/models/dfm/config.yaml       |  8 +++----
 anomalib/models/dfm/model.py          | 33 ++++++++++++++-------------
 anomalib/models/patchcore/config.yaml |  2 +-
 5 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/anomalib/models/cflow/config.yaml b/anomalib/models/cflow/config.yaml
index aa75b80b3e..d1d8c26bda 100644
--- a/anomalib/models/cflow/config.yaml
+++ b/anomalib/models/cflow/config.yaml
@@ -3,7 +3,7 @@ dataset:
   format: mvtec
   path: ./datasets/MVTec
   url: ftp://guest:GU.205dldo@ftp.softronics.ch/mvtec_anomaly_detection/mvtec_anomaly_detection.tar.xz
-  category: leather
+  category: bottle
   task: segmentation
   label_format: None
   image_size: 256
diff --git a/anomalib/models/dfkde/config.yaml b/anomalib/models/dfkde/config.yaml
index 0364889035..e73b636208 100644
--- a/anomalib/models/dfkde/config.yaml
+++ b/anomalib/models/dfkde/config.yaml
@@ -3,7 +3,7 @@ dataset:
   format: mvtec
   path: ./datasets/MVTec
   url: ftp://guest:GU.205dldo@ftp.softronics.ch/mvtec_anomaly_detection/mvtec_anomaly_detection.tar.xz
-  category: cable
+  category: bottle
   task: classification
   label_format: None
   image_size: 256
diff --git a/anomalib/models/dfm/config.yaml b/anomalib/models/dfm/config.yaml
index a3da996855..9ab2b04161 100755
--- a/anomalib/models/dfm/config.yaml
+++ b/anomalib/models/dfm/config.yaml
@@ -3,7 +3,7 @@ dataset:
   format: mvtec
   path: ./datasets/MVTec
   url: ftp://guest:GU.205dldo@ftp.softronics.ch/mvtec_anomaly_detection/mvtec_anomaly_detection.tar.xz
-  category: leather
+  category: bottle
   task: classification
   label_format: None
   image_size: 256
@@ -25,7 +25,7 @@ model:
 project:
   seed: 42
   path: ./results
-  log_images_to: [local]
+  log_images_to: []
   logger: false
   save_to_csv: false
@@ -38,7 +38,7 @@ trainer:
   auto_scale_batch_size: false
   auto_select_gpus: false
   benchmark: false
-  check_val_every_n_epoch: 2 # Don't validate before extracting features.
+  check_val_every_n_epoch: 1 # Don't validate before extracting features.
   checkpoint_callback: true
   default_root_dir: null
   deterministic: false
@@ -73,6 +73,6 @@ trainer:
   terminate_on_nan: false
   tpu_cores: null
   track_grad_norm: -1
-  val_check_interval: 2.0 # Don't validate before extracting features.
+  val_check_interval: 1.0 # Don't validate before extracting features.
   weights_save_path: null
   weights_summary: top
diff --git a/anomalib/models/dfm/model.py b/anomalib/models/dfm/model.py
index 423830aa56..7a4a9f1683 100644
--- a/anomalib/models/dfm/model.py
+++ b/anomalib/models/dfm/model.py
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions
 # and limitations under the License.
 
-from typing import Dict, List, Union
+from typing import List, Union
 
 import torch
 import torchvision
@@ -37,6 +37,7 @@ def __init__(self, hparams: Union[DictConfig, ListConfig]):
 
         self.dfm_model = DFMModel(n_comps=hparams.model.pca_level, score_type=hparams.model.score_type)
         self.automatic_optimization = False
+        self.embeddings: List[Tensor] = []
 
     @staticmethod
     def configure_optimizers() -> None:
@@ -58,21 +59,21 @@ def training_step(self, batch, _):  # pylint: disable=arguments-differ
         self.feature_extractor.eval()
         layer_outputs = self.feature_extractor(batch["image"])
-        feature_vector = torch.hstack(list(layer_outputs.values())).detach().squeeze()
-        return {"feature_vector": feature_vector}
-
-    def training_epoch_end(self, outputs: List[Dict[str, Tensor]]) -> None:
-        """Fit a KDE model on deep CNN features.
-
-        Args:
-            outputs (List[Dict[str, Tensor]]): Batch of outputs from the training step
-
-        Returns:
-            None
-        """
-
-        feature_stack = torch.vstack([output["feature_vector"] for output in outputs])
-        self.dfm_model.fit(feature_stack)
+        embedding = torch.hstack(list(layer_outputs.values())).detach().squeeze()
+
+        # NOTE: `self.embeddings` appends each batch embedding to
+        # store the training set embedding. We manually append these
+        # values mainly due to the new order of hooks introduced after PL v1.4.0
+        # https://github.com/PyTorchLightning/pytorch-lightning/pull/7357
+        self.embeddings.append(embedding)
+
+    def on_validation_start(self) -> None:
+        """Fit a DFM model to the embedding collected from the training set."""
+        # NOTE: Previous anomalib versions fit the DFM model at the end of the
+        # epoch. This is not possible anymore with PyTorch Lightning v1.4.0
+        # since validation is run within the train epoch.
+        embeddings = torch.vstack(self.embeddings)
+        self.dfm_model.fit(embeddings)
 
     def validation_step(self, batch, _):  # pylint: disable=arguments-differ
         """Validation Step of DFM.
diff --git a/anomalib/models/patchcore/config.yaml b/anomalib/models/patchcore/config.yaml
index a64e48e090..ebb1755993 100644
--- a/anomalib/models/patchcore/config.yaml
+++ b/anomalib/models/patchcore/config.yaml
@@ -4,7 +4,7 @@ dataset:
   path: ./datasets/MVTec
   url: ftp://guest:GU.205dldo@ftp.softronics.ch/mvtec_anomaly_detection/mvtec_anomaly_detection.tar.xz
   task: segmentation
-  category: carpet
+  category: bottle
   label_format: None
   tiling:
     apply: false

From 78d35dbeda59db55f19e366d6c1b74af58edf917 Mon Sep 17 00:00:00 2001
From: Samet Akcay
Date: Fri, 28 Jan 2022 09:05:42 -0700
Subject: [PATCH 24/30] STFPM support and modify tests

---
 anomalib/core/callbacks/cdf_normalization.py |  4 ++--
 anomalib/models/stfpm/config.yaml            |  2 +-
 tests/deploy/test_inferencer.py              |  2 +-
 tests/models/test_model.py                   | 13 +++++++------
 4 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/anomalib/core/callbacks/cdf_normalization.py b/anomalib/core/callbacks/cdf_normalization.py
index 01c92bfc61..6c2e800b45 100644
--- a/anomalib/core/callbacks/cdf_normalization.py
+++ b/anomalib/core/callbacks/cdf_normalization.py
@@ -22,8 +22,8 @@ def on_test_start(self, _trainer: pl.Trainer, pl_module: pl.LightningModule) ->
         pl_module.image_metrics.F1.threshold = 0.5
         pl_module.pixel_metrics.F1.threshold = 0.5
 
-    def on_train_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
-        """Called when the train epoch ends.
+    def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
+        """Called when the validation starts after training.
 
         Use the current model to compute the anomaly score distributions
         of the normal training data. This is needed after every epoch, because the statistics must be
diff --git a/anomalib/models/stfpm/config.yaml b/anomalib/models/stfpm/config.yaml
index a058c59e7a..1f476e9f41 100644
--- a/anomalib/models/stfpm/config.yaml
+++ b/anomalib/models/stfpm/config.yaml
@@ -75,7 +75,7 @@ trainer:
   auto_scale_batch_size: false
   auto_select_gpus: false
   benchmark: false
-  check_val_every_n_epoch: 2
+  check_val_every_n_epoch: 1
   checkpoint_callback: true
   default_root_dir: null
   deterministic: false
diff --git a/tests/deploy/test_inferencer.py b/tests/deploy/test_inferencer.py
index 883f6f49d1..1a08198dcb 100644
--- a/tests/deploy/test_inferencer.py
+++ b/tests/deploy/test_inferencer.py
@@ -52,7 +52,7 @@ class TestInferencers:
             "patchcore",
         ],
     )
-    @TestDataset(num_train=20, num_test=1, path=get_dataset_path(), use_mvtec=False)
+    @TestDataset(num_train=200, num_test=1, path=get_dataset_path(), use_mvtec=False)
     def test_torch_inference(self, model_name: str, category: str = "shapes", path: str = "./datasets/MVTec"):
         """Tests Torch inference.
 
         Model is not trained as this checks that the inferencers are working.
diff --git a/tests/models/test_model.py b/tests/models/test_model.py
index 5fd4d4f28c..ed36086719 100644
--- a/tests/models/test_model.py
+++ b/tests/models/test_model.py
@@ -85,6 +85,7 @@ def _setup(self, model_name, use_mvtec, dataset_path, project_path, nncf, catego
         config.dataset.category = category
         config.dataset.path = dataset_path
         config.model.weight_file = "weights/model.ckpt"  # add model weights to the config
+        config.project.log_images_to = []
 
         if not use_mvtec:
             config.dataset.category = "shapes"
@@ -150,12 +151,12 @@ def _test_model_load(self, config, datamodule, results):
         ["model_name", "nncf"],
         [
             ("padim", False),
-            # ("dfkde", False),
-            # # ("dfm", False),  # skip dfm test
-            # ("stfpm", False),
-            # ("stfpm", True),
-            # ("patchcore", False),
-            # ("cflow", False),
+            ("dfkde", False),
+            # ("dfm", False),  # skip dfm test
+            ("stfpm", False),
+            ("stfpm", True),
+            ("patchcore", False),
+            ("cflow", False),
         ],
     )
     @pytest.mark.flaky(max_runs=3)

From 271a96f931b39f077f7265c899f92fd17ca007bb Mon Sep 17 00:00:00 2001
From: Samet Akcay
Date: Fri, 28 Jan 2022 13:13:56 -0700
Subject: [PATCH 25/30] revert numpy version

---
 requirements/base.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements/base.txt b/requirements/base.txt
index e0aa0cc813..a800fb288e 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
@@ -8,7 +8,7 @@ lxml==4.6.5
 matplotlib==3.4.3
 networkx~=2.5
 nncf==2.0.0
-numpy~=1.19.2
+numpy~=1.19.5
 omegaconf==2.1.1
 pillow==9.0.0
 pytorch-lightning==1.5.9

From 7135e1c9890e3b6b67ca372938d84f9d61db0942 Mon Sep 17 00:00:00 2001
From: Samet Akcay
Date: Fri, 28 Jan 2022 13:14:16 -0700
Subject: [PATCH 26/30] revert some tests

---
 .../normalization_callback/test_normalization_callback.py | 1 -
 tests/deploy/test_inferencer.py                           | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/tests/core/callbacks/normalization_callback/test_normalization_callback.py b/tests/core/callbacks/normalization_callback/test_normalization_callback.py
index 8978efd590..bfbe924b68 100644
--- a/tests/core/callbacks/normalization_callback/test_normalization_callback.py
+++ b/tests/core/callbacks/normalization_callback/test_normalization_callback.py
@@ -14,7 +14,6 @@ def run_train_test(config):
 
     trainer = Trainer(**config.trainer, callbacks=callbacks)
     trainer.fit(model=model, datamodule=datamodule)
-    trainer.validate(model=model, datamodule=datamodule)
     results = trainer.test(model=model, datamodule=datamodule)
     return results
 
diff --git a/tests/deploy/test_inferencer.py b/tests/deploy/test_inferencer.py
index 1a08198dcb..883f6f49d1 100644
--- a/tests/deploy/test_inferencer.py
+++ b/tests/deploy/test_inferencer.py
@@ -52,7 +52,7 @@ class TestInferencers:
             "patchcore",
         ],
    )
-    @TestDataset(num_train=200, num_test=1, path=get_dataset_path(), use_mvtec=False)
+    @TestDataset(num_train=20, num_test=1, path=get_dataset_path(), use_mvtec=False)
     def test_torch_inference(self, model_name: str, category: str = "shapes", path: str = "./datasets/MVTec"):
         """Tests Torch inference.
 
         Model is not trained as this checks that the inferencers are working.
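
The re-enabled parameter matrix in `tests/models/test_model.py` above turns each `(model_name, nncf)` pair into an independent test case and retries stochastic trainings via the `flaky` marker. A minimal sketch of that pattern follows, assuming the `flaky` pytest plugin is installed; `train_and_eval` is a hypothetical placeholder, not an anomalib helper.

import pytest


def train_and_eval(model_name: str, nncf: bool) -> float:
    """Hypothetical stand-in that would train a model and return its AUC."""
    return 1.0


@pytest.mark.parametrize(
    ["model_name", "nncf"],
    [
        ("padim", False),
        ("dfkde", False),
        ("stfpm", False),
        ("stfpm", True),
        ("patchcore", False),
        ("cflow", False),
    ],
)
@pytest.mark.flaky(max_runs=3)  # retry non-deterministic trainings up to 3 times
def test_model(model_name: str, nncf: bool) -> None:
    # Each (model_name, nncf) pair runs as its own test case.
    assert train_and_eval(model_name, nncf) > 0.5
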
From ee2d37aba24f4f7d6d6328b7c21bebc89bb2e474 Mon Sep 17 00:00:00 2001
From: Samet Akcay
Date: Fri, 28 Jan 2022 21:57:58 -0700
Subject: [PATCH 27/30] nncf callback

---
 anomalib/core/callbacks/nncf_callback.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/anomalib/core/callbacks/nncf_callback.py b/anomalib/core/callbacks/nncf_callback.py
index d7e5fc5f65..8d0013fce8 100644
--- a/anomalib/core/callbacks/nncf_callback.py
+++ b/anomalib/core/callbacks/nncf_callback.py
@@ -77,7 +77,7 @@ def __init__(self, config: Union[ListConfig, DictConfig], dirpath: str, filename
         self.comp_ctrl: Optional[CompressionAlgorithmController] = None
         self.compression_scheduler: CompressionScheduler
 
-    def setup(self, trainer: pl.Trainer, pl_module: pl.LightningModule, _stage: Optional[str] = None) -> None:
+    def setup(self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: Optional[str] = None) -> None:
         """Call when fit or test begins.
 
         Takes the pytorch model and wraps it using the compression controller so that it is ready for nncf fine-tuning.

From 623f4ed300fe68650c0fc1509ae58743f23d2ef8 Mon Sep 17 00:00:00 2001
From: Dick Ameln
Date: Wed, 2 Feb 2022 12:13:58 +0100
Subject: [PATCH 28/30] use on_validation_epoch_start instead of
 on_validation_start in cdf callback

---
 anomalib/core/callbacks/cdf_normalization.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/anomalib/core/callbacks/cdf_normalization.py b/anomalib/core/callbacks/cdf_normalization.py
index 6c2e800b45..1786591139 100644
--- a/anomalib/core/callbacks/cdf_normalization.py
+++ b/anomalib/core/callbacks/cdf_normalization.py
@@ -22,7 +22,7 @@ def on_test_start(self, _trainer: pl.Trainer, pl_module: pl.LightningModule) ->
         pl_module.image_metrics.F1.threshold = 0.5
         pl_module.pixel_metrics.F1.threshold = 0.5
 
-    def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
+    def on_validation_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
         """Called when the validation starts after training.
 
         Use the current model to compute the anomaly score distributions

From 62f1abf118f1e7ccea858305d7005130b961bacb Mon Sep 17 00:00:00 2001
From: Samet Akcay
Date: Wed, 2 Feb 2022 13:12:19 -0700
Subject: [PATCH 29/30] Updated ganomaly configs to match the version

---
 anomalib/models/ganomaly/config.yaml | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/anomalib/models/ganomaly/config.yaml b/anomalib/models/ganomaly/config.yaml
index f6d9a316e7..2837bf13d1 100644
--- a/anomalib/models/ganomaly/config.yaml
+++ b/anomalib/models/ganomaly/config.yaml
@@ -71,7 +71,6 @@ trainer:
   accelerator: null
   accumulate_grad_batches: 1
   amp_backend: native
-  amp_level: O2
   auto_lr_find: false
   auto_scale_batch_size: false
   auto_select_gpus: false
@@ -79,10 +78,8 @@ trainer:
   check_val_every_n_epoch: 2
   checkpoint_callback: true
   default_root_dir: null
-  deterministic: true
-  distributed_backend: null
+  deterministic: false
   fast_dev_run: false
-  flush_logs_every_n_steps: 100
   gpus: 1
   gradient_clip_val: 0
   limit_predict_batches: 1.0
@@ -107,14 +104,12 @@ trainer:
   process_position: 0
   profiler: null
   progress_bar_refresh_rate: null
-  reload_dataloaders_every_epoch: false
   replace_sampler_ddp: true
   stochastic_weight_avg: false
   sync_batchnorm: false
   terminate_on_nan: false
   tpu_cores: null
   track_grad_norm: -1
-  truncated_bptt_steps: null
   val_check_interval: 1.0
   weights_save_path: null
   weights_summary: top

From 50100f7d4a78dd3be0ee0656691d0ae1731bbb97 Mon Sep 17 00:00:00 2001
From: Samet Akcay
Date: Mon, 7 Feb 2022 03:30:02 -0700
Subject: [PATCH 30/30] Remove commented lines.

---
 tools/train.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/tools/train.py b/tools/train.py
index 4c41e9a47a..d1139a8f60 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -59,8 +59,6 @@ def train():
 
     trainer = Trainer(**config.trainer, logger=logger, callbacks=callbacks)
     trainer.fit(model=model, datamodule=datamodule)
-    # TODO: https://github.com/openvinotoolkit/anomalib/issues/62
-    # trainer.validate(model=model, datamodule=datamodule)
     trainer.test(model=model, datamodule=datamodule)
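
After patch 30, the entrypoint in `tools/train.py` reduces to fit followed by test, with no explicit validate call: validation already runs inside `fit` on PL >= 1.4, which is where the one-shot models fit their statistics. A minimal sketch of that flow, under the assumption that the helper names (`get_configurable_parameters`, `get_datamodule`, `get_model`) match the anomalib version being patched:

from pytorch_lightning import Trainer

# NOTE: these helper imports are assumptions about the anomalib layout at the
# time of this patch series; adjust to the actual module paths.
from anomalib.config import get_configurable_parameters
from anomalib.data import get_datamodule
from anomalib.models import get_model

config = get_configurable_parameters(model_name="patchcore")
datamodule = get_datamodule(config)
model = get_model(config)

trainer = Trainer(**config.trainer)
trainer.fit(model=model, datamodule=datamodule)   # validation (and one-shot fitting) happens inside fit
trainer.test(model=model, datamodule=datamodule)  # final evaluation with the fitted model
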