diff --git a/README.md b/README.md
index 7d59d789e7..e36c29fc9c 100644
--- a/README.md
+++ b/README.md
@@ -156,26 +156,22 @@ The new CLI approach offers a lot more flexibility, details of which are explain
 ## Inference
 
 ### ⚠️ Anomalib < v.0.4.0
 
-Anomalib contains several tools that can be used to perform inference with a trained model. The script in [`tools/inference`](tools/inference/lightning.py) contains an example of how the inference tools can be used to generate a prediction for an input image.
+Anomalib provides Lightning, Gradio, and OpenVINO inferencers for performing inference with a trained model.
 
-If the specified weight path points to a PyTorch Lightning checkpoint file (`.ckpt`), inference will run in PyTorch. If the path points to an ONNX graph (`.onnx`) or OpenVINO IR (`.bin` or `.xml`), inference will run in OpenVINO.
-
-The following command can be used to run inference from the command line:
+The following command can be used to run PyTorch Lightning inference from the command line:
 
 ```bash
-python tools/inference.py \
-    --config \
-    --weight_path \
-    --image_path
+python tools/inference/lightning_inference.py -h
 ```
 
 As a quick example:
 
 ```bash
-python tools/inference.py \
+python tools/inference/lightning_inference.py \
     --config anomalib/models/padim/config.yaml \
-    --weight_path results/padim/mvtec/bottle/weights/model.ckpt \
-    --image_path datasets/MVTec/bottle/test/broken_large/000.png
+    --weights results/padim/mvtec/bottle/weights/model.ckpt \
+    --input datasets/MVTec/bottle/test/broken_large/000.png \
+    --output results/padim/mvtec/bottle/images
 ```
 
 If you want to run the OpenVINO model, ensure that the `openvino` `apply` flag is set to `True` in the respective model `config.yaml`.
@@ -191,10 +187,10 @@ Example OpenVINO Inference:
 
 ```bash
 python tools/inference/openvino_inference.py \
     --config anomalib/models/padim/config.yaml \
-    --weight_path results/padim/mvtec/bottle/openvino/openvino_model.bin \
-    --image_path datasets/MVTec/bottle/test/broken_large/000.png \
+    --weights results/padim/mvtec/bottle/openvino/openvino_model.bin \
     --meta_data results/padim/mvtec/bottle/openvino/meta_data.json \
-    --save_path results/padim/mvtec/bottle/images
+    --input datasets/MVTec/bottle/test/broken_large/000.png \
+    --output results/padim/mvtec/bottle/images
 ```
 
 > Ensure that you provide the path to `meta_data.json` if you want the normalization to be applied correctly.
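For readers who prefer to drive inference from Python rather than the CLI, the same inferencers that back the entrypoints above can be used directly. The sketch below is illustrative only: it mirrors how `tools/inference/gradio_inference.py` constructs its inferencer in this patch, and the config/checkpoint/image paths are the placeholder MVTec paths from the README examples, not files guaranteed to exist.

```python
# Minimal programmatic-inference sketch (assumed paths; mirrors gradio_inference.py).
from pathlib import Path

from anomalib.config import get_configurable_parameters
from anomalib.deploy import TorchInferencer  # OpenVINOInferencer handles .onnx/.bin/.xml weights

config = get_configurable_parameters(config_path="anomalib/models/padim/config.yaml")
inferencer = TorchInferencer(
    config=config,
    model_source=Path("results/padim/mvtec/bottle/weights/model.ckpt"),  # placeholder checkpoint
)
# predict() reads the image from disk; superimpose=True overlays the anomaly map on the
# input. Depending on the model it returns either the visualized map or a (map, score) tuple.
prediction = inferencer.predict(
    image=Path("datasets/MVTec/bottle/test/broken_large/000.png"), superimpose=True
)
```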
diff --git a/anomalib/config/config.py b/anomalib/config/config.py
index 1ebc1f88eb..dcb11862fc 100644
--- a/anomalib/config/config.py
+++ b/anomalib/config/config.py
@@ -161,7 +161,7 @@ def get_configurable_parameters(
     config.trainer.default_root_dir = str(project_path)
 
     if weight_file:
-        config.model.weight_file = weight_file
+        config.trainer.resume_from_checkpoint = weight_file
 
     config = update_nncf_config(config)
diff --git a/anomalib/models/patchcore/config.yaml b/anomalib/models/patchcore/config.yaml
index ab66dd1da9..d588b7d1ac 100644
--- a/anomalib/models/patchcore/config.yaml
+++ b/anomalib/models/patchcore/config.yaml
@@ -28,7 +28,6 @@ model:
     - layer3
   coreset_sampling_ratio: 0.1
   num_neighbors: 9
-  weight_file: weights/model.ckpt
   normalization_method: min_max # options: [null, min_max, cdf]
 
 metrics:
diff --git a/anomalib/utils/callbacks/__init__.py b/anomalib/utils/callbacks/__init__.py
index 3b7efc502c..f199173d91 100644
--- a/anomalib/utils/callbacks/__init__.py
+++ b/anomalib/utils/callbacks/__init__.py
@@ -91,8 +91,8 @@ def get_callbacks(config: Union[ListConfig, DictConfig]) -> List[Callback]:
     )
     callbacks.append(metrics_callback)
 
-    if "weight_file" in config.model.keys():
-        load_model = LoadModelCallback(os.path.join(config.project.path, config.model.weight_file))
+    if "resume_from_checkpoint" in config.trainer.keys() and config.trainer.resume_from_checkpoint is not None:
+        load_model = LoadModelCallback(config.trainer.resume_from_checkpoint)
         callbacks.append(load_model)
 
     if "normalization_method" in config.model.keys() and not config.model.normalization_method == "none":
diff --git a/docs/source/guides/inference.rst b/docs/source/guides/inference.rst
index 6ce218401e..70be3625fa 100644
--- a/docs/source/guides/inference.rst
+++ b/docs/source/guides/inference.rst
@@ -9,25 +9,25 @@ PyTorch (Lightning) Inference
 =============================
 The entrypoint script in ``tools/inference/lightning_inference.py`` can be used to run inference with a trained PyTorch model. The script runs inference by loading a previously trained model into a PyTorch Lightning trainer and running the ``predict sequence``. The entrypoint script has several command line arguments that can be used to configure inference:
 
-+---------------------+----------+-------------------------------------------------------------------------------------+
-| Parameter | Required | Description |
-+=====================+==========+=====================================================================================+
-| config | True | Path to the model config file. |
-+---------------------+----------+-------------------------------------------------------------------------------------+
-| weight_path | True | Path to the ``.ckpt`` model checkpoint file. |
-+---------------------+----------+-------------------------------------------------------------------------------------+
-| image_path | True | Path to the image source. This can be a single image or a folder of images. |
-+---------------------+----------+-------------------------------------------------------------------------------------+
-| save_path | False | Path to which the output images should be saved. |
-+---------------------+----------+-------------------------------------------------------------------------------------+
-| visualization_mode | False | Determines how the inference results are visualized. Options: "full", "simple". |
-+---------------------+----------+-------------------------------------------------------------------------------------+
-| disable_show_images | False | When this flag is passed, visualizations will not be shown on the screen. |
-+---------------------+----------+-------------------------------------------------------------------------------------+
++---------------------+----------+---------------------------------------------------------------------------------+
+| Parameter | Required | Description |
++=====================+==========+=================================================================================+
+| config | True | Path to the model config file. |
++---------------------+----------+---------------------------------------------------------------------------------+
+| weights | True | Path to the ``.ckpt`` model checkpoint file. |
++---------------------+----------+---------------------------------------------------------------------------------+
+| input | True | Path to the image source. This can be a single image or a folder of images. |
++---------------------+----------+---------------------------------------------------------------------------------+
+| output | False | Path to which the output images should be saved. |
++---------------------+----------+---------------------------------------------------------------------------------+
+| visualization_mode | False | Determines how the inference results are visualized. Options: "full", "simple". |
++---------------------+----------+---------------------------------------------------------------------------------+
+| disable_show_images | False | When this flag is passed, visualizations will not be shown on the screen. |
++---------------------+----------+---------------------------------------------------------------------------------+
 
 To run inference, call the script from the command line with the following parameters, e.g.:
 
-``python tools/inference/lightning.py --config padim.yaml --weight_path results/weights/model.ckpt --image_path image.png``
+``python tools/inference/lightning_inference.py --config padim.yaml --weights results/weights/model.ckpt --input image.png``
 
 This will run inference on the specified image file or all images in the folder. A visualization of the inference results, including the predicted heatmap and segmentation results (if applicable), will be displayed on the screen, like the example below.
@@ -42,9 +42,9 @@ To run OpenVINO inference, first make sure that your model has been exported to
 +=============+==========+=====================================================================================+
 | config | True | Path to the model config file. |
 +-------------+----------+-------------------------------------------------------------------------------------+
-| weight_path | True | Path to the OpenVINO IR model file (either ``.xml`` or ``.bin``) |
+| weights | True | Path to the OpenVINO IR model file (either ``.xml`` or ``.bin``) |
 +-------------+----------+-------------------------------------------------------------------------------------+
-| image_path | True | Path to the image source. This can be a single image or a folder of images. |
+| input | True | Path to the image source. This can be a single image or a folder of images. |
 +-------------+----------+-------------------------------------------------------------------------------------+
 | output | False | Path to which the output images should be saved. Leave empty for live visualization.|
 +-------------+----------+-------------------------------------------------------------------------------------+
@@ -56,6 +56,6 @@ For correct inference results, the ``meta_data`` argument should be specified an
 As an example, OpenVINO inference can be triggered by the following command:
 
-``python tools/inference/openvino.py --config padim.yaml --weight_path results/openvino/model.xml --image_path image.png --meta_data results/openvino/meta_data.json``
+``python tools/inference/openvino_inference.py --config padim.yaml --weights results/openvino/model.xml --input image.png --meta_data results/openvino/meta_data.json``
 
 Similar to PyTorch inference, the visualization results will be displayed on the screen, and optionally saved to the file system location specified by the ``output`` parameter.
diff --git a/tests/helpers/model.py b/tests/helpers/model.py
index 0ddf048bf3..d03c4113d7 100644
--- a/tests/helpers/model.py
+++ b/tests/helpers/model.py
@@ -72,12 +72,6 @@ def setup_model_train(
         if legacy_device in config.trainer:
             config.trainer[legacy_device] = None
 
-    # If weight file is empty, remove the key from config
-    if "weight_file" in config.model.keys() and weight_file == "":
-        config.model.pop("weight_file")
-    else:
-        config.model.weight_file = weight_file if not fast_run else "weights/last.ckpt"
-
     if nncf:
         config.optimization["nncf"] = {"apply": True, "input_info": {"sample_size": None}}
         config = update_nncf_config(config)
@@ -132,6 +126,9 @@ def model_load_test(config: Union[DictConfig, ListConfig], datamodule: Lightning
     """
     loaded_model = get_model(config)  # get new model
 
+    # Assign the weight file to resume_from_checkpoint. When the trainer is initialized, the Trainer
+    # object will automatically load the weights.
+    config.trainer.resume_from_checkpoint = os.path.join(config.project.path, "weights/last.ckpt")
 
     callbacks = get_callbacks(config)
diff --git a/tools/inference/gradio_inference.py b/tools/inference/gradio_inference.py
index 213384ac53..9cdafa5274 100644
--- a/tools/inference/gradio_inference.py
+++ b/tools/inference/gradio_inference.py
@@ -43,24 +43,22 @@ def infer(
 
 
 def get_args() -> Namespace:
-    """Get command line arguments.
+    r"""Get command line arguments.
 
     Example:
-        >>> python tools/inference_gradio.py \
-            --config_path ./anomalib/models/padim/config.yaml \
-            --weight_path ./results/padim/mvtec/bottle/weights/model.ckpt
+        Example for Torch Inference.
+        >>> python tools/inference/gradio_inference.py \
+        ... --config ./anomalib/models/padim/config.yaml \
+        ... --weights ./results/padim/mvtec/bottle/weights/model.ckpt  # noqa: E501  # pylint: disable=line-too-long
 
     Returns:
         Namespace: List of arguments.
     """
     parser = ArgumentParser()
-    parser.add_argument("--config_path", type=Path, required=True, help="Path to a model config file")
-    parser.add_argument("--weight_path", type=Path, required=True, help="Path to a model weights")
-    parser.add_argument(
-        "--meta_data_path", type=Path, required=False, help="Path to JSON file containing the metadata."
-    )
-
+    parser.add_argument("--config", type=Path, required=True, help="Path to a config file")
+    parser.add_argument("--weights", type=Path, required=True, help="Path to model weights")
+    parser.add_argument("--meta_data", type=Path, required=False, help="Path to a JSON file containing the metadata.")
     parser.add_argument(
         "--threshold",
         type=float,
@@ -68,12 +66,9 @@ def get_args() -> Namespace:
         default=75.0,
         help="Value to threshold anomaly scores into 0-100 range",
     )
     parser.add_argument("--share", type=bool, required=False, default=False, help="Share Gradio `share_url`")
-    args = parser.parse_args()
-
-    return args
+    return parser.parse_args()
 
 
 def get_inferencer(config_path: Path, weight_path: Path, meta_data_path: Optional[Path] = None) -> Inferencer:
@@ -96,15 +91,14 @@ def get_inferencer(config_path: Path, weight_path: Path, meta_data_path: Optiona
     # for the openvino models.
     extension = weight_path.suffix
     inferencer: Inferencer
+    module = import_module("anomalib.deploy")
     if extension in (".ckpt"):
-        module = import_module("anomalib.deploy.inferencers.torch")
         torch_inferencer = getattr(module, "TorchInferencer")
         inferencer = torch_inferencer(config=config, model_source=weight_path, meta_data_path=meta_data_path)
 
     elif extension in (".onnx", ".bin", ".xml"):
-        module = import_module("anomalib.deploy.inferencers.openvino")
         openvino_inferencer = getattr(module, "OpenVINOInferencer")
-        inferencer = openvino_inferencer(config=config, path=weight_path, meta_data_path=meta_data_path)
+        inferencer = openvino_inferencer(config=config_path, path=weight_path, meta_data_path=meta_data_path)
 
     else:
         raise ValueError(
@@ -116,9 +110,9 @@ def get_inferencer(config_path: Path, weight_path: Path, meta_data_path: Optiona
 
 if __name__ == "__main__":
-    session_args = get_args()
+    args = get_args()
 
-    gradio_inferencer = get_inferencer(session_args.config_path, session_args.weight_path, session_args.meta_data_path)
+    gradio_inferencer = get_inferencer(args.config, args.weights, args.meta_data)
 
     interface = gr.Interface(
         fn=lambda image, threshold: infer(image, gradio_inferencer, threshold),
@@ -126,7 +120,7 @@ def get_inferencer(config_path: Path, weight_path: Path, meta_data_path: Optiona
             gradio.inputs.Image(
                 shape=None, image_mode="RGB", source="upload", tool="editor", type="numpy", label="Image"
             ),
-            gradio.inputs.Slider(default=session_args.threshold, label="threshold", optional=False),
+            gradio.inputs.Slider(default=args.threshold, label="threshold", optional=False),
         ],
         outputs=[
             gradio.outputs.Image(type="numpy", label="Anomaly Map"),
@@ -139,4 +133,4 @@ def get_inferencer(config_path: Path, weight_path: Path, meta_data_path: Optiona
         description="Anomalib Gradio",
     )
 
-    interface.launch(share=session_args.share)
+    interface.launch(share=args.share)
diff --git a/tools/inference/lightning_inference.py b/tools/inference/lightning_inference.py
index 5c3ab9c01f..e5763208d0 100644
--- a/tools/inference/lightning_inference.py
+++ b/tools/inference/lightning_inference.py
@@ -22,24 +22,24 @@ def get_args() -> Namespace:
         Namespace: List of arguments.
""" parser = ArgumentParser() - parser.add_argument("--config", type=Path, required=True, help="Path to a model config file") - parser.add_argument("--weight_path", type=Path, required=True, help="Path to a model weights") - parser.add_argument("--image_path", type=Path, required=True, help="Path to an image to infer.") + parser.add_argument("--config", type=Path, required=True, help="Path to a config file") + parser.add_argument("--weights", type=Path, required=True, help="Path to model weights") + parser.add_argument("--input", type=Path, required=True, help="Path to image(s) to infer.") + parser.add_argument("--output", type=str, required=False, help="Path to save the output image(s).") parser.add_argument( "--visualization_mode", type=str, required=False, default="simple", - help="Visualization mode. 'full' or 'simple'", + help="Visualization mode.", choices=["full", "simple"], ) parser.add_argument( - "--disable_show_images", + "--show", action="store_true", required=False, - help="Do not show the visualized predictions on the screen.", + help="Show the visualized predictions on the screen.", ) - parser.add_argument("--save_path", type=str, required=False, help="Path to save the output images.") args = parser.parse_args() return args @@ -49,12 +49,12 @@ def infer(): """Run inference.""" args = get_args() config = get_configurable_parameters(config_path=args.config) - config.model["weight_file"] = str(args.weight_path) - config.visualization.show_images = not args.disable_show_images + config.trainer.resume_from_checkpoint = str(args.weights) + config.visualization.show_images = args.show config.visualization.mode = args.visualization_mode - if args.save_path: # overwrite save path + if args.output: # overwrite save path config.visualization.save_images = True - config.visualization.image_save_path = args.save_path + config.visualization.image_save_path = args.output else: config.visualization.save_images = False @@ -63,7 +63,7 @@ def infer(): trainer = Trainer(callbacks=callbacks, **config.trainer) - dataset = InferenceDataset(args.image_path, image_size=tuple(config.dataset.image_size)) + dataset = InferenceDataset(args.input, image_size=tuple(config.dataset.image_size)) dataloader = DataLoader(dataset) trainer.predict(model=model, dataloaders=[dataloader]) diff --git a/tools/inference/openvino_inference.py b/tools/inference/openvino_inference.py index 810226e703..759300a25d 100644 --- a/tools/inference/openvino_inference.py +++ b/tools/inference/openvino_inference.py @@ -37,11 +37,11 @@ def get_args() -> Namespace: Namespace: List of arguments. 
""" parser = ArgumentParser() - parser.add_argument("--config", type=Path, required=True, help="Path to a model config file") - parser.add_argument("--weight_path", type=Path, required=True, help="Path to a model weights") - parser.add_argument("--image_path", type=Path, required=True, help="Path to an image to infer.") - parser.add_argument("--save_path", type=Path, required=False, help="Path to save the output image.") - parser.add_argument("--meta_data", type=Path, required=True, help="Path to JSON file containing the metadata.") + parser.add_argument("--config", type=Path, required=True, help="Path to a config file") + parser.add_argument("--weights", type=Path, required=True, help="Path to model weights") + parser.add_argument("--meta_data", type=Path, required=True, help="Path to a JSON file containing the metadata.") + parser.add_argument("--input", type=Path, required=True, help="Path to an image to infer.") + parser.add_argument("--output", type=Path, required=False, help="Path to save the output image.") parser.add_argument( "--overlay_mask", type=bool, @@ -75,6 +75,41 @@ def add_label(prediction: np.ndarray, scores: float, font: int = cv2.FONT_HERSHE return prediction +def infer(image_path: Path, inferencer: Inferencer, output_path: Optional[Path] = None, overlay: bool = False) -> None: + """Perform inference on a single image. + + Args: + image_path (Path): Path to image/directory containing images. + inferencer (Inferencer): Inferencer to use. + output_path (Path, optional): Path to save the output image. If this is None, the output is visualized. + overlay (bool, optional): Overlay the segmentation mask on the image. It assumes that the task is segmentation. + """ + # Perform inference for the given image or image path. if image + # path is provided, `predict` method will read the image from + # file for convenience. We set the superimpose flag to True + # to overlay the predicted anomaly map on top of the input image. + prediction = inferencer.predict(image=image_path, superimpose=True, overlay_mask=overlay) + + # Incase both anomaly map and scores are returned add scores to the image. + if isinstance(prediction, tuple): + anomaly_map, score = prediction + output_image = add_label(anomaly_map, score) + + # Show or save the output image, depending on what's provided as + # the command line argument. + output_image = cv2.cvtColor(output_image, cv2.COLOR_RGB2BGR) + if output_path is None: + cv2.imshow("Anomaly Map", output_image) + cv2.waitKey(0) # wait for any key press + else: + # Create directory for parents if it doesn't exist. + output_path.parent.mkdir(parents=True, exist_ok=True) + if output_path.suffix == "": # This is a directory + output_path.mkdir(exist_ok=True) # Create current directory + output_path = output_path / image_path.name + cv2.imwrite(filename=str(output_path), img=output_image) + + def stream() -> None: """Stream predictions. @@ -87,58 +122,22 @@ def stream() -> None: config = get_configurable_parameters(config_path=args.config) # Get the inferencer. - inferencer = OpenVINOInferencer(config=config, path=args.weight_path, meta_data_path=args.meta_data) + inferencer = OpenVINOInferencer(config=config, path=args.weights, meta_data_path=args.meta_data) - if args.image_path.is_dir(): + if args.input.is_dir(): # Write the output to save_path in the same structure as the input directory. 
-        for image in args.image_path.glob("**/*"):
+        for image in args.input.glob("**/*"):
             if image.is_file() and image.suffix in IMG_EXTENSIONS:
                 # Here save_path is assumed to be a directory. Image subdirectories are appended to the save_path.
-                save_path = Path(args.save_path / image.relative_to(args.image_path).parent) if args.save_path else None
+                save_path = Path(args.output / image.relative_to(args.input).parent) if args.output else None
                 infer(image, inferencer, save_path, args.overlay_mask)
-    elif args.image_path.suffix in IMG_EXTENSIONS:
-        infer(args.image_path, inferencer, args.save_path, args.overlay_mask)
+    elif args.input.suffix in IMG_EXTENSIONS:
+        infer(args.input, inferencer, args.output, args.overlay_mask)
     else:
         raise ValueError(
-            f"Image extension is not supported. Supported extensions are .jpg, .png, .jpeg."
-            f" Got {args.image_path.suffix}"
+            f"Image extension is not supported. Supported extensions are .jpg, .png, .jpeg." f" Got {args.input.suffix}"
         )
 
 
-def infer(image_path: Path, inferencer: Inferencer, save_path: Optional[Path] = None, overlay: bool = False) -> None:
-    """Perform inference on a single image.
-
-    Args:
-        image_path (Path): Path to image/directory containing images.
-        inferencer (Inferencer): Inferencer to use.
-        save_path (Path, optional): Path to save the output image. If this is None, the output is visualized.
-        overlay (bool, optional): Overlay the segmentation mask on the image. It assumes that the task is segmentation.
-    """
-    # Perform inference for the given image or image path. if image
-    # path is provided, `predict` method will read the image from
-    # file for convenience. We set the superimpose flag to True
-    # to overlay the predicted anomaly map on top of the input image.
-    output = inferencer.predict(image=image_path, superimpose=True, overlay_mask=overlay)
-
-    # Incase both anomaly map and scores are returned add scores to the image.
-    if isinstance(output, tuple):
-        anomaly_map, score = output
-        output = add_label(anomaly_map, score)
-
-    # Show or save the output image, depending on what's provided as
-    # the command line argument.
-    output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
-    if save_path is None:
-        cv2.imshow("Anomaly Map", output)
-        cv2.waitKey(0)  # wait for any key press
-    else:
-        # Create directory for parents if it doesn't exist.
-        save_path.parent.mkdir(parents=True, exist_ok=True)
-        if save_path.suffix == "":  # This is a directory
-            save_path.mkdir(exist_ok=True)  # Create current directory
-            save_path = save_path / image_path.name
-        cv2.imwrite(filename=str(save_path), img=output)
-
-
 if __name__ == "__main__":
     stream()
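As a usage note on the refactor above: because the OpenVINO `infer()` helper now takes the inferencer and output path explicitly, it can be exercised on its own from within the script. The sketch below is a rough illustration under that assumption; the weight, metadata, and image paths are the illustrative MVTec paths from the README, and `infer` refers to the helper defined in `tools/inference/openvino_inference.py` above.

```python
# Hypothetical driver for the refactored infer() helper (assumed paths).
from pathlib import Path

from anomalib.config import get_configurable_parameters
from anomalib.deploy import OpenVINOInferencer

config = get_configurable_parameters(config_path="anomalib/models/padim/config.yaml")
inferencer = OpenVINOInferencer(
    config=config,
    path=Path("results/padim/mvtec/bottle/openvino/openvino_model.bin"),
    meta_data_path=Path("results/padim/mvtec/bottle/openvino/meta_data.json"),
)

# With output_path=None the result is shown in an OpenCV window; with a directory or
# file path it is written to disk instead (directories are created as needed).
infer(
    image_path=Path("datasets/MVTec/bottle/test/broken_large/000.png"),
    inferencer=inferencer,
    output_path=Path("results/padim/mvtec/bottle/images"),
    overlay=False,
)
```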