From 969ddd1edcdb5dfc4a59ec232cbf1eaab024ca6d Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Thu, 2 Jun 2022 21:01:03 +0200 Subject: [PATCH 01/19] initial implementation of DRAEM algo --- anomalib/models/__init__.py | 2 +- anomalib/models/draem/LICENSE | 29 + anomalib/models/draem/__init__.py | 19 + anomalib/models/draem/augmenter.py | 150 ++++++ anomalib/models/draem/config.yaml | 103 ++++ anomalib/models/draem/lightning_model.py | 124 +++++ anomalib/models/draem/loss.py | 204 +++++++ anomalib/models/draem/perlin.py | 134 +++++ anomalib/models/draem/torch_model.py | 496 ++++++++++++++++++ anomalib/models/draem/transform_config.yaml | 26 + anomalib/pre_processing/pre_process.py | 18 +- tests/pre_merge/models/test_model_premerge.py | 1 + 12 files changed, 1298 insertions(+), 8 deletions(-) create mode 100644 anomalib/models/draem/LICENSE create mode 100644 anomalib/models/draem/__init__.py create mode 100644 anomalib/models/draem/augmenter.py create mode 100644 anomalib/models/draem/config.yaml create mode 100644 anomalib/models/draem/lightning_model.py create mode 100644 anomalib/models/draem/loss.py create mode 100644 anomalib/models/draem/perlin.py create mode 100644 anomalib/models/draem/torch_model.py create mode 100644 anomalib/models/draem/transform_config.yaml diff --git a/anomalib/models/__init__.py b/anomalib/models/__init__.py index 7b86048ae1..8062b47f3d 100644 --- a/anomalib/models/__init__.py +++ b/anomalib/models/__init__.py @@ -42,7 +42,7 @@ def get_model(config: Union[DictConfig, ListConfig]) -> AnomalyModule: Returns: AnomalyModule: Anomaly Model """ - model_list: List[str] = ["cflow", "dfkde", "dfm", "ganomaly", "padim", "patchcore", "stfpm"] + model_list: List[str] = ["cflow", "dfkde", "dfm", "draem", "ganomaly", "padim", "patchcore", "stfpm"] model: AnomalyModule if config.model.name in model_list: diff --git a/anomalib/models/draem/LICENSE b/anomalib/models/draem/LICENSE new file mode 100644 index 0000000000..e0721649d6 --- /dev/null +++ b/anomalib/models/draem/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2022 Intel Corporation +SPDX-License-Identifier: Apache-2.0 + +The files in this module are based on the original DRAEM implementation by VitjanZ + +Original license: +---------------- + + MIT License + + Copyright (c) 2021 VitjanZ + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
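The one-line registry change at the top of this patch only adds the name; the resolution of "draem" to a class happens inside `get_model`, whose body is not part of this hunk. A minimal sketch of that dispatch, assuming the `anomalib.models.<name>` packaging convention holds (the import logic below is an illustration, not the verbatim implementation):

```python
from importlib import import_module
from typing import List, Union

from omegaconf import DictConfig, ListConfig


def get_model_sketch(config: Union[DictConfig, ListConfig]):
    """Illustrative dispatch: map config.model.name to its Lightning class."""
    model_list: List[str] = ["cflow", "dfkde", "dfm", "draem", "ganomaly", "padim", "patchcore", "stfpm"]
    if config.model.name not in model_list:
        raise ValueError(f"Unknown model {config.model.name}!")
    # "draem" -> anomalib.models.draem -> DraemLightning (exported in the __init__.py added below)
    module = import_module(f"anomalib.models.{config.model.name}")
    model_class = getattr(module, f"{config.model.name.capitalize()}Lightning")
    return model_class(config)
```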
diff --git a/anomalib/models/draem/__init__.py b/anomalib/models/draem/__init__.py
new file mode 100644
index 0000000000..3e691d9efa
--- /dev/null
+++ b/anomalib/models/draem/__init__.py
@@ -0,0 +1,19 @@
+"""DRAEM model."""
+
+# Copyright (C) 2020 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+
+from .lightning_model import DraemLightning
+
+__all__ = ["DraemLightning"]
diff --git a/anomalib/models/draem/augmenter.py b/anomalib/models/draem/augmenter.py
new file mode 100644
index 0000000000..33216f0611
--- /dev/null
+++ b/anomalib/models/draem/augmenter.py
@@ -0,0 +1,150 @@
+"""Augmenter module that generates out-of-distribution samples for the DRAEM implementation."""
+
+# Original Code
+# Copyright (c) 2022 VitjanZ
+# https://github.com/VitjanZ/DRAEM.
+# SPDX-License-Identifier: MIT
+#
+# Modified
+# Copyright (C) 2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+
+import glob
+import random
+from typing import Optional, Tuple
+
+import cv2
+import imgaug.augmenters as iaa
+import numpy as np
+import torch
+from torch import Tensor
+
+from anomalib.models.draem.perlin import rand_perlin_2d_np
+
+
+class Augmenter:
+    """Class that generates noisy augmentations of input images.
+
+    Args:
+        anomaly_source_path (Optional[str]): Path to a folder of images that will be used as source of the anomalous
+            noise. If not specified, random noise will be used instead.
+    """
+
+    def __init__(self, anomaly_source_path: Optional[str] = None):
+
+        if anomaly_source_path is not None:
+            self.anomaly_source_paths = sorted(glob.glob(anomaly_source_path + "/**/*.jpg", recursive=True))
+        else:
+            self.anomaly_source_paths = []
+
+        self.augmenters = [
+            iaa.GammaContrast((0.5, 2.0), per_channel=True),
+            iaa.MultiplyAndAddToBrightness(mul=(0.8, 1.2), add=(-30, 30)),
+            iaa.pillike.EnhanceSharpness(),
+            iaa.AddToHueAndSaturation((-50, 50), per_channel=True),
+            iaa.Solarize(0.5, threshold=(32, 128)),
+            iaa.Posterize(),
+            iaa.Invert(),
+            iaa.pillike.Autocontrast(),
+            iaa.pillike.Equalize(),
+            iaa.Affine(rotate=(-45, 45)),
+        ]
+        self.rot = iaa.Sequential([iaa.Affine(rotate=(-90, 90))])
+
+    def rand_augmenter(self) -> iaa.Sequential:
+        """Selects 3 random transforms that will be applied to the anomaly source images.
+
+        Returns:
+            A selection of 3 transforms.
+        """
+        aug_ind = np.random.choice(np.arange(len(self.augmenters)), 3, replace=False)
+        aug = iaa.Sequential([self.augmenters[aug_ind[0]], self.augmenters[aug_ind[1]], self.augmenters[aug_ind[2]]])
+        return aug
+
+    def generate_perturbation(
+        self, height: int, width: int, anomaly_source_path: Optional[str]
+    ) -> Tuple[np.ndarray, np.ndarray]:
+        """Generate an image containing a random anomalous perturbation using a source image.
+
+        Args:
+            height (int): height of the generated image.
+            width (int): width of the generated image.
+            anomaly_source_path (Optional[str]): Path to an image file. If not provided, random noise will be used
+                instead.
+ + Returns: + Image containing a random anomalous perturbation, and the corresponding ground truth anomaly mask. + """ + # Generate random perlin noise + perlin_scale = 6 + min_perlin_scale = 0 + + perlin_scalex = 2 ** random.randint(min_perlin_scale, perlin_scale) + perlin_scaley = 2 ** random.randint(min_perlin_scale, perlin_scale) + + perlin_noise = rand_perlin_2d_np((height, width), (perlin_scalex, perlin_scaley)) + perlin_noise = self.rot(image=perlin_noise) + + # Create mask from perlin noise + mask = np.where(perlin_noise > 0.5, np.ones_like(perlin_noise), np.zeros_like(perlin_noise)) + mask = np.expand_dims(mask, axis=2).astype(np.float32) + + # Load anomaly source image + if anomaly_source_path: + anomaly_source_img = cv2.imread(anomaly_source_path) + anomaly_source_img = cv2.resize(anomaly_source_img, dsize=(width, height)) + else: # if no anomaly source is specified, we use the perlin noise as anomalous source + anomaly_source_img = np.expand_dims(perlin_noise, 2).repeat(3, 2) + anomaly_source_img = (anomaly_source_img * 255).astype(np.uint8) + + # Augment anomaly source image + aug = self.rand_augmenter() + anomaly_img_augmented = aug(image=anomaly_source_img) + + # Create anomalous perturbation that we will apply to the image + perturbation = anomaly_img_augmented.astype(np.float32) * mask / 255.0 + + return perturbation, mask + + def augment_batch(self, batch: Tensor) -> Tuple[Tensor, Tensor]: + """Generate anomalous augmentations for a batch of input images. + + Args: + batch (Tensor): Batch of input images + + Returns: + - Augmented image to which anomalous perturbations have been added. + - Ground truth masks corresponding to the anomalous perturbations. + """ + batch_size, channels, height, width = batch.shape + + # Collect perturbations + perturbations_list = [] + masks_list = [] + for _ in range(batch_size): + if random.random() > 0.5: # include 50% normal samples + perturbations_list.append(torch.zeros((channels, height, width))) + masks_list.append(torch.zeros((1, height, width))) + else: + anomaly_source_path = ( + random.sample(self.anomaly_source_paths, 1)[0] if len(self.anomaly_source_paths) > 0 else None + ) + perturbation, mask = self.generate_perturbation(height, width, anomaly_source_path) + perturbations_list.append(Tensor(perturbation).permute((2, 0, 1))) + masks_list.append(Tensor(mask).permute((2, 0, 1))) + + perturbations = torch.stack(perturbations_list).to(batch.device) + masks = torch.stack(masks_list).to(batch.device) + + # Apply perturbations batch wise + beta = torch.rand(batch_size) * 0.8 + beta = beta.view(batch_size, 1, 1, 1).expand_as(batch).to(batch.device) + + augmented_batch = batch * (1 - masks) + (1 - beta) * perturbations + beta * batch * (masks) + + # for i in range(batch_size): + # cv2.imshow("aug", augmented_batch[i].permute(1, 2, 0).cpu().numpy()) + # cv2.waitKey(0) + + return augmented_batch, masks diff --git a/anomalib/models/draem/config.yaml b/anomalib/models/draem/config.yaml new file mode 100644 index 0000000000..6a47627c81 --- /dev/null +++ b/anomalib/models/draem/config.yaml @@ -0,0 +1,103 @@ +dataset: + name: mvtec #options: [mvtec, btech, folder] + format: mvtec + path: ./datasets/MVTec + category: bottle + task: segmentation + image_size: 256 + train_batch_size: 8 + test_batch_size: 32 + num_workers: 36 + transform_config: + train: ./anomalib/models/draem/transform_config.yaml + val: ./anomalib/models/draem/transform_config.yaml + create_validation_set: false + tiling: + apply: false + tile_size: null + stride: null + 
remove_border_count: 0 + use_random_tiling: False + random_tile_count: 16 + +model: + name: draem + # anomaly_source_path: ./datasets/dtd + anomaly_source_path: null + lr: 0.0001 + early_stopping: + patience: 50 + metric: pixel_AUROC + mode: max + normalization_method: min_max # options: [none, min_max, cdf] + +metrics: + image: + - F1Score + - AUROC + pixel: + - F1Score + - AUROC + threshold: + image_default: 3 + pixel_default: 3 + adaptive: true + +project: + seed: 42 + path: ./results + log_images_to: ["local"] + logger: false # options: [tensorboard, wandb, csv] or combinations. + +optimization: + openvino: + apply: false + +# PL Trainer Args. Don't add extra parameter here. +trainer: + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + accumulate_grad_batches: 1 + amp_backend: native + auto_lr_find: false + auto_scale_batch_size: false + auto_select_gpus: false + benchmark: false + check_val_every_n_epoch: 1 + default_root_dir: null + detect_anomaly: false + deterministic: false + devices: 1 + enable_checkpointing: true + enable_model_summary: true + enable_progress_bar: true + fast_dev_run: false + gpus: null # Set automatically + gradient_clip_val: 0 + ipus: null + limit_predict_batches: 1.0 + limit_test_batches: 1.0 + limit_train_batches: 1.0 + limit_val_batches: 1.0 + log_every_n_steps: 50 + log_gpu_memory: null + max_epochs: 100 + max_steps: -1 + max_time: null + min_epochs: null + min_steps: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle + num_nodes: 1 + num_processes: null + num_sanity_val_steps: 0 + overfit_batches: 0.0 + plugins: null + precision: 32 + profiler: null + reload_dataloaders_every_n_epochs: 0 + replace_sampler_ddp: true + strategy: null + sync_batchnorm: false + tpu_cores: null + track_grad_norm: -1 + val_check_interval: 1.0 diff --git a/anomalib/models/draem/lightning_model.py b/anomalib/models/draem/lightning_model.py new file mode 100644 index 0000000000..8d4e5eda40 --- /dev/null +++ b/anomalib/models/draem/lightning_model.py @@ -0,0 +1,124 @@ +"""DRÆM – A discriminatively trained reconstruction embedding for surface anomaly detection. + +Paper https://arxiv.org/abs/2108.07610 +""" + +# Copyright (C) 2020 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +import logging +from typing import Optional, Union + +import torch +from omegaconf import DictConfig, ListConfig +from pytorch_lightning.callbacks import EarlyStopping +from pytorch_lightning.utilities.cli import MODEL_REGISTRY + +from anomalib.models.components import AnomalyModule +from anomalib.models.draem.augmenter import Augmenter +from anomalib.models.draem.loss import SSIM, FocalLoss +from anomalib.models.draem.torch_model import DraemModel + +logger = logging.getLogger(__name__) + +__all__ = ["Draem", "DraemLightning"] + + +@MODEL_REGISTRY +class Draem(AnomalyModule): + """DRÆM: A discriminatively trained reconstruction embedding for surface anomaly detection. 
+ + Args: + anomaly_source_path (Optional[str]): Path to folder that contains the anomaly source images. Random noise will + be used if left empty. + """ + + def __init__(self, anomaly_source_path: Optional[str] = None): + super().__init__() + + self.augmenter = Augmenter(anomaly_source_path) + self.model = DraemModel() + + self.l2_loss = torch.nn.modules.loss.MSELoss() + self.ssim_loss = SSIM() + self.focal_loss = FocalLoss() + + def training_step(self, batch, _): # pylint: disable=arguments-differ + """Training Step of DRAEM. + + Feeds the original image and the simulated anomaly + image through the network and computes the training loss. + + Args: + batch (Dict[str, Any]): Batch containing image filename, image, label and mask + + Returns: + Loss dictionary + """ + # Apply corruption to input image + augmented_image, anomaly_mask = self.augmenter.augment_batch(batch["image"]) + # Generate model prediction + reconstruction, prediction = self.model(augmented_image) + # Compute loss + l2_loss = self.l2_loss(reconstruction, augmented_image) + ssim_loss = self.ssim_loss(reconstruction, augmented_image) + focal_loss = self.focal_loss(prediction, anomaly_mask) + loss = l2_loss + ssim_loss + focal_loss + return {"loss": loss} + + def validation_step(self, batch, _): + """Validation step of DRAEM. The Softmax predictions of the anomalous class are used as anomaly map. + + Args: + batch: Batch of input images + + Returns: + Dictionary to which predicted anomaly maps have been added. + """ + prediction = self.model(batch["image"]) + batch["anomaly_maps"] = prediction[:, 1, :, :] + return batch + + +class DraemLightning(Draem): + """DRÆM: A discriminatively trained reconstruction embedding for surface anomaly detection. + + Args: + hparams (Union[DictConfig, ListConfig]): Model parameters + """ + + def __init__(self, hparams: Union[DictConfig, ListConfig]): + super().__init__(anomaly_source_path=hparams.model.anomaly_source_path) + self.hparams: Union[DictConfig, ListConfig] # type: ignore + self.save_hyperparameters(hparams) + + def configure_callbacks(self): + """Configure model-specific callbacks. + + Note: + This method is used for the existing CLI. + When PL CLI is introduced, configure callback method will be + deprecated, and callbacks will be configured from either + config.yaml file or from CLI. + """ + early_stopping = EarlyStopping( + monitor=self.hparams.model.early_stopping.metric, + patience=self.hparams.model.early_stopping.patience, + mode=self.hparams.model.early_stopping.mode, + ) + return [early_stopping] + + def configure_optimizers(self): # pylint: disable=arguments-differ + """Configure the Adam optimizer.""" + return torch.optim.Adam(params=self.model.parameters(), lr=self.hparams.model.lr) diff --git a/anomalib/models/draem/loss.py b/anomalib/models/draem/loss.py new file mode 100644 index 0000000000..08263aff3f --- /dev/null +++ b/anomalib/models/draem/loss.py @@ -0,0 +1,204 @@ +"""Loss functions for DRAEM implementation.""" + +# Original Code +# Copyright (c) 2022 VitjanZ +# https://github.com/VitjanZ/DRAEM. +# SPDX-License-Identifier: MIT +# +# Modified +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# pylint: disable=invalid-name + +from math import exp + +import numpy as np +import torch +import torch.nn.functional as F +from torch import Tensor, nn + + +class FocalLoss(nn.Module): + """Copy from: https://github.com/Hsuxu/Loss_ToolBox-PyTorch/blob/master/FocalLoss/FocalLoss.py. 
+
+    This is an implementation of Focal Loss with support for label smoothing in the cross entropy, as proposed in
+    'Focal Loss for Dense Object Detection' (https://arxiv.org/abs/1708.02002).
+
+    Args:
+        alpha (tensor): the scalar factor for this criterion
+        gamma (float,double): gamma > 0 reduces the relative loss for well-classified examples (p>0.5) putting more
+            focus on hard misclassified examples
+        smooth (float,double): label smoothing value applied in the cross entropy
+        balance_index (int): balance class index, should be specified when alpha is float
+        size_average (bool, optional): By default, the losses are averaged over each loss element in the batch.
+    """
+
+    def __init__(self, apply_nonlin=None, alpha=None, gamma=2, balance_index=0, smooth=1e-5, size_average=True):
+        super().__init__()
+        self.apply_nonlin = apply_nonlin
+        self.alpha = alpha
+        self.gamma = gamma
+        self.balance_index = balance_index
+        self.smooth = smooth
+        self.size_average = size_average
+
+        if self.smooth is not None:
+            if self.smooth < 0 or self.smooth > 1.0:
+                raise ValueError("smooth value should be in [0,1]")
+
+    def forward(self, logit: Tensor, target: Tensor) -> Tensor:
+        """Compute the focal loss.
+
+        Args:
+            logit (Tensor): Predicted logits
+            target (Tensor): Ground truth
+
+        Returns:
+            Value of the focal loss
+        """
+        if self.apply_nonlin is not None:
+            logit = self.apply_nonlin(logit)
+        num_class = logit.shape[1]
+
+        if logit.dim() > 2:
+            # N,C,d1,d2 -> N,C,m (m=d1*d2*...)
+            logit = logit.view(logit.size(0), logit.size(1), -1)
+            logit = logit.permute(0, 2, 1).contiguous()
+            logit = logit.view(-1, logit.size(-1))
+        target = torch.squeeze(target, 1)
+        target = target.view(-1, 1)
+        alpha = self.alpha
+
+        if alpha is None:
+            alpha = torch.ones(num_class, 1)
+        elif isinstance(alpha, (list, np.ndarray)):
+            assert len(alpha) == num_class
+            alpha = torch.FloatTensor(alpha).view(num_class, 1)
+            alpha = alpha / alpha.sum()
+        elif isinstance(alpha, float):
+            alpha = torch.ones(num_class, 1)
+            alpha = alpha * (1 - self.alpha)
+            alpha[self.balance_index] = self.alpha
+
+        else:
+            raise TypeError("Unsupported alpha type")
+
+        if alpha.device != logit.device:
+            alpha = alpha.to(logit.device)
+
+        idx = target.cpu().long()
+
+        one_hot_key = torch.FloatTensor(target.size(0), num_class).zero_()
+        one_hot_key = one_hot_key.scatter_(1, idx, 1)
+        if one_hot_key.device != logit.device:
+            one_hot_key = one_hot_key.to(logit.device)
+
+        if self.smooth:
+            one_hot_key = torch.clamp(one_hot_key, self.smooth / (num_class - 1), 1.0 - self.smooth)
+        pt = (one_hot_key * logit).sum(1) + self.smooth
+        logpt = pt.log()
+
+        gamma = self.gamma
+
+        alpha = alpha[idx]
+        alpha = torch.squeeze(alpha)
+        loss = -1 * alpha * torch.pow((1 - pt), gamma) * logpt
+
+        if self.size_average:
+            loss = loss.mean()
+        return loss
+
+
+def gaussian(window_size, sigma):
+    """Helper function to compute gaussian."""
+    gauss = torch.Tensor([exp(-((x - window_size // 2) ** 2) / float(2 * sigma**2)) for x in range(window_size)])
+    return gauss / gauss.sum()
+
+
+def create_window(window_size, channel=1):
+    """Helper function to create sliding window."""
+    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
+    _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
+    window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
+    return window
+
+
+def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
+    """Compute the structural similarity metric between two images."""
+    if val_range is None:
+        if torch.max(img1) > 128:
+            max_val = 255
+        else:
+            max_val = 1
+
+        if torch.min(img1) < -0.5:
+            min_val = -1
+        else:
+            min_val = 0
+        length = max_val - min_val
+    else:
+        length = val_range
+
+    padd = window_size // 2
+    (_, channel, height, width) = img1.size()
+    if window is None:
+        real_size = min(window_size, height, width)
+        window = create_window(real_size, channel=channel).to(img1.device)
+
+    mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
+    mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
+
+    mu1_sq = mu1.pow(2)
+    mu2_sq = mu2.pow(2)
+    mu1_mu2 = mu1 * mu2
+
+    sigma1_sq = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq
+    sigma2_sq = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq
+    sigma12 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2
+
+    c1 = (0.01 * length) ** 2
+    c2 = (0.03 * length) ** 2
+
+    v1 = 2.0 * sigma12 + c2
+    v2 = sigma1_sq + sigma2_sq + c2
+    cs = torch.mean(v1 / v2)  # contrast sensitivity
+
+    ssim_map = ((2 * mu1_mu2 + c1) * v1) / ((mu1_sq + mu2_sq + c1) * v2)
+
+    if size_average:
+        ret = ssim_map.mean()
+    else:
+        ret = ssim_map.mean(1).mean(1).mean(1)
+
+    if full:
+        return ret, cs
+    return ret, ssim_map
+
+
+class SSIM(torch.nn.Module):
+    """Implementation of the structural similarity loss."""
+
+    def __init__(self, window_size=11, size_average=True, val_range=None):
+        super().__init__()
+        self.window_size = window_size
+        self.size_average = size_average
+        self.val_range = val_range
+
+        # Assume 1 channel for SSIM
+        self.channel = 1
+        self.window = create_window(window_size)  # created on CPU; moved to the input device in forward()
+
+    def forward(self, img1, img2):
+        """Compute ssim loss between two input images."""
+        (_, channel, _, _) = img1.size()
+
+        if channel == self.channel and self.window.dtype == img1.dtype:
+            window = self.window
+        else:
+            window = create_window(self.window_size, channel).to(img1.device).type(img1.dtype)
+            self.window = window
+            self.channel = channel
+
+        s_score, _ = ssim(img1, img2, window=window, window_size=self.window_size, size_average=self.size_average)
+        return 1.0 - s_score
diff --git a/anomalib/models/draem/perlin.py b/anomalib/models/draem/perlin.py
new file mode 100644
index 0000000000..8fbe3deafa
--- /dev/null
+++ b/anomalib/models/draem/perlin.py
@@ -0,0 +1,134 @@
+"""Helper functions for generating Perlin noise."""
+
+# Original Code
+# Copyright (c) 2022 VitjanZ
+# https://github.com/VitjanZ/DRAEM.
+# SPDX-License-Identifier: MIT
+#
+# Modified
+# Copyright (C) 2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# pylint: disable=invalid-name
+
+import math
+
+import numpy as np
+import torch
+
+
+def lerp_np(x, y, w):
+    """Linearly interpolate between x and y with weight w."""
+    fin_out = (y - x) * w + x
+    return fin_out
+
+
+def rand_perlin_2d_octaves_np(shape, res, octaves=1, persistence=0.5):
+    """Generate Perlin noise parameterized by the octaves method.
Numpy version.""" + noise = np.zeros(shape) + frequency = 1 + amplitude = 1 + for _ in range(octaves): + noise += amplitude * generate_perlin_noise_2d(shape, (frequency * res[0], frequency * res[1])) + frequency *= 2 + amplitude *= persistence + return noise + + +def generate_perlin_noise_2d(shape, res): + """Fractal perlin noise.""" + + def f(t): + return 6 * t**5 - 15 * t**4 + 10 * t**3 + + delta = (res[0] / shape[0], res[1] / shape[1]) + d = (shape[0] // res[0], shape[1] // res[1]) + grid = np.mgrid[0 : res[0] : delta[0], 0 : res[1] : delta[1]].transpose(1, 2, 0) % 1 + # Gradients + angles = 2 * np.pi * np.random.rand(res[0] + 1, res[1] + 1) + gradients = np.dstack((np.cos(angles), np.sin(angles))) + g00 = gradients[0:-1, 0:-1].repeat(d[0], 0).repeat(d[1], 1) + g10 = gradients[1:, 0:-1].repeat(d[0], 0).repeat(d[1], 1) + g01 = gradients[0:-1, 1:].repeat(d[0], 0).repeat(d[1], 1) + g11 = gradients[1:, 1:].repeat(d[0], 0).repeat(d[1], 1) + # Ramps + n00 = np.sum(grid * g00, 2) + n10 = np.sum(np.dstack((grid[:, :, 0] - 1, grid[:, :, 1])) * g10, 2) + n01 = np.sum(np.dstack((grid[:, :, 0], grid[:, :, 1] - 1)) * g01, 2) + n11 = np.sum(np.dstack((grid[:, :, 0] - 1, grid[:, :, 1] - 1)) * g11, 2) + # Interpolation + t = f(grid) + n0 = n00 * (1 - t[:, :, 0]) + t[:, :, 0] * n10 + n1 = n01 * (1 - t[:, :, 0]) + t[:, :, 0] * n11 + return np.sqrt(2) * ((1 - t[:, :, 1]) * n0 + t[:, :, 1] * n1) + + +def rand_perlin_2d_np(shape, res, fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3): + """Generate a random image containing Perlin noise. Numpy version.""" + delta = (res[0] / shape[0], res[1] / shape[1]) + d = (shape[0] // res[0], shape[1] // res[1]) + grid = np.mgrid[0 : res[0] : delta[0], 0 : res[1] : delta[1]].transpose(1, 2, 0) % 1 + + angles = 2 * math.pi * np.random.rand(res[0] + 1, res[1] + 1) + gradients = np.stack((np.cos(angles), np.sin(angles)), axis=-1) + + def tile_grads(slice1, slice2): + return np.repeat(np.repeat(gradients[slice1[0] : slice1[1], slice2[0] : slice2[1]], d[0], axis=0), d[1], axis=1) + + def dot(grad, shift): + return ( + np.stack((grid[: shape[0], : shape[1], 0] + shift[0], grid[: shape[0], : shape[1], 1] + shift[1]), axis=-1) + * grad[: shape[0], : shape[1]] + ).sum(axis=-1) + + n00 = dot(tile_grads([0, -1], [0, -1]), [0, 0]) + n10 = dot(tile_grads([1, None], [0, -1]), [-1, 0]) + n01 = dot(tile_grads([0, -1], [1, None]), [0, -1]) + n11 = dot(tile_grads([1, None], [1, None]), [-1, -1]) + t = fade(grid[: shape[0], : shape[1]]) + return math.sqrt(2) * lerp_np(lerp_np(n00, n10, t[..., 0]), lerp_np(n01, n11, t[..., 0]), t[..., 1]) + + +def rand_perlin_2d(shape, res, fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3): + """Generate a random image containing Perlin noise. 
PyTorch version.""" + delta = (res[0] / shape[0], res[1] / shape[1]) + d = (shape[0] // res[0], shape[1] // res[1]) + + grid = torch.stack(torch.meshgrid(torch.arange(0, res[0], delta[0]), torch.arange(0, res[1], delta[1])), dim=-1) % 1 + angles = 2 * math.pi * torch.rand(res[0] + 1, res[1] + 1) + gradients = torch.stack((torch.cos(angles), torch.sin(angles)), dim=-1) + + def tile_grads(slice1, slice2): + return ( + gradients[slice1[0] : slice1[1], slice2[0] : slice2[1]] + .repeat_interleave(d[0], 0) + .repeat_interleave(d[1], 1) + ) + + def dot(grad, shift): + return ( + torch.stack( + (grid[: shape[0], : shape[1], 0] + shift[0], grid[: shape[0], : shape[1], 1] + shift[1]), dim=-1 + ) + * grad[: shape[0], : shape[1]] + ).sum(dim=-1) + + n00 = dot(tile_grads([0, -1], [0, -1]), [0, 0]) + + n10 = dot(tile_grads([1, None], [0, -1]), [-1, 0]) + n01 = dot(tile_grads([0, -1], [1, None]), [0, -1]) + n11 = dot(tile_grads([1, None], [1, None]), [-1, -1]) + t = fade(grid[: shape[0], : shape[1]]) + return math.sqrt(2) * torch.lerp(torch.lerp(n00, n10, t[..., 0]), torch.lerp(n01, n11, t[..., 0]), t[..., 1]) + + +def rand_perlin_2d_octaves(shape, res, octaves=1, persistence=0.5): + """Generate Perlin noise parameterized by the octaves method. PyTorch version.""" + noise = torch.zeros(shape) + frequency = 1 + amplitude = 1 + for _ in range(octaves): + noise += amplitude * rand_perlin_2d(shape, (frequency * res[0], frequency * res[1])) + frequency *= 2 + amplitude *= persistence + return noise diff --git a/anomalib/models/draem/torch_model.py b/anomalib/models/draem/torch_model.py new file mode 100644 index 0000000000..1b6614e137 --- /dev/null +++ b/anomalib/models/draem/torch_model.py @@ -0,0 +1,496 @@ +"""PyTorch model for the DRAEM model implementation.""" + +# Original Code +# Copyright (c) 2022 VitjanZ +# https://github.com/VitjanZ/DRAEM. +# SPDX-License-Identifier: MIT +# +# Modified +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# pylint: disable=invalid-name + +from typing import Tuple, Union + +import torch +from torch import Tensor, nn + + +class DraemModel(nn.Module): + """DRAEM PyTorch model consisting of the reconstructive and discriminative sub networks.""" + + def __init__(self): + super().__init__() + self.reconstructive_subnetwork = ReconstructiveSubNetwork() + self.discriminative_subnetwork = DiscriminativeSubNetwork(in_channels=6, out_channels=2) + + def forward(self, x: Tensor) -> Union[Tensor, Tuple[Tensor, Tensor]]: + """Compute the reconstruction and anomaly mask from an input image. + + Args: + x (Tensor): batch of input images + + Returns: + Predicted confidence values of the anomaly mask. During training the reconstructed input images are + returned as well. + """ + reconstruction = self.reconstructive_subnetwork(x) + concatenated_inputs = torch.cat([x, reconstruction], axis=1) + prediction = self.discriminative_subnetwork(concatenated_inputs) + prediction = torch.softmax(prediction, dim=1) + if self.training: + return reconstruction, prediction + return prediction + + +class ReconstructiveSubNetwork(nn.Module): + """Autoencoder model that encodes and reconstructs the input image. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + base_width (int): Base dimensionality of the layers of the autoencoder. 
+ """ + + def __init__(self, in_channels: int = 3, out_channels: int = 3, base_width=128): + super().__init__() + self.encoder = EncoderReconstructive(in_channels, base_width) + self.decoder = DecoderReconstructive(base_width, out_channels=out_channels) + + def forward(self, x: Tensor): + """Encode and reconstruct the input images. + + Args: + x (Tensor): Batch of input images + + Returns: + Batch of reconstructed images. + """ + b5 = self.encoder(x) + output = self.decoder(b5) + return output + + +class DiscriminativeSubNetwork(nn.Module): + """Discriminative model that predicts the anomaly mask from the original image and its reconstruction. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + base_width (int): Base dimensionality of the layers of the autoencoder. + """ + + def __init__(self, in_channels: int = 3, out_channels: int = 3, base_width: int = 64, out_features: bool = False): + super().__init__() + self.encoder_segment = EncoderDiscriminative(in_channels, base_width) + self.decoder_segment = DecoderDiscriminative(base_width, out_channels=out_channels) + # self.segment_act = torch.nn.Sigmoid() + self.out_features = out_features + + def forward(self, x: Tensor) -> Tensor: + """Generate the predicted anomaly masks for a batch of input images. + + Args: + x (Tensor): Batch of inputs consisting of the concatenation of the original images + and their reconstructions. + + Returns: + Activations of the output layer corresponding to the normal and anomalous class scores on the pixel level. + """ + b1, b2, b3, b4, b5, b6 = self.encoder_segment(x) + output_segment = self.decoder_segment(b1, b2, b3, b4, b5, b6) + if self.out_features: + return output_segment, b2, b3, b4, b5, b6 + return output_segment + + +class EncoderDiscriminative(nn.Module): + """Encoder part of the discriminator network. + + Args: + in_channels (int): Number of input channels. + base_width (int): Base dimensionality of the layers of the autoencoder. 
+    """
+
+    def __init__(self, in_channels: int, base_width: int):
+        super().__init__()
+        self.block1 = nn.Sequential(
+            nn.Conv2d(in_channels, base_width, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(base_width, base_width, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width),
+            nn.ReLU(inplace=True),
+        )
+        self.mp1 = nn.Sequential(nn.MaxPool2d(2))
+        self.block2 = nn.Sequential(
+            nn.Conv2d(base_width, base_width * 2, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 2),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 2),
+            nn.ReLU(inplace=True),
+        )
+        self.mp2 = nn.Sequential(nn.MaxPool2d(2))
+        self.block3 = nn.Sequential(
+            nn.Conv2d(base_width * 2, base_width * 4, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 4),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 4),
+            nn.ReLU(inplace=True),
+        )
+        self.mp3 = nn.Sequential(nn.MaxPool2d(2))
+        self.block4 = nn.Sequential(
+            nn.Conv2d(base_width * 4, base_width * 8, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 8),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 8),
+            nn.ReLU(inplace=True),
+        )
+        self.mp4 = nn.Sequential(nn.MaxPool2d(2))
+        self.block5 = nn.Sequential(
+            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 8),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 8),
+            nn.ReLU(inplace=True),
+        )
+
+        self.mp5 = nn.Sequential(nn.MaxPool2d(2))
+        self.block6 = nn.Sequential(
+            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 8),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 8),
+            nn.ReLU(inplace=True),
+        )
+
+    def forward(self, x: Tensor) -> Tensor:
+        """Convert the inputs to the salient space by running them through the encoder network.
+
+        Args:
+            x (Tensor): Batch of inputs consisting of the concatenation of the original images
+                and their reconstructions.
+
+        Returns:
+            Computed feature maps for each of the layers in the encoder sub network.
+        """
+        b1 = self.block1(x)
+        mp1 = self.mp1(b1)
+        b2 = self.block2(mp1)
+        mp2 = self.mp2(b2)
+        b3 = self.block3(mp2)
+        mp3 = self.mp3(b3)
+        b4 = self.block4(mp3)
+        mp4 = self.mp4(b4)
+        b5 = self.block5(mp4)
+        mp5 = self.mp5(b5)
+        b6 = self.block6(mp5)
+        return b1, b2, b3, b4, b5, b6
+
+
+class DecoderDiscriminative(nn.Module):
+    """Decoder part of the discriminator network.
+
+    Args:
+        base_width (int): Base dimensionality of the layers of the autoencoder.
+        out_channels (int): Number of output channels.
+ """ + + def __init__(self, base_width: int, out_channels: int = 1): + super().__init__() + + self.up_b = nn.Sequential( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True), + ) + self.db_b = nn.Sequential( + nn.Conv2d(base_width * (8 + 8), base_width * 8, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True), + ) + + self.up1 = nn.Sequential( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2d(base_width * 8, base_width * 4, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True), + ) + self.db1 = nn.Sequential( + nn.Conv2d(base_width * (4 + 8), base_width * 4, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True), + ) + + self.up2 = nn.Sequential( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2d(base_width * 4, base_width * 2, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 2), + nn.ReLU(inplace=True), + ) + self.db2 = nn.Sequential( + nn.Conv2d(base_width * (2 + 4), base_width * 2, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 2), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 2), + nn.ReLU(inplace=True), + ) + + self.up3 = nn.Sequential( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2d(base_width * 2, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), + nn.ReLU(inplace=True), + ) + self.db3 = nn.Sequential( + nn.Conv2d(base_width * (2 + 1), base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), + nn.ReLU(inplace=True), + nn.Conv2d(base_width, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), + nn.ReLU(inplace=True), + ) + + self.up4 = nn.Sequential( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2d(base_width, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), + nn.ReLU(inplace=True), + ) + self.db4 = nn.Sequential( + nn.Conv2d(base_width * 2, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), + nn.ReLU(inplace=True), + nn.Conv2d(base_width, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), + nn.ReLU(inplace=True), + ) + + self.fin_out = nn.Sequential(nn.Conv2d(base_width, out_channels, kernel_size=3, padding=1)) + + def forward(self, b1: Tensor, b2: Tensor, b3: Tensor, b4: Tensor, b5: Tensor, b6: Tensor) -> Tensor: + """Computes predicted anomaly class scores from the intermediate outputs of the encoder sub network. + + Args: + b1 (Tensor): Feature maps extracted from the first block of convolutional layers. + b2 (Tensor): Feature maps extracted from the second block of convolutional layers. + b3 (Tensor): Feature maps extracted from the third block of convolutional layers. + b4 (Tensor): Feature maps extracted from the fourth block of convolutional layers. + b5 (Tensor): Feature maps extracted from the fifth block of convolutional layers. + b6 (Tensor): Feature maps extracted from the sixth block of convolutional layers. + + Returns: + Predicted anomaly class scores per pixel. 
+        """
+        up_b = self.up_b(b6)
+        cat_b = torch.cat((up_b, b5), dim=1)
+        db_b = self.db_b(cat_b)
+
+        up1 = self.up1(db_b)
+        cat1 = torch.cat((up1, b4), dim=1)
+        db1 = self.db1(cat1)
+
+        up2 = self.up2(db1)
+        cat2 = torch.cat((up2, b3), dim=1)
+        db2 = self.db2(cat2)
+
+        up3 = self.up3(db2)
+        cat3 = torch.cat((up3, b2), dim=1)
+        db3 = self.db3(cat3)
+
+        up4 = self.up4(db3)
+        cat4 = torch.cat((up4, b1), dim=1)
+        db4 = self.db4(cat4)
+
+        out = self.fin_out(db4)
+        return out
+
+
+class EncoderReconstructive(nn.Module):
+    """Encoder part of the reconstructive network.
+
+    Args:
+        in_channels (int): Number of input channels.
+        base_width (int): Base dimensionality of the layers of the autoencoder.
+    """
+
+    def __init__(self, in_channels: int, base_width: int):
+        super().__init__()
+        self.block1 = nn.Sequential(
+            nn.Conv2d(in_channels, base_width, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(base_width, base_width, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width),
+            nn.ReLU(inplace=True),
+        )
+        self.mp1 = nn.Sequential(nn.MaxPool2d(2))
+        self.block2 = nn.Sequential(
+            nn.Conv2d(base_width, base_width * 2, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 2),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 2),
+            nn.ReLU(inplace=True),
+        )
+        self.mp2 = nn.Sequential(nn.MaxPool2d(2))
+        self.block3 = nn.Sequential(
+            nn.Conv2d(base_width * 2, base_width * 4, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 4),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 4),
+            nn.ReLU(inplace=True),
+        )
+        self.mp3 = nn.Sequential(nn.MaxPool2d(2))
+        self.block4 = nn.Sequential(
+            nn.Conv2d(base_width * 4, base_width * 8, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 8),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 8),
+            nn.ReLU(inplace=True),
+        )
+        self.mp4 = nn.Sequential(nn.MaxPool2d(2))
+        self.block5 = nn.Sequential(
+            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 8),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, padding=1),
+            nn.BatchNorm2d(base_width * 8),
+            nn.ReLU(inplace=True),
+        )
+
+    def forward(self, x: Tensor) -> Tensor:
+        """Encode a batch of input images to the salient space.
+
+        Args:
+            x (Tensor): Batch of input images.
+
+        Returns:
+            Feature maps extracted from the bottleneck layer.
+        """
+        b1 = self.block1(x)
+        mp1 = self.mp1(b1)
+        b2 = self.block2(mp1)
+        mp2 = self.mp2(b2)
+        b3 = self.block3(mp2)
+        mp3 = self.mp3(b3)
+        b4 = self.block4(mp3)
+        mp4 = self.mp4(b4)
+        b5 = self.block5(mp4)
+        return b5
+
+
+class DecoderReconstructive(nn.Module):
+    """Decoder part of the reconstructive network.
+
+    Args:
+        base_width (int): Base dimensionality of the layers of the autoencoder.
+        out_channels (int): Number of output channels.
+ """ + + def __init__(self, base_width: int, out_channels: int = 1): + super().__init__() + + self.up1 = nn.Sequential( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True), + ) + self.db1 = nn.Sequential( + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 8, base_width * 4, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True), + ) + + self.up2 = nn.Sequential( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True), + ) + self.db2 = nn.Sequential( + nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 4, base_width * 2, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 2), + nn.ReLU(inplace=True), + ) + + self.up3 = nn.Sequential( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 2), + nn.ReLU(inplace=True), + ) + # cat with base*1 + self.db3 = nn.Sequential( + nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 2), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 2, base_width * 1, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 1), + nn.ReLU(inplace=True), + ) + + self.up4 = nn.Sequential( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2d(base_width, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), + nn.ReLU(inplace=True), + ) + self.db4 = nn.Sequential( + nn.Conv2d(base_width * 1, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), + nn.ReLU(inplace=True), + nn.Conv2d(base_width, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), + nn.ReLU(inplace=True), + ) + + self.fin_out = nn.Sequential(nn.Conv2d(base_width, out_channels, kernel_size=3, padding=1)) + # self.fin_out = nn.Conv2d(base_width, out_channels, kernel_size=3, padding=1) + + def forward(self, b5: Tensor) -> Tensor: + """Reconstruct the image from the activations of the bottleneck layer. + + Args: + b5 (Tensor): Activations of the bottleneck layer. + + Returns: + Batch of reconstructed images. 
+ """ + up1 = self.up1(b5) + db1 = self.db1(up1) + + up2 = self.up2(db1) + db2 = self.db2(up2) + + up3 = self.up3(db2) + db3 = self.db3(up3) + + up4 = self.up4(db3) + db4 = self.db4(up4) + + out = self.fin_out(db4) + return out diff --git a/anomalib/models/draem/transform_config.yaml b/anomalib/models/draem/transform_config.yaml new file mode 100644 index 0000000000..5a379ef762 --- /dev/null +++ b/anomalib/models/draem/transform_config.yaml @@ -0,0 +1,26 @@ +{ + "__version__": "1.1.0", + "transform": + { + "__class_fullname__": "Compose", + "p": 1.0, + "transforms": + [ + { + "__class_fullname__": "ToFloat", + "always_apply": false, + "p": 1.0, + "max_value": null, + }, + { + "__class_fullname__": "ToTensorV2", + "always_apply": true, + "p": 1.0, + "transpose_mask": false, + }, + ], + "bbox_params": null, + "keypoint_params": null, + "additional_targets": {}, + }, +} diff --git a/anomalib/pre_processing/pre_process.py b/anomalib/pre_processing/pre_process.py index fd10fc10f6..f443c04ed7 100644 --- a/anomalib/pre_processing/pre_process.py +++ b/anomalib/pre_processing/pre_process.py @@ -103,14 +103,14 @@ def get_transforms(self) -> A.Compose: transforms: A.Compose - if self.config is None and self.image_size is not None: - if isinstance(self.image_size, int): - height, width = self.image_size, self.image_size - elif isinstance(self.image_size, tuple): - height, width = self.image_size - else: - raise ValueError("``image_size`` could be either int or Tuple[int, int]") + if isinstance(self.image_size, int): + height, width = self.image_size, self.image_size + elif isinstance(self.image_size, tuple): + height, width = self.image_size + else: + raise ValueError("``image_size`` could be either int or Tuple[int, int]") + if self.config is None and self.image_size is not None: transforms = A.Compose( [ A.Resize(height=height, width=width, always_apply=True), @@ -131,6 +131,10 @@ def get_transforms(self) -> A.Compose: if isinstance(transforms[-1], ToTensorV2): transforms = A.Compose(transforms[:-1]) + # always resize to specified image size + if not any(isinstance(transform, A.Resize) for transform in transforms): + transforms = A.Compose([A.Resize(height=height, width=width, always_apply=True), transforms]) + return transforms def __call__(self, *args, **kwargs): diff --git a/tests/pre_merge/models/test_model_premerge.py b/tests/pre_merge/models/test_model_premerge.py index f99adbad95..a03b088957 100644 --- a/tests/pre_merge/models/test_model_premerge.py +++ b/tests/pre_merge/models/test_model_premerge.py @@ -31,6 +31,7 @@ class TestModel: ("padim", False), ("dfkde", False), ("dfm", False), + ("draem", False), ("stfpm", False), ("patchcore", False), ("cflow", False), From 0e41f5d239a125bd2101729ee7fae1a1bd30936d Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Thu, 2 Jun 2022 22:00:48 +0200 Subject: [PATCH 02/19] fix preprocessor --- anomalib/pre_processing/pre_process.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/anomalib/pre_processing/pre_process.py b/anomalib/pre_processing/pre_process.py index f443c04ed7..b8ec079aa5 100644 --- a/anomalib/pre_processing/pre_process.py +++ b/anomalib/pre_processing/pre_process.py @@ -103,14 +103,8 @@ def get_transforms(self) -> A.Compose: transforms: A.Compose - if isinstance(self.image_size, int): - height, width = self.image_size, self.image_size - elif isinstance(self.image_size, tuple): - height, width = self.image_size - else: - raise ValueError("``image_size`` could be either int or Tuple[int, int]") - if 
self.config is None and self.image_size is not None: + height, width = self._get_height_and_width() transforms = A.Compose( [ A.Resize(height=height, width=width, always_apply=True), @@ -132,7 +126,8 @@ def get_transforms(self) -> A.Compose: transforms = A.Compose(transforms[:-1]) # always resize to specified image size - if not any(isinstance(transform, A.Resize) for transform in transforms): + if not any(isinstance(transform, A.Resize) for transform in transforms) and self.image_size is not None: + height, width = self._get_height_and_width() transforms = A.Compose([A.Resize(height=height, width=width, always_apply=True), transforms]) return transforms @@ -140,3 +135,13 @@ def get_transforms(self) -> A.Compose: def __call__(self, *args, **kwargs): """Return transformed arguments.""" return self.transforms(*args, **kwargs) + + def _get_height_and_width(self) -> Tuple[Optional[int], Optional[int]]: + """Extract height and width from image size attribute.""" + if isinstance(self.image_size, int): + return self.image_size, self.image_size + if isinstance(self.image_size, tuple): + return int(self.image_size[0]), int(self.image_size[1]) + if self.image_size is None: + return None, None + raise ValueError("``image_size`` could be either int or Tuple[int, int]") From 2518a5c01791c05726dbad4393e182a3a7059876 Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Thu, 2 Jun 2022 22:21:44 +0200 Subject: [PATCH 03/19] fix config and update license --- anomalib/models/draem/LICENSE | 2 +- anomalib/models/draem/config.yaml | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/anomalib/models/draem/LICENSE b/anomalib/models/draem/LICENSE index e0721649d6..7025d18fb4 100644 --- a/anomalib/models/draem/LICENSE +++ b/anomalib/models/draem/LICENSE @@ -1,7 +1,7 @@ Copyright (c) 2022 Intel Corporation SPDX-License-Identifier: Apache-2.0 -The files in this module are based on the original DRAEM implementation by VitjanZ +Some files in this folder are based on the original DRAEM implementation by VitjanZ Original license: ---------------- diff --git a/anomalib/models/draem/config.yaml b/anomalib/models/draem/config.yaml index 6a47627c81..d619be26e0 100644 --- a/anomalib/models/draem/config.yaml +++ b/anomalib/models/draem/config.yaml @@ -7,7 +7,7 @@ dataset: image_size: 256 train_batch_size: 8 test_batch_size: 32 - num_workers: 36 + num_workers: 8 transform_config: train: ./anomalib/models/draem/transform_config.yaml val: ./anomalib/models/draem/transform_config.yaml @@ -22,8 +22,7 @@ dataset: model: name: draem - # anomaly_source_path: ./datasets/dtd - anomaly_source_path: null + anomaly_source_path: null # optional, e.g. 
./datasets/dtd
   lr: 0.0001
   early_stopping:
     patience: 50

From bd9f0714dcf041b0014a1e9779a247cda34898c3 Mon Sep 17 00:00:00 2001
From: Dick Ameln
Date: Fri, 3 Jun 2022 10:12:23 +0200
Subject: [PATCH 04/19] add imgaug to requirements

---
 requirements/base.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/requirements/base.txt b/requirements/base.txt
index 9411498270..a0e3e3e32b 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
@@ -11,3 +11,4 @@ torchtext>=0.9.1
 wandb==0.12.17
 matplotlib>=3.4.3
 gradio>=2.9.4
+imgaug==0.4.0

From 3341e19d7cacb9e19b073a512ba23710011f2561 Mon Sep 17 00:00:00 2001
From: Dick Ameln
Date: Fri, 3 Jun 2022 10:52:13 +0200
Subject: [PATCH 05/19] fix inputs of reconstruction loss

---
 anomalib/models/draem/lightning_model.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/anomalib/models/draem/lightning_model.py b/anomalib/models/draem/lightning_model.py
index 8d4e5eda40..6ef71064ee 100644
--- a/anomalib/models/draem/lightning_model.py
+++ b/anomalib/models/draem/lightning_model.py
@@ -66,13 +66,14 @@ def training_step(self, batch, _):  # pylint: disable=arguments-differ
         Returns:
             Loss dictionary
         """
+        input_image = batch["image"]
         # Apply corruption to input image
-        augmented_image, anomaly_mask = self.augmenter.augment_batch(batch["image"])
+        augmented_image, anomaly_mask = self.augmenter.augment_batch(input_image)
         # Generate model prediction
         reconstruction, prediction = self.model(augmented_image)
         # Compute loss
-        l2_loss = self.l2_loss(reconstruction, augmented_image)
-        ssim_loss = self.ssim_loss(reconstruction, augmented_image)
+        l2_loss = self.l2_loss(reconstruction, input_image)
+        ssim_loss = self.ssim_loss(reconstruction, input_image)
         focal_loss = self.focal_loss(prediction, anomaly_mask)
         loss = l2_loss + ssim_loss + focal_loss
         return {"loss": loss}

From 9284f6e5697c0b650452645d4aaba75f247514a3 Mon Sep 17 00:00:00 2001
From: Dick Ameln
Date: Fri, 3 Jun 2022 10:59:07 +0200
Subject: [PATCH 06/19] add readme

---
 anomalib/models/draem/README.md | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)
 create mode 100644 anomalib/models/draem/README.md

diff --git a/anomalib/models/draem/README.md b/anomalib/models/draem/README.md
new file mode 100644
index 0000000000..1331b2d40f
--- /dev/null
+++ b/anomalib/models/draem/README.md
@@ -0,0 +1,21 @@
+# DRÆM – A discriminatively trained reconstruction embedding for surface anomaly detection
+
+This is the implementation of the [DRAEM](https://arxiv.org/pdf/2108.07610v2.pdf) paper.
+
+Model Type: Segmentation
+
+## Description
+
+DRAEM is a reconstruction-based algorithm that consists of a reconstructive subnetwork and a discriminative subnetwork. DRAEM is trained on simulated anomaly images, which are generated by overlaying normal input images from the training set with anomalous content taken from an unrelated source of image data, masked by random Perlin noise. The reconstructive subnetwork is an autoencoder that is trained to reconstruct the original input images from the augmented images, using a combination of L2 loss and Structural Similarity (SSIM) loss. The input of the discriminative subnetwork is the channel-wise concatenation of the (augmented) input image and the output of the reconstructive subnetwork. The discriminative subnetwork outputs an anomaly map that contains the predicted anomaly score for each pixel location, and is trained using Focal Loss.
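The three losses named in this description combine additively in `training_step` (see PATCH 05 above, which points the reconstruction losses at the original rather than the augmented image). A small self-contained sketch of the objective, with random tensors standing in for real batches and model outputs:

```python
import torch

from anomalib.models.draem.loss import SSIM, FocalLoss

l2_loss = torch.nn.MSELoss()
ssim_loss = SSIM()
focal_loss = FocalLoss()

batch_size, height, width = 8, 256, 256
input_image = torch.rand(batch_size, 3, height, width)     # original, anomaly-free images
reconstruction = torch.rand(batch_size, 3, height, width)  # reconstructive subnetwork output
prediction = torch.softmax(torch.rand(batch_size, 2, height, width), dim=1)  # discriminative output
anomaly_mask = torch.randint(0, 2, (batch_size, 1, height, width)).float()   # simulated ground truth

# Reconstruction terms compare against the original image (PATCH 05);
# the focal term supervises the pixel-level anomaly prediction.
loss = (
    l2_loss(reconstruction, input_image)
    + ssim_loss(reconstruction, input_image)
    + focal_loss(prediction, anomaly_mask)
)
print(loss)
```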
+
+## Architecture
+![DRAEM Architecture](../../../docs/source/images/draem/architecture.png "DRAEM Architecture")
+
+## Usage
+
+`python tools/train.py --model draem`
+
+## Benchmark
+
+Benchmarking results are not yet available for this algorithm. Please check again later.

From 7c50380836885295ba1cffeddb51792c30f7f168 Mon Sep 17 00:00:00 2001
From: Dick Ameln
Date: Fri, 3 Jun 2022 10:59:51 +0200
Subject: [PATCH 07/19] add architecture image

---
 docs/source/images/draem/architecture.png | Bin 0 -> 81458 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 docs/source/images/draem/architecture.png

diff --git a/docs/source/images/draem/architecture.png b/docs/source/images/draem/architecture.png
new file mode 100644
index 0000000000000000000000000000000000000000..a791020b63b0bc93fb2f49c2fbda4def02ac40fc
GIT binary patch
literal 81458
[81458 bytes of binary PNG data omitted: DRAEM architecture diagram]
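Beyond the `tools/train.py` entry point named in the README, the same run can be driven from the Python API. A sketch assuming anomalib's standard helpers behave at this revision as elsewhere in the code base (`get_configurable_parameters` and `get_datamodule` are not part of this PR, so treat the exact imports as assumptions):

```python
from pytorch_lightning import Trainer

from anomalib.config import get_configurable_parameters
from anomalib.data import get_datamodule
from anomalib.models import get_model

# Assumes the default config.yaml shipped in PATCH 01 is picked up for "draem".
config = get_configurable_parameters(model_name="draem")
datamodule = get_datamodule(config)
model = get_model(config)  # resolves "draem" via the registry change in PATCH 01

# Early stopping comes from DraemLightning.configure_callbacks().
trainer = Trainer(**config.trainer)
trainer.fit(model=model, datamodule=datamodule)
```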
From 7c50380836885295ba1cffeddb51792c30f7f168 Mon Sep 17 00:00:00 2001
From: Dick Ameln
Date: Fri, 3 Jun 2022 10:59:51 +0200
Subject: [PATCH 07/19] add architecture image

---
 docs/source/images/draem/architecture.png | Bin 0 -> 81458 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 docs/source/images/draem/architecture.png

diff --git a/docs/source/images/draem/architecture.png b/docs/source/images/draem/architecture.png
new file mode 100644
index 0000000000000000000000000000000000000000..a791020b63b0bc93fb2f49c2fbda4def02ac40fc
GIT binary patch
literal 81458
[81458 bytes of base85-encoded PNG data omitted: DRAEM architecture diagram]
=rHvgg0fkDf4_y3;)57FJUA6*VC%yathHutG)|^z)a^#mIU}K|bm>q-s7v4pd>P;kj53x0Od38O+erBZa7j3S8hs>lK%_5X`NHRi%ZNnrH zoX8l876m%#ZmCT*`WXtlid=ZYJX<}lrf<~Pz~sx^%V=YEqlREhgGA4ypGAN4#DCdO zvjgfWq$>GPf2ARiiOc|U4w+}4=sCMWaW-!`w~ zAwE4l{Zr==G#al48fh}k4yNk~8Q78B9i6^9Lbd+%4HfPWcRqkyu>{m|r@hRAuC6Y) z{4tUv1qXwz!%ZfX(<_&^V(Rl?Z5I{A*Nj9R!4M?Glno}|eN;6@s>%^7L#qD|fFYxP!{6_b9roBn|T7KH1 zo|D&di@9TSy9S*r?~(Q>aV2{6%-ZgYW7mHU`dvX}(wDzz5($go{PG1^w$olm$_qQD zU{tj+iT4S0je8AG-P8O9c_${%6yq>&vrtd@MBO`DyqftI1lA?} z-POXlnNdn7X(kV#0nFhhV;N^iCC6%6GhxYc<3RvUxtb=_Rpfl!jArW?5H6W@WjCo;*!X;5c5TQv+{tYkiXwA8#~*vj z|NavDY?ti8<$`i^bKGHJQG87ajdjb4>CbS<&*8X1ONEc_oZqzjN{yp8J^}WKLnk{f z!g(uiF+1S`;)OSFtdp3GxL;W0^Q~{CWL2JGr+Gm(T}Y{OEHaM#dQ#nK)KVqKYQIhS zd$*BxddsAZ1ofw`QYue42tpW~+1#rj@>>C1+LYLt(6lMh;EjuH)@Oe|q&3u&*}0uU z98f*@(PKl@HXp>)-+jZzdkl$+%P2Q%lmq2<9FAV;Lz7Q{rlm3jJKal z2U=D_Hm59xHo|gSTwKu43T(5fu&<>UEM&ern|&eP#)m=oDhix+MvkFXS5|AUZ{Za= zd_GoXBLw`!RDRzcQ3)QM(?tWmlpFlawdIdqN8n(4Zf$xQ9`an9==Tm363i5W99dG| zA&FP?Ibr4VL7}!_UQ---bf9TUhc#>ixp$+c>7+5M<}pWVf8m<+;iB?^>h~AMFX3*2 zT>U$dm9Qat%-eWPCC#PZ(s6*y*7#ZqH%l0h4&m(l0zgRA4YZ>PzCx<=Oen*>Yys%E zD(K!6vfjY5>gUdpx;sqk-szY;=t~3r@3nOa%FLG2L{=4q1t$@vsZHb$A}^b7RMH#o z_)7aD=S{jdJ?&lhLSi=~ZSgvVBK_!=a6rzVq`bTw0D3^vWyOW0zZwvCu%0~wpe^~L zaiphDzzPd!hG1|epjNrNy9c+MqM)LpBRyR|^#M3A(AEYZP4x`$9z?>r*O(_$hqqYg zn5IahWzfUgv2bv7CiEOYm{&am9KFx{Y%{?3AoL`fmxJK*AH4N}e_?-D?o8Lw(NV;` zvHsTb@^W?6h$m%ixBK|Jm*wu@tWs!RrT$yTH}sQqAQ-^a(INec627+USrqCEs~Y$-o-JKgS?xbn) zfa3}rQ}-8trO81gSrR43c>ttE`TcvbWOk#r!%O$0<<-EWo<-KvqP=UFc z10DV+4qo0LUzZg3CU_iYbphz^v^n+_z^zpaMcz}9lF|@hEKD!4g0t6U7*vy>EVryI zGA1TwUPw(1t!jHx$7Ew3OvvqCzy{Cw`C^9}6q^We?pOB$GW-zNCokz z?0}Nl6i|%`7Yo0xYODMR$Rv%q+7UnHlDS^oI&lPjP6BfP8wcQ3pA4cNBeAHJ1`-$U?^|0W Date: Sat, 4 Jun 2022 10:13:58 +0200 Subject: [PATCH 08/19] use shorter license header --- anomalib/models/draem/__init__.py | 15 ++------------- anomalib/models/draem/augmenter.py | 4 ---- anomalib/models/draem/lightning_model.py | 15 ++------------- 3 files changed, 4 insertions(+), 30 deletions(-) diff --git a/anomalib/models/draem/__init__.py b/anomalib/models/draem/__init__.py index 3e691d9efa..68091b1f91 100644 --- a/anomalib/models/draem/__init__.py +++ b/anomalib/models/draem/__init__.py @@ -1,18 +1,7 @@ """DRAEM model.""" -# Copyright (C) 2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. 
+# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 from .lightning_model import DraemLightning diff --git a/anomalib/models/draem/augmenter.py b/anomalib/models/draem/augmenter.py index 33216f0611..97d9c4ba76 100644 --- a/anomalib/models/draem/augmenter.py +++ b/anomalib/models/draem/augmenter.py @@ -143,8 +143,4 @@ def augment_batch(self, batch: Tensor) -> Tuple[Tensor, Tensor]: augmented_batch = batch * (1 - masks) + (1 - beta) * perturbations + beta * batch * (masks) - # for i in range(batch_size): - # cv2.imshow("aug", augmented_batch[i].permute(1, 2, 0).cpu().numpy()) - # cv2.waitKey(0) - return augmented_batch, masks diff --git a/anomalib/models/draem/lightning_model.py b/anomalib/models/draem/lightning_model.py index 6ef71064ee..69fc15fd59 100644 --- a/anomalib/models/draem/lightning_model.py +++ b/anomalib/models/draem/lightning_model.py @@ -3,19 +3,8 @@ Paper https://arxiv.org/abs/2108.07610 """ -# Copyright (C) 2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 import logging from typing import Optional, Union From 29de3a8abafdb0423ec81522f9b5d287ca4dec8e Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Sat, 4 Jun 2022 11:00:26 +0200 Subject: [PATCH 09/19] allow multiple image extensions --- anomalib/models/draem/augmenter.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/anomalib/models/draem/augmenter.py b/anomalib/models/draem/augmenter.py index 97d9c4ba76..f7e22bc3ac 100644 --- a/anomalib/models/draem/augmenter.py +++ b/anomalib/models/draem/augmenter.py @@ -19,6 +19,7 @@ import numpy as np import torch from torch import Tensor +from torchvision.datasets.folder import IMG_EXTENSIONS from anomalib.models.draem.perlin import rand_perlin_2d_np @@ -33,10 +34,10 @@ class Augmenter: def __init__(self, anomaly_source_path: Optional[str] = None): + self.anomaly_source_paths = [] if anomaly_source_path is not None: - self.anomaly_source_paths = sorted(glob.glob(anomaly_source_path + "/**/*.jpg", recursive=True)) - else: - self.anomaly_source_paths = [] + for img_ext in IMG_EXTENSIONS: + self.anomaly_source_paths.extend(glob.glob(anomaly_source_path + "/**/*" + img_ext, recursive=True)) self.augmenters = [ iaa.GammaContrast((0.5, 2.0), per_channel=True), From c95e1dd32a6b292d19725d38bf6c15be8020d293 Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Mon, 6 Jun 2022 22:27:21 +0200 Subject: [PATCH 10/19] replace loss functions --- anomalib/models/draem/lightning_model.py | 12 +- anomalib/models/draem/loss.py | 209 ++--------------------- anomalib/models/draem/torch_model.py | 3 +- 3 files changed, 21 insertions(+), 203 deletions(-) diff --git a/anomalib/models/draem/lightning_model.py b/anomalib/models/draem/lightning_model.py index 69fc15fd59..5b658ddab4 100644 --- a/anomalib/models/draem/lightning_model.py +++ b/anomalib/models/draem/lightning_model.py @@ -16,7 +16,7 @@ from anomalib.models.components import AnomalyModule from 
anomalib.models.draem.augmenter import Augmenter
-from anomalib.models.draem.loss import SSIM, FocalLoss
+from anomalib.models.draem.loss import DraemLoss
 from anomalib.models.draem.torch_model import DraemModel
 
 logger = logging.getLogger(__name__)
@@ -38,10 +38,7 @@ def __init__(self, anomaly_source_path: Optional[str] = None):
 
         self.augmenter = Augmenter(anomaly_source_path)
         self.model = DraemModel()
-
-        self.l2_loss = torch.nn.modules.loss.MSELoss()
-        self.ssim_loss = SSIM()
-        self.focal_loss = FocalLoss()
+        self.loss = DraemLoss()
 
     def training_step(self, batch, _):  # pylint: disable=arguments-differ
         """Training Step of DRAEM.
@@ -61,10 +58,7 @@ def training_step(self, batch, _):  # pylint: disable=arguments-differ
         # Generate model prediction
         reconstruction, prediction = self.model(augmented_image)
         # Compute loss
-        l2_loss = self.l2_loss(reconstruction, input_image)
-        ssim_loss = self.ssim_loss(reconstruction, input_image)
-        focal_loss = self.focal_loss(prediction, anomaly_mask)
-        loss = l2_loss + ssim_loss + focal_loss
+        loss = self.loss(input_image, reconstruction, anomaly_mask, prediction)
         return {"loss": loss}
 
     def validation_step(self, batch, _):
diff --git a/anomalib/models/draem/loss.py b/anomalib/models/draem/loss.py
index 08263aff3f..c04ed57fad 100644
--- a/anomalib/models/draem/loss.py
+++ b/anomalib/models/draem/loss.py
@@ -1,204 +1,29 @@
-"""Loss functions for DRAEM implementation."""
+"""Loss function for the DRAEM model implementation."""
 
-# Original Code
-# Copyright (c) 2022 VitjanZ
-# https://github.com/VitjanZ/DRAEM.
-# SPDX-License-Identifier: MIT
-#
-# Modified
 # Copyright (C) 2022 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-# pylint: disable=invalid-name
+from kornia.losses import FocalLoss, SSIMLoss
+from torch import nn
 
-from math import exp
 
-import numpy as np
-import torch
-import torch.nn.functional as F
-from torch import Tensor, nn
+class DraemLoss(nn.Module):
+    """Overall loss function of the DRAEM model.
 
-
-class FocalLoss(nn.Module):
-    """Copy from: https://github.com/Hsuxu/Loss_ToolBox-PyTorch/blob/master/FocalLoss/FocalLoss.py.
-
-    This is a implementation of Focal Loss with smooth label cross entropy supported which is proposed in
-    'Focal Loss for Dense Object Detection. (https://arxiv.org/abs/1708.02002)'
-
-    Args:
-        alpha (tensor): 3D or 4D the scalar factor for this criterion
-        gamma (float,double): gamma > 0 reduces the relative loss for well-classified examples (p>0.5) putting more
-            focus on hard misclassified example
-        smooth (float,double): smooth value when cross entropy
-        balance_index (int): balance class index, should be specific when alpha is float
-        size_average (bool, optional): By default, the losses are averaged over each loss element in the batch.
+    The total loss consists of the sum of the L2 loss and the Structural Similarity (SSIM) loss between the
+    reconstructed image and the input image, and the Focal loss between the predicted and ground truth anomaly masks.
     """
 
-    def __init__(self, apply_nonlin=None, alpha=None, gamma=2, balance_index=0, smooth=1e-5, size_average=True):
+    def __init__(self):
         super().__init__()
-        self.apply_nonlin = apply_nonlin
-        self.alpha = alpha
-        self.gamma = gamma
-        self.balance_index = balance_index
-        self.smooth = smooth
-        self.size_average = size_average
-
-        if self.smooth is not None:
-            if self.smooth < 0 or self.smooth > 1.0:
-                raise ValueError("smooth value should be in [0,1]")
-
-    def forward(self, logit: Tensor, target: Tensor) -> Tensor:
-        """Compute the focal loss.
- - Args: - logit (Tensor): Predicted logits - target (Tensor): Ground truth - - Returns: - Value of the focal loss - """ - if self.apply_nonlin is not None: - logit = self.apply_nonlin(logit) - num_class = logit.shape[1] - - if logit.dim() > 2: - # N,C,d1,d2 -> N,C,m (m=d1*d2*...) - logit = logit.view(logit.size(0), logit.size(1), -1) - logit = logit.permute(0, 2, 1).contiguous() - logit = logit.view(-1, logit.size(-1)) - target = torch.squeeze(target, 1) - target = target.view(-1, 1) - alpha = self.alpha - - if alpha is None: - alpha = torch.ones(num_class, 1) - elif isinstance(alpha, (list, np.ndarray)): - assert len(alpha) == num_class - alpha = torch.FloatTensor(alpha).view(num_class, 1) - alpha = alpha / alpha.sum() - elif isinstance(alpha, float): - alpha = torch.ones(num_class, 1) - alpha = alpha * (1 - self.alpha) - alpha[self.balance_index] = self.alpha - - else: - raise TypeError("Not support alpha type") - - if alpha.device != logit.device: - alpha = alpha.to(logit.device) - - idx = target.cpu().long() - - one_hot_key = torch.FloatTensor(target.size(0), num_class).zero_() - one_hot_key = one_hot_key.scatter_(1, idx, 1) - if one_hot_key.device != logit.device: - one_hot_key = one_hot_key.to(logit.device) - - if self.smooth: - one_hot_key = torch.clamp(one_hot_key, self.smooth / (num_class - 1), 1.0 - self.smooth) - pt = (one_hot_key * logit).sum(1) + self.smooth - logpt = pt.log() - - gamma = self.gamma - - alpha = alpha[idx] - alpha = torch.squeeze(alpha) - loss = -1 * alpha * torch.pow((1 - pt), gamma) * logpt - - if self.size_average: - loss = loss.mean() - return loss - - -def gaussian(window_size, sigma): - """Helper function to compute gaussian.""" - gauss = torch.Tensor([exp(-((x - window_size // 2) ** 2) / float(2 * sigma**2)) for x in range(window_size)]) - return gauss / gauss.sum() - - -def create_window(window_size, channel=1): - """Helper function to create sliding window.""" - _1D_window = gaussian(window_size, 1.5).unsqueeze(1) - _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) - window = _2D_window.expand(channel, 1, window_size, window_size).contiguous() - return window - - -def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None): - """Compute the structural similatity metric between two images.""" - if val_range is None: - if torch.max(img1) > 128: - max_val = 255 - else: - max_val = 1 - - if torch.min(img1) < -0.5: - min_val = -1 - else: - min_val = 0 - length = max_val - min_val - else: - length = val_range - - padd = window_size // 2 - (_, channel, height, width) = img1.size() - if window is None: - real_size = min(window_size, height, width) - window = create_window(real_size, channel=channel).to(img1.device) - - mu1 = F.conv2d(img1, window, padding=padd, groups=channel) - mu2 = F.conv2d(img2, window, padding=padd, groups=channel) - - mu1_sq = mu1.pow(2) - mu2_sq = mu2.pow(2) - mu1_mu2 = mu1 * mu2 - - sigma1_sq = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq - sigma2_sq = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq - sigma12 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2 - - c1 = (0.01 * length) ** 2 - c2 = (0.03 * length) ** 2 - - v1 = 2.0 * sigma12 + c2 - v2 = sigma1_sq + sigma2_sq + c2 - cs = torch.mean(v1 / v2) # contrast sensitivity - - ssim_map = ((2 * mu1_mu2 + c1) * v1) / ((mu1_sq + mu2_sq + c1) * v2) - - if size_average: - ret = ssim_map.mean() - else: - ret = ssim_map.mean(1).mean(1).mean(1) - - if full: - 
return ret, cs - return ret, ssim_map - - -class SSIM(torch.nn.Module): - """Implementation of the structural similarity loss.""" - - def __init__(self, window_size=11, size_average=True, val_range=None): - super().__init__() - self.window_size = window_size - self.size_average = size_average - self.val_range = val_range - - # Assume 1 channel for SSIM - self.channel = 1 - self.window = create_window(window_size).cuda() - - def forward(self, img1, img2): - """Compute ssim loss between two input images.""" - (_, channel, _, _) = img1.size() - if channel == self.channel and self.window.dtype == img1.dtype: - window = self.window - else: - window = create_window(self.window_size, channel).to(img1.device).type(img1.dtype) - self.window = window - self.channel = channel + self.l2_loss = nn.modules.loss.MSELoss() + self.focal_loss = FocalLoss(alpha=1, reduction="mean") + self.ssim_kornia_loss = SSIMLoss(window_size=11) - s_score, _ = ssim(img1, img2, window=window, window_size=self.window_size, size_average=self.size_average) - return 1.0 - s_score + def forward(self, input_image, reconstruction, anomaly_mask, prediction): + """Compute the loss over a batch for the DRAEM model.""" + l2_loss_val = self.l2_loss(reconstruction, input_image) + focal_loss_val = self.focal_loss(prediction, anomaly_mask.squeeze(1).long()) + ssim_loss_val = self.ssim_kornia_loss(reconstruction, input_image) * 2 + return l2_loss_val + ssim_loss_val + focal_loss_val diff --git a/anomalib/models/draem/torch_model.py b/anomalib/models/draem/torch_model.py index 1b6614e137..5f0a627caf 100644 --- a/anomalib/models/draem/torch_model.py +++ b/anomalib/models/draem/torch_model.py @@ -38,10 +38,9 @@ def forward(self, x: Tensor) -> Union[Tensor, Tuple[Tensor, Tensor]]: reconstruction = self.reconstructive_subnetwork(x) concatenated_inputs = torch.cat([x, reconstruction], axis=1) prediction = self.discriminative_subnetwork(concatenated_inputs) - prediction = torch.softmax(prediction, dim=1) if self.training: return reconstruction, prediction - return prediction + return torch.softmax(prediction, dim=1) class ReconstructiveSubNetwork(nn.Module): From 9e3a699b69adab81c9efc8ebaae88ffb2d0a2b57 Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Tue, 7 Jun 2022 12:18:57 +0200 Subject: [PATCH 11/19] ssim_kornia_loss -> ssim_loss --- anomalib/models/draem/loss.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/anomalib/models/draem/loss.py b/anomalib/models/draem/loss.py index c04ed57fad..1e05bfa073 100644 --- a/anomalib/models/draem/loss.py +++ b/anomalib/models/draem/loss.py @@ -19,11 +19,11 @@ def __init__(self): self.l2_loss = nn.modules.loss.MSELoss() self.focal_loss = FocalLoss(alpha=1, reduction="mean") - self.ssim_kornia_loss = SSIMLoss(window_size=11) + self.ssim_loss = SSIMLoss(window_size=11) def forward(self, input_image, reconstruction, anomaly_mask, prediction): """Compute the loss over a batch for the DRAEM model.""" l2_loss_val = self.l2_loss(reconstruction, input_image) focal_loss_val = self.focal_loss(prediction, anomaly_mask.squeeze(1).long()) - ssim_loss_val = self.ssim_kornia_loss(reconstruction, input_image) * 2 + ssim_loss_val = self.ssim_loss(reconstruction, input_image) * 2 return l2_loss_val + ssim_loss_val + focal_loss_val From 0bc4dc5dd452f338640a5b68e13ce5412f67487f Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Tue, 7 Jun 2022 12:22:46 +0200 Subject: [PATCH 12/19] update license headers --- anomalib/models/draem/augmenter.py | 2 +- anomalib/models/draem/perlin.py | 2 +- 
 anomalib/models/draem/torch_model.py | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/anomalib/models/draem/augmenter.py b/anomalib/models/draem/augmenter.py
index f7e22bc3ac..4a453c2e9b 100644
--- a/anomalib/models/draem/augmenter.py
+++ b/anomalib/models/draem/augmenter.py
@@ -1,7 +1,7 @@
 """Augmenter module to generate out-of-distribution samples for the DRAEM implementation."""
 
 # Original Code
-# Copyright (c) 2022 VitjanZ
+# Copyright (c) 2021 VitjanZ
 # https://github.com/VitjanZ/DRAEM.
 # SPDX-License-Identifier: MIT
 #
diff --git a/anomalib/models/draem/perlin.py b/anomalib/models/draem/perlin.py
index 8fbe3deafa..0c7a72f394 100644
--- a/anomalib/models/draem/perlin.py
+++ b/anomalib/models/draem/perlin.py
@@ -1,7 +1,7 @@
 """Helper functions for generating Perlin noise."""
 
 # Original Code
-# Copyright (c) 2022 VitjanZ
+# Copyright (c) 2021 VitjanZ
 # https://github.com/VitjanZ/DRAEM.
 # SPDX-License-Identifier: MIT
 #
diff --git a/anomalib/models/draem/torch_model.py b/anomalib/models/draem/torch_model.py
index 5f0a627caf..69773a7fe2 100644
--- a/anomalib/models/draem/torch_model.py
+++ b/anomalib/models/draem/torch_model.py
@@ -1,7 +1,7 @@
 """PyTorch model for the DRAEM model implementation."""
 
 # Original Code
-# Copyright (c) 2022 VitjanZ
+# Copyright (c) 2021 VitjanZ
 # https://github.com/VitjanZ/DRAEM.
 # SPDX-License-Identifier: MIT
 #

From e79833718d5555472494104d8098c27b04ece241 Mon Sep 17 00:00:00 2001
From: Dick Ameln
Date: Tue, 7 Jun 2022 12:44:20 +0200
Subject: [PATCH 13/19] clarify anomaly source dataset in readme

---
 anomalib/models/draem/README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/anomalib/models/draem/README.md b/anomalib/models/draem/README.md
index 1331b2d40f..8780574c98 100644
--- a/anomalib/models/draem/README.md
+++ b/anomalib/models/draem/README.md
@@ -8,6 +8,7 @@ Model Type: Segmentation
 DRAEM is a reconstruction-based algorithm that consists of a reconstructive subnetwork and a discriminative subnetwork. DRAEM is trained on simulated anomaly images, generated by augmenting normal input images from the training set with a random Perlin noise mask extracted from an unrelated source of image data. The reconstructive subnetwork is an autoencoder architecture that is trained to reconstruct the original input images from the augmented images. The reconstructive subnetwork is trained using a combination of L2 loss and Structural Similarity loss. The input of the discriminative subnetwork consists of the channel-wise concatenation of the (augmented) input image and the output of the reconstructive subnetwork. The output of the discriminative subnetwork is an anomaly map that contains the predicted anomaly scores for each pixel location. The discriminative subnetwork is trained using Focal Loss.
+For optimal results, DRAEM requires specifying the path to a folder of image data that will be used as the source of the anomalous pixel regions in the simulated anomaly images. The path can be specified by editing the value of the `model.anomaly_source_path` parameter in the `config.yaml` file. The authors of the original paper recommend using the [DTD](https://www.robots.ox.ac.uk/~vgg/data/dtd/) dataset as the anomaly source.
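To make the anomaly source setting concrete, here is a minimal usage sketch of the `Augmenter` introduced in this series. The `./datasets/dtd` path is a hypothetical local copy of DTD, and the random batch is a stand-in for normal training images:

```python
import torch

# Note: this module is moved to anomalib.models.draem.utils in PATCH 17.
from anomalib.models.draem.augmenter import Augmenter

# Point the augmenter at a folder of anomaly source images; if the argument
# is omitted, it falls back to random Perlin noise instead.
augmenter = Augmenter(anomaly_source_path="./datasets/dtd")

batch = torch.rand(8, 3, 256, 256)  # dummy batch of normal images
# Returns the simulated anomaly images and the pixel-level ground truth masks.
augmented_batch, masks = augmenter.augment_batch(batch)
```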
## Architecture

![DRAEM Architecture](../../../docs/source/images/draem/architecture.png "DRAEM Architecture")

From efb306d134ef80e414d03a4bf385ec8096488669 Mon Sep 17 00:00:00 2001
From: Samet Akcay
Date: Tue, 7 Jun 2022 06:28:08 -0700
Subject: [PATCH 14/19] Fix model registration

---
 anomalib/models/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/anomalib/models/__init__.py b/anomalib/models/__init__.py
index b848a86619..3ee3d44932 100644
--- a/anomalib/models/__init__.py
+++ b/anomalib/models/__init__.py
@@ -42,7 +42,7 @@ def get_model(config: Union[DictConfig, ListConfig]) -> AnomalyModule:
     Returns:
         AnomalyModule: Anomaly Model
     """
-    model_list: List[str] = ["cflow", "dfkde", "dfm", "draem", "draem", "ganomaly", "padim", "patchcore", "stfpm"]
+    model_list: List[str] = ["cflow", "dfkde", "dfm", "draem", "fastflow", "ganomaly", "padim", "patchcore", "stfpm"]
     model: AnomalyModule
     if config.model.name in model_list:

From 9bb2a210bde4a1d0414495c07d0a8cc334519e58 Mon Sep 17 00:00:00 2001
From: Dick Ameln
Date: Tue, 7 Jun 2022 15:30:55 +0200
Subject: [PATCH 15/19] remove comments

---
 anomalib/models/draem/torch_model.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/anomalib/models/draem/torch_model.py b/anomalib/models/draem/torch_model.py
index 69773a7fe2..133441d4d0 100644
--- a/anomalib/models/draem/torch_model.py
+++ b/anomalib/models/draem/torch_model.py
@@ -84,7 +84,6 @@ def __init__(self, in_channels: int = 3, out_channels: int = 3, base_width: int
         super().__init__()
         self.encoder_segment = EncoderDiscriminative(in_channels, base_width)
         self.decoder_segment = DecoderDiscriminative(base_width, out_channels=out_channels)
-        # self.segment_act = torch.nn.Sigmoid()
         self.out_features = out_features
 
     def forward(self, x: Tensor) -> Tensor:
@@ -468,7 +467,6 @@ def __init__(self, base_width: int, out_channels: int = 1):
         )
 
         self.fin_out = nn.Sequential(nn.Conv2d(base_width, out_channels, kernel_size=3, padding=1))
-        # self.fin_out = nn.Conv2d(base_width, out_channels, kernel_size=3, padding=1)
 
     def forward(self, b5: Tensor) -> Tensor:

From 5d9a82117424ad7673457de395d1baa2ef17392c Mon Sep 17 00:00:00 2001
From: Dick Ameln
Date: Tue, 7 Jun 2022 16:01:15 +0200
Subject: [PATCH 16/19] update variable names

---
 anomalib/models/draem/torch_model.py | 117 +++++++++++++--------------
 1 file changed, 56 insertions(+), 61 deletions(-)

diff --git a/anomalib/models/draem/torch_model.py b/anomalib/models/draem/torch_model.py
index 133441d4d0..9da0dab87c 100644
--- a/anomalib/models/draem/torch_model.py
+++ b/anomalib/models/draem/torch_model.py
@@ -9,8 +9,6 @@
 # Copyright (C) 2022 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-# pylint: disable=invalid-name
-
 from typing import Tuple, Union
 
 import torch
@@ -25,7 +23,7 @@ def __init__(self):
         self.reconstructive_subnetwork = ReconstructiveSubNetwork()
         self.discriminative_subnetwork = DiscriminativeSubNetwork(in_channels=6, out_channels=2)
 
-    def forward(self, x: Tensor) -> Union[Tensor, Tuple[Tensor, Tensor]]:
+    def forward(self, batch: Tensor) -> Union[Tensor, Tuple[Tensor, Tensor]]:
         """Compute the reconstruction and anomaly mask from an input image.
 
         Args:
             batch (Tensor): batch of input images
 
         Returns:
             Predicted confidence values of the anomaly mask. During training the reconstructed input images are
             returned as well.
""" - reconstruction = self.reconstructive_subnetwork(x) - concatenated_inputs = torch.cat([x, reconstruction], axis=1) + reconstruction = self.reconstructive_subnetwork(batch) + concatenated_inputs = torch.cat([batch, reconstruction], axis=1) prediction = self.discriminative_subnetwork(concatenated_inputs) if self.training: return reconstruction, prediction @@ -57,18 +55,18 @@ def __init__(self, in_channels: int = 3, out_channels: int = 3, base_width=128): self.encoder = EncoderReconstructive(in_channels, base_width) self.decoder = DecoderReconstructive(base_width, out_channels=out_channels) - def forward(self, x: Tensor): + def forward(self, batch: Tensor) -> Tensor: """Encode and reconstruct the input images. Args: - x (Tensor): Batch of input images + batch (Tensor): Batch of input images Returns: Batch of reconstructed images. """ - b5 = self.encoder(x) - output = self.decoder(b5) - return output + encoded = self.encoder(batch) + decoded = self.decoder(encoded) + return decoded class DiscriminativeSubNetwork(nn.Module): @@ -80,27 +78,24 @@ class DiscriminativeSubNetwork(nn.Module): base_width (int): Base dimensionality of the layers of the autoencoder. """ - def __init__(self, in_channels: int = 3, out_channels: int = 3, base_width: int = 64, out_features: bool = False): + def __init__(self, in_channels: int = 3, out_channels: int = 3, base_width: int = 64): super().__init__() self.encoder_segment = EncoderDiscriminative(in_channels, base_width) self.decoder_segment = DecoderDiscriminative(base_width, out_channels=out_channels) - self.out_features = out_features - def forward(self, x: Tensor) -> Tensor: + def forward(self, batch: Tensor) -> Tensor: """Generate the predicted anomaly masks for a batch of input images. Args: - x (Tensor): Batch of inputs consisting of the concatenation of the original images + batch (Tensor): Batch of inputs consisting of the concatenation of the original images and their reconstructions. Returns: Activations of the output layer corresponding to the normal and anomalous class scores on the pixel level. """ - b1, b2, b3, b4, b5, b6 = self.encoder_segment(x) - output_segment = self.decoder_segment(b1, b2, b3, b4, b5, b6) - if self.out_features: - return output_segment, b2, b3, b4, b5, b6 - return output_segment + act1, act2, act3, act4, act5, act6 = self.encoder_segment(batch) + segmentation = self.decoder_segment(act1, act2, act3, act4, act5, act6) + return segmentation class EncoderDiscriminative(nn.Module): @@ -168,28 +163,28 @@ def __init__(self, in_channels: int, base_width: int): nn.ReLU(inplace=True), ) - def forward(self, x: Tensor) -> Tensor: + def forward(self, batch: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: """Convert the inputs to the salient space by running them through the encoder network. Args: - x (Tensor): Batch of inputs consisting of the concatenation of the original images + batch (Tensor): Batch of inputs consisting of the concatenation of the original images and their reconstructions. Returns: Computed feature maps for each of the layers in the encoder sub network. 
""" - b1 = self.block1(x) - mp1 = self.mp1(b1) - b2 = self.block2(mp1) - mp2 = self.mp3(b2) - b3 = self.block3(mp2) - mp3 = self.mp3(b3) - b4 = self.block4(mp3) - mp4 = self.mp4(b4) - b5 = self.block5(mp4) - mp5 = self.mp5(b5) - b6 = self.block6(mp5) - return b1, b2, b3, b4, b5, b6 + act1 = self.block1(batch) + mp1 = self.mp1(act1) + act2 = self.block2(mp1) + mp2 = self.mp3(act2) + act3 = self.block3(mp2) + mp3 = self.mp3(act3) + act4 = self.block4(mp3) + mp4 = self.mp4(act4) + act5 = self.block5(mp4) + mp5 = self.mp5(act5) + act6 = self.block6(mp5) + return act1, act2, act3, act4, act5, act6 class DecoderDiscriminative(nn.Module): @@ -280,38 +275,38 @@ def __init__(self, base_width: int, out_channels: int = 1): self.fin_out = nn.Sequential(nn.Conv2d(base_width, out_channels, kernel_size=3, padding=1)) - def forward(self, b1: Tensor, b2: Tensor, b3: Tensor, b4: Tensor, b5: Tensor, b6: Tensor) -> Tensor: + def forward(self, act1: Tensor, act2: Tensor, act3: Tensor, act4: Tensor, act5: Tensor, act6: Tensor) -> Tensor: """Computes predicted anomaly class scores from the intermediate outputs of the encoder sub network. Args: - b1 (Tensor): Feature maps extracted from the first block of convolutional layers. - b2 (Tensor): Feature maps extracted from the second block of convolutional layers. - b3 (Tensor): Feature maps extracted from the third block of convolutional layers. - b4 (Tensor): Feature maps extracted from the fourth block of convolutional layers. - b5 (Tensor): Feature maps extracted from the fifth block of convolutional layers. - b6 (Tensor): Feature maps extracted from the sixth block of convolutional layers. + act1 (Tensor): Encoder activations of the first block of convolutional layers. + act2 (Tensor): Encoder activations of the second block of convolutional layers. + act3 (Tensor): Encoder activations of the third block of convolutional layers. + act4 (Tensor): Encoder activations of the fourth block of convolutional layers. + act5 (Tensor): Encoder activations of the fifth block of convolutional layers. + act6 (Tensor): Encoder activations of the sixth block of convolutional layers. Returns: Predicted anomaly class scores per pixel. """ - up_b = self.up_b(b6) - cat_b = torch.cat((up_b, b5), dim=1) + up_b = self.up_b(act6) + cat_b = torch.cat((up_b, act5), dim=1) db_b = self.db_b(cat_b) up1 = self.up1(db_b) - cat1 = torch.cat((up1, b4), dim=1) + cat1 = torch.cat((up1, act4), dim=1) db1 = self.db1(cat1) up2 = self.up2(db1) - cat2 = torch.cat((up2, b3), dim=1) + cat2 = torch.cat((up2, act3), dim=1) db2 = self.db2(cat2) up3 = self.up3(db2) - cat3 = torch.cat((up3, b2), dim=1) + cat3 = torch.cat((up3, act2), dim=1) db3 = self.db3(cat3) up4 = self.up4(db3) - cat4 = torch.cat((up4, b1), dim=1) + cat4 = torch.cat((up4, act1), dim=1) db4 = self.db4(cat4) out = self.fin_out(db4) @@ -373,25 +368,25 @@ def __init__(self, in_channels: int, base_width: int): nn.ReLU(inplace=True), ) - def forward(self, x: Tensor) -> Tensor: + def forward(self, batch: Tensor) -> Tensor: """Encode a batch of input images to the salient space. Args: - x (Tensor): Batch of input images. + batch (Tensor): Batch of input images. Returns: Feature maps extracted from the bottleneck layer. 
""" - b1 = self.block1(x) - mp1 = self.mp1(b1) - b2 = self.block2(mp1) - mp2 = self.mp3(b2) - b3 = self.block3(mp2) - mp3 = self.mp3(b3) - b4 = self.block4(mp3) - mp4 = self.mp4(b4) - b5 = self.block5(mp4) - return b5 + act1 = self.block1(batch) + mp1 = self.mp1(act1) + act2 = self.block2(mp1) + mp2 = self.mp3(act2) + act3 = self.block3(mp2) + mp3 = self.mp3(act3) + act4 = self.block4(mp3) + mp4 = self.mp4(act4) + act5 = self.block5(mp4) + return act5 class DecoderReconstructive(nn.Module): @@ -468,16 +463,16 @@ def __init__(self, base_width: int, out_channels: int = 1): self.fin_out = nn.Sequential(nn.Conv2d(base_width, out_channels, kernel_size=3, padding=1)) - def forward(self, b5: Tensor) -> Tensor: + def forward(self, act5: Tensor) -> Tensor: """Reconstruct the image from the activations of the bottleneck layer. Args: - b5 (Tensor): Activations of the bottleneck layer. + act5 (Tensor): Activations of the bottleneck layer. Returns: Batch of reconstructed images. """ - up1 = self.up1(b5) + up1 = self.up1(act5) db1 = self.db1(up1) up2 = self.up2(db1) From f48ae4e0364f70eaf7ef0345791c18c02fa7ad50 Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Tue, 7 Jun 2022 17:28:51 +0200 Subject: [PATCH 17/19] move helpers to utils directory --- anomalib/models/draem/lightning_model.py | 2 +- anomalib/models/draem/{ => utils}/augmenter.py | 2 +- anomalib/models/draem/{ => utils}/perlin.py | 0 3 files changed, 2 insertions(+), 2 deletions(-) rename anomalib/models/draem/{ => utils}/augmenter.py (98%) rename anomalib/models/draem/{ => utils}/perlin.py (100%) diff --git a/anomalib/models/draem/lightning_model.py b/anomalib/models/draem/lightning_model.py index 5b658ddab4..72656869dc 100644 --- a/anomalib/models/draem/lightning_model.py +++ b/anomalib/models/draem/lightning_model.py @@ -15,9 +15,9 @@ from pytorch_lightning.utilities.cli import MODEL_REGISTRY from anomalib.models.components import AnomalyModule -from anomalib.models.draem.augmenter import Augmenter from anomalib.models.draem.loss import DraemLoss from anomalib.models.draem.torch_model import DraemModel +from anomalib.models.draem.utils import Augmenter logger = logging.getLogger(__name__) diff --git a/anomalib/models/draem/augmenter.py b/anomalib/models/draem/utils/augmenter.py similarity index 98% rename from anomalib/models/draem/augmenter.py rename to anomalib/models/draem/utils/augmenter.py index 4a453c2e9b..7ae49c1315 100644 --- a/anomalib/models/draem/augmenter.py +++ b/anomalib/models/draem/utils/augmenter.py @@ -21,7 +21,7 @@ from torch import Tensor from torchvision.datasets.folder import IMG_EXTENSIONS -from anomalib.models.draem.perlin import rand_perlin_2d_np +from anomalib.models.draem.utils.perlin import rand_perlin_2d_np class Augmenter: diff --git a/anomalib/models/draem/perlin.py b/anomalib/models/draem/utils/perlin.py similarity index 100% rename from anomalib/models/draem/perlin.py rename to anomalib/models/draem/utils/perlin.py From 0b4133f2d7b87006931a3fb910b3c8fbee6e4551 Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Tue, 7 Jun 2022 17:45:14 +0200 Subject: [PATCH 18/19] add init --- anomalib/models/draem/utils/__init__.py | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 anomalib/models/draem/utils/__init__.py diff --git a/anomalib/models/draem/utils/__init__.py b/anomalib/models/draem/utils/__init__.py new file mode 100644 index 0000000000..dde7003813 --- /dev/null +++ b/anomalib/models/draem/utils/__init__.py @@ -0,0 +1,8 @@ +"""Helpers for the DRAEM model implementation.""" + +# Copyright (C) 2022 
Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from .augmenter import Augmenter + +__all__ = ["Augmenter"] From 4ab21a30db391b4c69cef4844b64cfd3b2af5efa Mon Sep 17 00:00:00 2001 From: Dick Ameln Date: Wed, 8 Jun 2022 12:00:03 +0200 Subject: [PATCH 19/19] update third party software --- third-party-programs.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/third-party-programs.txt b/third-party-programs.txt index 09433d8408..f15e7a26fc 100644 --- a/third-party-programs.txt +++ b/third-party-programs.txt @@ -26,3 +26,7 @@ terms are listed below. 3. FastFlowModel Copyright (c) 2022 @gathierry, https://github.com/gathierry/FastFlow SPDX-License-Identifier: Apache-2.0 + +4. Torch models and utils of the Draem module (anomalib.models.draem) + Copyright (c) 2021 VitjanZ, https://github.com/VitjanZ/DRAEM. + SPDX-License-Identifier: MIT
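
As a closing reference, the components introduced across this patch series compose roughly as follows. This is a minimal end-to-end sketch rather than code taken from any of the patches; the batch shape and the 256x256 resolution are illustrative:

```python
import torch

from anomalib.models.draem.loss import DraemLoss
from anomalib.models.draem.torch_model import DraemModel
from anomalib.models.draem.utils import Augmenter

model = DraemModel()
loss_fn = DraemLoss()
augmenter = Augmenter()  # without anomaly_source_path, random Perlin noise is used

input_image = torch.rand(4, 3, 256, 256)  # stand-in for a batch of normal images
augmented_image, anomaly_mask = augmenter.augment_batch(input_image)

# Training mode: the model returns the reconstruction and the raw mask prediction,
# and the loss compares the reconstruction against the original (un-augmented) image.
model.train()
reconstruction, prediction = model(augmented_image)
loss = loss_fn(input_image, reconstruction, anomaly_mask, prediction)
loss.backward()

# Eval mode: only the softmaxed mask prediction is returned; channel 1 holds the
# per-pixel score of the anomalous class.
model.eval()
with torch.no_grad():
    anomaly_scores = model(input_image)
anomaly_map = anomaly_scores[:, 1, ...]
```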