From 75c4511e6b81ae8fb0dbd932043e8eb35cd09f72 Mon Sep 17 00:00:00 2001
From: zhaohu xing <920232796@qq.com>
Date: Tue, 29 Nov 2022 10:28:41 +0800
Subject: [PATCH 01/53] add AltDiffusion to webui

Signed-off-by: zhaohu xing <920232796@qq.com>
---
 configs/altdiffusion/ad-inference.yaml        |   72 +
 configs/stable-diffusion/v1-inference.yaml    |   71 +
 ldm/data/__init__.py                          |    0
 ldm/data/base.py                              |   23 +
 ldm/data/imagenet.py                          |  394 +++++
 ldm/data/lsun.py                              |   92 ++
 ldm/lr_scheduler.py                           |   98 ++
 ldm/models/autoencoder.py                     |  443 +++++
 ldm/models/diffusion/__init__.py              |    0
 ldm/models/diffusion/classifier.py            |  267 +++
 ldm/models/diffusion/ddim.py                  |  241 +++
 ldm/models/diffusion/ddpm.py                  | 1445 +++++++++++++++++
 ldm/models/diffusion/dpm_solver/__init__.py   |    1 +
 ldm/models/diffusion/dpm_solver/dpm_solver.py | 1184 ++++++++++++++
 ldm/models/diffusion/dpm_solver/sampler.py    |   82 +
 ldm/models/diffusion/plms.py                  |  236 +++
 ldm/modules/attention.py                      |  261 +++
 ldm/modules/diffusionmodules/__init__.py      |    0
 ldm/modules/diffusionmodules/model.py         |  835 ++++++++++
 ldm/modules/diffusionmodules/openaimodel.py   |  961 +++++++++++
 ldm/modules/diffusionmodules/util.py          |  267 +++
 ldm/modules/distributions/__init__.py         |    0
 ldm/modules/distributions/distributions.py    |   92 ++
 ldm/modules/ema.py                            |   76 +
 ldm/modules/encoders/__init__.py              |    0
 ldm/modules/encoders/modules.py               |  234 +++
 ldm/modules/encoders/xlmr.py                  |  137 ++
 ldm/modules/image_degradation/__init__.py     |    2 +
 ldm/modules/image_degradation/bsrgan.py       |  730 +++++++++
 ldm/modules/image_degradation/bsrgan_light.py |  650 ++++++++
 ldm/modules/image_degradation/utils/test.png  |  Bin 0 -> 441072 bytes
 ldm/modules/image_degradation/utils_image.py  |  916 +++++++++++
 ldm/modules/losses/__init__.py                |    1 +
 ldm/modules/losses/contperceptual.py          |  111 ++
 ldm/modules/losses/vqperceptual.py            |  167 ++
 ldm/modules/x_transformer.py                  |  641 ++++++++
 ldm/util.py                                   |  203 +++
 modules/devices.py                            |    4 +-
 modules/sd_hijack.py                          |   23 +-
 modules/shared.py                             |    6 +-
 40 files changed, 10957 insertions(+), 9 deletions(-)
 create mode 100644 configs/altdiffusion/ad-inference.yaml
 create mode 100644 configs/stable-diffusion/v1-inference.yaml
 create mode 100644 ldm/data/__init__.py
 create mode 100644 ldm/data/base.py
 create mode 100644 ldm/data/imagenet.py
 create mode 100644 ldm/data/lsun.py
 create mode 100644 ldm/lr_scheduler.py
 create mode 100644 ldm/models/autoencoder.py
 create mode 100644 ldm/models/diffusion/__init__.py
 create mode 100644 ldm/models/diffusion/classifier.py
 create mode 100644 ldm/models/diffusion/ddim.py
 create mode 100644 ldm/models/diffusion/ddpm.py
 create mode 100644 ldm/models/diffusion/dpm_solver/__init__.py
 create mode 100644 ldm/models/diffusion/dpm_solver/dpm_solver.py
 create mode 100644 ldm/models/diffusion/dpm_solver/sampler.py
 create mode 100644 ldm/models/diffusion/plms.py
 create mode 100644 ldm/modules/attention.py
 create mode 100644 ldm/modules/diffusionmodules/__init__.py
 create mode 100644 ldm/modules/diffusionmodules/model.py
 create mode 100644 ldm/modules/diffusionmodules/openaimodel.py
 create mode 100644 ldm/modules/diffusionmodules/util.py
 create mode 100644 ldm/modules/distributions/__init__.py
 create mode 100644 ldm/modules/distributions/distributions.py
 create mode 100644 ldm/modules/ema.py
 create mode 100644 ldm/modules/encoders/__init__.py
 create mode 100644 ldm/modules/encoders/modules.py
 create mode 100644 ldm/modules/encoders/xlmr.py
 create mode 100644 ldm/modules/image_degradation/__init__.py
 create mode 100644 ldm/modules/image_degradation/bsrgan.py
 create mode 100644 ldm/modules/image_degradation/bsrgan_light.py
 create mode 100644 ldm/modules/image_degradation/utils/test.png
 create mode 100644 ldm/modules/image_degradation/utils_image.py
 create mode 100644 ldm/modules/losses/__init__.py
 create mode 100644 ldm/modules/losses/contperceptual.py
 create mode 100644 ldm/modules/losses/vqperceptual.py
 create mode 100644 ldm/modules/x_transformer.py
 create mode 100644 ldm/util.py

diff --git a/configs/altdiffusion/ad-inference.yaml b/configs/altdiffusion/ad-inference.yaml
new file mode 100644
index 000000000..1b11b63ea
--- /dev/null
+++ b/configs/altdiffusion/ad-inference.yaml
@@ -0,0 +1,72 @@
+model:
+  base_learning_rate: 1.0e-04
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: False
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 10000 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.xlmr.BertSeriesModelWithTransformation
+      params:
+        name: "XLMR-Large"
\ No newline at end of file
diff --git a/configs/stable-diffusion/v1-inference.yaml b/configs/stable-diffusion/v1-inference.yaml
new file mode 100644
index 000000000..2e6ef0f2c
--- /dev/null
+++ b/configs/stable-diffusion/v1-inference.yaml
@@ -0,0 +1,71 @@
+model:
+  base_learning_rate: 1.0e-04
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false   # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: False
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 10000 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      # target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+      target: altclip.model.AltCLIPEmbedder
\ No newline at end of file
diff --git a/ldm/data/__init__.py b/ldm/data/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ldm/data/base.py b/ldm/data/base.py
new file mode 100644
index 000000000..b196c2f7a
--- /dev/null
+++ b/ldm/data/base.py
@@ -0,0 +1,23 @@
+from abc import abstractmethod
+from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset
+
+
+class Txt2ImgIterableBaseDataset(IterableDataset):
+    '''
+    Define an interface to make the IterableDatasets for text2img data chainable
+    '''
+    def __init__(self, num_records=0, valid_ids=None, size=256):
+        super().__init__()
+        self.num_records = num_records
+        self.valid_ids = valid_ids
+        self.sample_ids = valid_ids
+        self.size = size
+
+        print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.')
+
+    def __len__(self):
+        return self.num_records
+
+    @abstractmethod
+    def __iter__(self):
+        pass
\ No newline at end of file
diff --git a/ldm/data/imagenet.py b/ldm/data/imagenet.py
new file mode 100644
index 000000000..1c473f9c6
--- /dev/null
+++ b/ldm/data/imagenet.py
@@ -0,0 +1,394 @@
+import os, yaml, pickle, shutil, tarfile, glob
+import cv2
+import albumentations
+import PIL
+import numpy as np
+import torchvision.transforms.functional as TF
+from omegaconf import OmegaConf
+from functools import partial
+from PIL import Image
+from tqdm import tqdm
+from torch.utils.data import Dataset, Subset
+
+import taming.data.utils as tdu
+from taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve
+from taming.data.imagenet import ImagePaths
+
+from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light
+
+
+def synset2idx(path_to_yaml="data/index_synset.yaml"):
+    with open(path_to_yaml) as f:
+        di2s = yaml.load(f)
+    return dict((v,k) for k,v in di2s.items())
+
+
+class ImageNetBase(Dataset):
+    def __init__(self, config=None):
+        self.config = config or OmegaConf.create()
+        if not type(self.config)==dict:
+            self.config = OmegaConf.to_container(self.config)
+        self.keep_orig_class_label = self.config.get("keep_orig_class_label", False)
+        self.process_images = True  # if False we skip loading & processing images and self.data contains filepaths
+        self._prepare()
+        self._prepare_synset_to_human()
+        self._prepare_idx_to_synset()
+        self._prepare_human_to_integer_label()
+        self._load()
+
+    def __len__(self):
+        return len(self.data)
+
+    def __getitem__(self, i):
+        return self.data[i]
+
+    def _prepare(self):
+        raise NotImplementedError()
+
+    def _filter_relpaths(self, relpaths):
+        ignore = set([
+            "n06596364_9591.JPEG",
+        ])
+        relpaths = [rpath for rpath in relpaths if not
rpath.split("/")[-1] in ignore] + if "sub_indices" in self.config: + indices = str_to_indices(self.config["sub_indices"]) + synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn) # returns a list of strings + self.synset2idx = synset2idx(path_to_yaml=self.idx2syn) + files = [] + for rpath in relpaths: + syn = rpath.split("/")[0] + if syn in synsets: + files.append(rpath) + return files + else: + return relpaths + + def _prepare_synset_to_human(self): + SIZE = 2655750 + URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1" + self.human_dict = os.path.join(self.root, "synset_human.txt") + if (not os.path.exists(self.human_dict) or + not os.path.getsize(self.human_dict)==SIZE): + download(URL, self.human_dict) + + def _prepare_idx_to_synset(self): + URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1" + self.idx2syn = os.path.join(self.root, "index_synset.yaml") + if (not os.path.exists(self.idx2syn)): + download(URL, self.idx2syn) + + def _prepare_human_to_integer_label(self): + URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1" + self.human2integer = os.path.join(self.root, "imagenet1000_clsidx_to_labels.txt") + if (not os.path.exists(self.human2integer)): + download(URL, self.human2integer) + with open(self.human2integer, "r") as f: + lines = f.read().splitlines() + assert len(lines) == 1000 + self.human2integer_dict = dict() + for line in lines: + value, key = line.split(":") + self.human2integer_dict[key] = int(value) + + def _load(self): + with open(self.txt_filelist, "r") as f: + self.relpaths = f.read().splitlines() + l1 = len(self.relpaths) + self.relpaths = self._filter_relpaths(self.relpaths) + print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths))) + + self.synsets = [p.split("/")[0] for p in self.relpaths] + self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths] + + unique_synsets = np.unique(self.synsets) + class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets)) + if not self.keep_orig_class_label: + self.class_labels = [class_dict[s] for s in self.synsets] + else: + self.class_labels = [self.synset2idx[s] for s in self.synsets] + + with open(self.human_dict, "r") as f: + human_dict = f.read().splitlines() + human_dict = dict(line.split(maxsplit=1) for line in human_dict) + + self.human_labels = [human_dict[s] for s in self.synsets] + + labels = { + "relpath": np.array(self.relpaths), + "synsets": np.array(self.synsets), + "class_label": np.array(self.class_labels), + "human_label": np.array(self.human_labels), + } + + if self.process_images: + self.size = retrieve(self.config, "size", default=256) + self.data = ImagePaths(self.abspaths, + labels=labels, + size=self.size, + random_crop=self.random_crop, + ) + else: + self.data = self.abspaths + + +class ImageNetTrain(ImageNetBase): + NAME = "ILSVRC2012_train" + URL = "http://www.image-net.org/challenges/LSVRC/2012/" + AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2" + FILES = [ + "ILSVRC2012_img_train.tar", + ] + SIZES = [ + 147897477120, + ] + + def __init__(self, process_images=True, data_root=None, **kwargs): + self.process_images = process_images + self.data_root = data_root + super().__init__(**kwargs) + + def _prepare(self): + if self.data_root: + self.root = os.path.join(self.data_root, self.NAME) + else: + cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) + self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) + + self.datadir = 
os.path.join(self.root, "data") + self.txt_filelist = os.path.join(self.root, "filelist.txt") + self.expected_length = 1281167 + self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop", + default=True) + if not tdu.is_prepared(self.root): + # prep + print("Preparing dataset {} in {}".format(self.NAME, self.root)) + + datadir = self.datadir + if not os.path.exists(datadir): + path = os.path.join(self.root, self.FILES[0]) + if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]: + import academictorrents as at + atpath = at.get(self.AT_HASH, datastore=self.root) + assert atpath == path + + print("Extracting {} to {}".format(path, datadir)) + os.makedirs(datadir, exist_ok=True) + with tarfile.open(path, "r:") as tar: + tar.extractall(path=datadir) + + print("Extracting sub-tars.") + subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar"))) + for subpath in tqdm(subpaths): + subdir = subpath[:-len(".tar")] + os.makedirs(subdir, exist_ok=True) + with tarfile.open(subpath, "r:") as tar: + tar.extractall(path=subdir) + + filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) + filelist = [os.path.relpath(p, start=datadir) for p in filelist] + filelist = sorted(filelist) + filelist = "\n".join(filelist)+"\n" + with open(self.txt_filelist, "w") as f: + f.write(filelist) + + tdu.mark_prepared(self.root) + + +class ImageNetValidation(ImageNetBase): + NAME = "ILSVRC2012_validation" + URL = "http://www.image-net.org/challenges/LSVRC/2012/" + AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5" + VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1" + FILES = [ + "ILSVRC2012_img_val.tar", + "validation_synset.txt", + ] + SIZES = [ + 6744924160, + 1950000, + ] + + def __init__(self, process_images=True, data_root=None, **kwargs): + self.data_root = data_root + self.process_images = process_images + super().__init__(**kwargs) + + def _prepare(self): + if self.data_root: + self.root = os.path.join(self.data_root, self.NAME) + else: + cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) + self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) + self.datadir = os.path.join(self.root, "data") + self.txt_filelist = os.path.join(self.root, "filelist.txt") + self.expected_length = 50000 + self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop", + default=False) + if not tdu.is_prepared(self.root): + # prep + print("Preparing dataset {} in {}".format(self.NAME, self.root)) + + datadir = self.datadir + if not os.path.exists(datadir): + path = os.path.join(self.root, self.FILES[0]) + if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]: + import academictorrents as at + atpath = at.get(self.AT_HASH, datastore=self.root) + assert atpath == path + + print("Extracting {} to {}".format(path, datadir)) + os.makedirs(datadir, exist_ok=True) + with tarfile.open(path, "r:") as tar: + tar.extractall(path=datadir) + + vspath = os.path.join(self.root, self.FILES[1]) + if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]: + download(self.VS_URL, vspath) + + with open(vspath, "r") as f: + synset_dict = f.read().splitlines() + synset_dict = dict(line.split() for line in synset_dict) + + print("Reorganizing into synset folders") + synsets = np.unique(list(synset_dict.values())) + for s in synsets: + os.makedirs(os.path.join(datadir, s), exist_ok=True) + for k, v in synset_dict.items(): + src = os.path.join(datadir, k) + dst = os.path.join(datadir, v) + shutil.move(src, dst) + + 
filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) + filelist = [os.path.relpath(p, start=datadir) for p in filelist] + filelist = sorted(filelist) + filelist = "\n".join(filelist)+"\n" + with open(self.txt_filelist, "w") as f: + f.write(filelist) + + tdu.mark_prepared(self.root) + + + +class ImageNetSR(Dataset): + def __init__(self, size=None, + degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1., + random_crop=True): + """ + Imagenet Superresolution Dataloader + Performs following ops in order: + 1. crops a crop of size s from image either as random or center crop + 2. resizes crop to size with cv2.area_interpolation + 3. degrades resized crop with degradation_fn + + :param size: resizing to size after cropping + :param degradation: degradation_fn, e.g. cv_bicubic or bsrgan_light + :param downscale_f: Low Resolution Downsample factor + :param min_crop_f: determines crop size s, + where s = c * min_img_side_len with c sampled from interval (min_crop_f, max_crop_f) + :param max_crop_f: "" + :param data_root: + :param random_crop: + """ + self.base = self.get_base() + assert size + assert (size / downscale_f).is_integer() + self.size = size + self.LR_size = int(size / downscale_f) + self.min_crop_f = min_crop_f + self.max_crop_f = max_crop_f + assert(max_crop_f <= 1.) + self.center_crop = not random_crop + + self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA) + + self.pil_interpolation = False # gets reset later if incase interp_op is from pillow + + if degradation == "bsrgan": + self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f) + + elif degradation == "bsrgan_light": + self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f) + + else: + interpolation_fn = { + "cv_nearest": cv2.INTER_NEAREST, + "cv_bilinear": cv2.INTER_LINEAR, + "cv_bicubic": cv2.INTER_CUBIC, + "cv_area": cv2.INTER_AREA, + "cv_lanczos": cv2.INTER_LANCZOS4, + "pil_nearest": PIL.Image.NEAREST, + "pil_bilinear": PIL.Image.BILINEAR, + "pil_bicubic": PIL.Image.BICUBIC, + "pil_box": PIL.Image.BOX, + "pil_hamming": PIL.Image.HAMMING, + "pil_lanczos": PIL.Image.LANCZOS, + }[degradation] + + self.pil_interpolation = degradation.startswith("pil_") + + if self.pil_interpolation: + self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn) + + else: + self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size, + interpolation=interpolation_fn) + + def __len__(self): + return len(self.base) + + def __getitem__(self, i): + example = self.base[i] + image = Image.open(example["file_path_"]) + + if not image.mode == "RGB": + image = image.convert("RGB") + + image = np.array(image).astype(np.uint8) + + min_side_len = min(image.shape[:2]) + crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None) + crop_side_len = int(crop_side_len) + + if self.center_crop: + self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len) + + else: + self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len) + + image = self.cropper(image=image)["image"] + image = self.image_rescaler(image=image)["image"] + + if self.pil_interpolation: + image_pil = PIL.Image.fromarray(image) + LR_image = self.degradation_process(image_pil) + LR_image = np.array(LR_image).astype(np.uint8) + + else: + LR_image = self.degradation_process(image=image)["image"] + + example["image"] = (image/127.5 - 1.0).astype(np.float32) + 
example["LR_image"] = (LR_image/127.5 - 1.0).astype(np.float32) + + return example + + +class ImageNetSRTrain(ImageNetSR): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def get_base(self): + with open("data/imagenet_train_hr_indices.p", "rb") as f: + indices = pickle.load(f) + dset = ImageNetTrain(process_images=False,) + return Subset(dset, indices) + + +class ImageNetSRValidation(ImageNetSR): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def get_base(self): + with open("data/imagenet_val_hr_indices.p", "rb") as f: + indices = pickle.load(f) + dset = ImageNetValidation(process_images=False,) + return Subset(dset, indices) diff --git a/ldm/data/lsun.py b/ldm/data/lsun.py new file mode 100644 index 000000000..6256e4571 --- /dev/null +++ b/ldm/data/lsun.py @@ -0,0 +1,92 @@ +import os +import numpy as np +import PIL +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms + + +class LSUNBase(Dataset): + def __init__(self, + txt_file, + data_root, + size=None, + interpolation="bicubic", + flip_p=0.5 + ): + self.data_paths = txt_file + self.data_root = data_root + with open(self.data_paths, "r") as f: + self.image_paths = f.read().splitlines() + self._length = len(self.image_paths) + self.labels = { + "relative_file_path_": [l for l in self.image_paths], + "file_path_": [os.path.join(self.data_root, l) + for l in self.image_paths], + } + + self.size = size + self.interpolation = {"linear": PIL.Image.LINEAR, + "bilinear": PIL.Image.BILINEAR, + "bicubic": PIL.Image.BICUBIC, + "lanczos": PIL.Image.LANCZOS, + }[interpolation] + self.flip = transforms.RandomHorizontalFlip(p=flip_p) + + def __len__(self): + return self._length + + def __getitem__(self, i): + example = dict((k, self.labels[k][i]) for k in self.labels) + image = Image.open(example["file_path_"]) + if not image.mode == "RGB": + image = image.convert("RGB") + + # default to score-sde preprocessing + img = np.array(image).astype(np.uint8) + crop = min(img.shape[0], img.shape[1]) + h, w, = img.shape[0], img.shape[1] + img = img[(h - crop) // 2:(h + crop) // 2, + (w - crop) // 2:(w + crop) // 2] + + image = Image.fromarray(img) + if self.size is not None: + image = image.resize((self.size, self.size), resample=self.interpolation) + + image = self.flip(image) + image = np.array(image).astype(np.uint8) + example["image"] = (image / 127.5 - 1.0).astype(np.float32) + return example + + +class LSUNChurchesTrain(LSUNBase): + def __init__(self, **kwargs): + super().__init__(txt_file="data/lsun/church_outdoor_train.txt", data_root="data/lsun/churches", **kwargs) + + +class LSUNChurchesValidation(LSUNBase): + def __init__(self, flip_p=0., **kwargs): + super().__init__(txt_file="data/lsun/church_outdoor_val.txt", data_root="data/lsun/churches", + flip_p=flip_p, **kwargs) + + +class LSUNBedroomsTrain(LSUNBase): + def __init__(self, **kwargs): + super().__init__(txt_file="data/lsun/bedrooms_train.txt", data_root="data/lsun/bedrooms", **kwargs) + + +class LSUNBedroomsValidation(LSUNBase): + def __init__(self, flip_p=0.0, **kwargs): + super().__init__(txt_file="data/lsun/bedrooms_val.txt", data_root="data/lsun/bedrooms", + flip_p=flip_p, **kwargs) + + +class LSUNCatsTrain(LSUNBase): + def __init__(self, **kwargs): + super().__init__(txt_file="data/lsun/cat_train.txt", data_root="data/lsun/cats", **kwargs) + + +class LSUNCatsValidation(LSUNBase): + def __init__(self, flip_p=0., **kwargs): + super().__init__(txt_file="data/lsun/cat_val.txt", data_root="data/lsun/cats", + 
flip_p=flip_p, **kwargs) diff --git a/ldm/lr_scheduler.py b/ldm/lr_scheduler.py new file mode 100644 index 000000000..be39da9ca --- /dev/null +++ b/ldm/lr_scheduler.py @@ -0,0 +1,98 @@ +import numpy as np + + +class LambdaWarmUpCosineScheduler: + """ + note: use with a base_lr of 1.0 + """ + def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0): + self.lr_warm_up_steps = warm_up_steps + self.lr_start = lr_start + self.lr_min = lr_min + self.lr_max = lr_max + self.lr_max_decay_steps = max_decay_steps + self.last_lr = 0. + self.verbosity_interval = verbosity_interval + + def schedule(self, n, **kwargs): + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}") + if n < self.lr_warm_up_steps: + lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start + self.last_lr = lr + return lr + else: + t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps) + t = min(t, 1.0) + lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( + 1 + np.cos(t * np.pi)) + self.last_lr = lr + return lr + + def __call__(self, n, **kwargs): + return self.schedule(n,**kwargs) + + +class LambdaWarmUpCosineScheduler2: + """ + supports repeated iterations, configurable via lists + note: use with a base_lr of 1.0. + """ + def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0): + assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths) + self.lr_warm_up_steps = warm_up_steps + self.f_start = f_start + self.f_min = f_min + self.f_max = f_max + self.cycle_lengths = cycle_lengths + self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths)) + self.last_f = 0. 
+ self.verbosity_interval = verbosity_interval + + def find_in_interval(self, n): + interval = 0 + for cl in self.cum_cycles[1:]: + if n <= cl: + return interval + interval += 1 + + def schedule(self, n, **kwargs): + cycle = self.find_in_interval(n) + n = n - self.cum_cycles[cycle] + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " + f"current cycle {cycle}") + if n < self.lr_warm_up_steps[cycle]: + f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] + self.last_f = f + return f + else: + t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle]) + t = min(t, 1.0) + f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * ( + 1 + np.cos(t * np.pi)) + self.last_f = f + return f + + def __call__(self, n, **kwargs): + return self.schedule(n, **kwargs) + + +class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2): + + def schedule(self, n, **kwargs): + cycle = self.find_in_interval(n) + n = n - self.cum_cycles[cycle] + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " + f"current cycle {cycle}") + + if n < self.lr_warm_up_steps[cycle]: + f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] + self.last_f = f + return f + else: + f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle]) + self.last_f = f + return f + diff --git a/ldm/models/autoencoder.py b/ldm/models/autoencoder.py new file mode 100644 index 000000000..6a9c4f454 --- /dev/null +++ b/ldm/models/autoencoder.py @@ -0,0 +1,443 @@ +import torch +import pytorch_lightning as pl +import torch.nn.functional as F +from contextlib import contextmanager + +from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer + +from ldm.modules.diffusionmodules.model import Encoder, Decoder +from ldm.modules.distributions.distributions import DiagonalGaussianDistribution + +from ldm.util import instantiate_from_config + + +class VQModel(pl.LightningModule): + def __init__(self, + ddconfig, + lossconfig, + n_embed, + embed_dim, + ckpt_path=None, + ignore_keys=[], + image_key="image", + colorize_nlabels=None, + monitor=None, + batch_resize_range=None, + scheduler_config=None, + lr_g_factor=1.0, + remap=None, + sane_index_shape=False, # tell vector quantizer to return indices as bhw + use_ema=False + ): + super().__init__() + self.embed_dim = embed_dim + self.n_embed = n_embed + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, + remap=remap, + sane_index_shape=sane_index_shape) + self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + if colorize_nlabels is not None: + assert type(colorize_nlabels)==int + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) + if monitor is not None: + self.monitor = monitor + self.batch_resize_range = batch_resize_range + if self.batch_resize_range is not None: + print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.") + + self.use_ema = use_ema + if self.use_ema: + self.model_ema = 
LitEma(self) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + self.scheduler_config = scheduler_config + self.lr_g_factor = lr_g_factor + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.parameters()) + self.model_ema.copy_to(self) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def init_from_ckpt(self, path, ignore_keys=list()): + sd = torch.load(path, map_location="cpu")["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + print(f"Unexpected Keys: {unexpected}") + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self) + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + quant, emb_loss, info = self.quantize(h) + return quant, emb_loss, info + + def encode_to_prequant(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return h + + def decode(self, quant): + quant = self.post_quant_conv(quant) + dec = self.decoder(quant) + return dec + + def decode_code(self, code_b): + quant_b = self.quantize.embed_code(code_b) + dec = self.decode(quant_b) + return dec + + def forward(self, input, return_pred_indices=False): + quant, diff, (_,_,ind) = self.encode(input) + dec = self.decode(quant) + if return_pred_indices: + return dec, diff, ind + return dec, diff + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() + if self.batch_resize_range is not None: + lower_size = self.batch_resize_range[0] + upper_size = self.batch_resize_range[1] + if self.global_step <= 4: + # do the first few batches with max size to avoid later oom + new_resize = upper_size + else: + new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16)) + if new_resize != x.shape[2]: + x = F.interpolate(x, size=new_resize, mode="bicubic") + x = x.detach() + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + # https://github.com/pytorch/pytorch/issues/37142 + # try not to fool the heuristics + x = self.get_input(batch, self.image_key) + xrec, qloss, ind = self(x, return_pred_indices=True) + + if optimizer_idx == 0: + # autoencode + aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train", + predicted_indices=ind) + + self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) + return aeloss + + if optimizer_idx == 1: + # discriminator + discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) + return discloss + + def validation_step(self, batch, batch_idx): + log_dict = self._validation_step(batch, batch_idx) + with self.ema_scope(): + log_dict_ema = 
self._validation_step(batch, batch_idx, suffix="_ema") + return log_dict + + def _validation_step(self, batch, batch_idx, suffix=""): + x = self.get_input(batch, self.image_key) + xrec, qloss, ind = self(x, return_pred_indices=True) + aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, + self.global_step, + last_layer=self.get_last_layer(), + split="val"+suffix, + predicted_indices=ind + ) + + discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, + self.global_step, + last_layer=self.get_last_layer(), + split="val"+suffix, + predicted_indices=ind + ) + rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] + self.log(f"val{suffix}/rec_loss", rec_loss, + prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) + self.log(f"val{suffix}/aeloss", aeloss, + prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) + if version.parse(pl.__version__) >= version.parse('1.4.0'): + del log_dict_ae[f"val{suffix}/rec_loss"] + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr_d = self.learning_rate + lr_g = self.lr_g_factor*self.learning_rate + print("lr_d", lr_d) + print("lr_g", lr_g) + opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ + list(self.decoder.parameters())+ + list(self.quantize.parameters())+ + list(self.quant_conv.parameters())+ + list(self.post_quant_conv.parameters()), + lr=lr_g, betas=(0.5, 0.9)) + opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), + lr=lr_d, betas=(0.5, 0.9)) + + if self.scheduler_config is not None: + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }, + { + 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }, + ] + return [opt_ae, opt_disc], scheduler + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): + log = dict() + x = self.get_input(batch, self.image_key) + x = x.to(self.device) + if only_inputs: + log["inputs"] = x + return log + xrec, _ = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log["inputs"] = x + log["reconstructions"] = xrec + if plot_ema: + with self.ema_scope(): + xrec_ema, _ = self(x) + if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) + log["reconstructions_ema"] = xrec_ema + return log + + def to_rgb(self, x): + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) + x = F.conv2d(x, weight=self.colorize) + x = 2.*(x-x.min())/(x.max()-x.min()) - 1. 
+ return x + + +class VQModelInterface(VQModel): + def __init__(self, embed_dim, *args, **kwargs): + super().__init__(embed_dim=embed_dim, *args, **kwargs) + self.embed_dim = embed_dim + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return h + + def decode(self, h, force_not_quantize=False): + # also go through quantization layer + if not force_not_quantize: + quant, emb_loss, info = self.quantize(h) + else: + quant = h + quant = self.post_quant_conv(quant) + dec = self.decoder(quant) + return dec + + +class AutoencoderKL(pl.LightningModule): + def __init__(self, + ddconfig, + lossconfig, + embed_dim, + ckpt_path=None, + ignore_keys=[], + image_key="image", + colorize_nlabels=None, + monitor=None, + ): + super().__init__() + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + assert ddconfig["double_z"] + self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + self.embed_dim = embed_dim + if colorize_nlabels is not None: + assert type(colorize_nlabels)==int + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) + if monitor is not None: + self.monitor = monitor + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + + def init_from_ckpt(self, path, ignore_keys=list()): + sd = torch.load(path, map_location="cpu")["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + self.load_state_dict(sd, strict=False) + print(f"Restored from {path}") + + def encode(self, x): + h = self.encoder(x) + moments = self.quant_conv(h) + posterior = DiagonalGaussianDistribution(moments) + return posterior + + def decode(self, z): + z = self.post_quant_conv(z) + dec = self.decoder(z) + return dec + + def forward(self, input, sample_posterior=True): + posterior = self.encode(input) + if sample_posterior: + z = posterior.sample() + else: + z = posterior.mode() + dec = self.decode(z) + return dec, posterior + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + + if optimizer_idx == 0: + # train encoder+decoder+logvar + aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) + self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False) + return aeloss + + if optimizer_idx == 1: + # train the discriminator + discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + + self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) + self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) + return discloss + + def validation_step(self, batch, batch_idx): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + aeloss, log_dict_ae = self.loss(inputs, 
reconstructions, posterior, 0, self.global_step, + last_layer=self.get_last_layer(), split="val") + + discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, + last_layer=self.get_last_layer(), split="val") + + self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr = self.learning_rate + opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ + list(self.decoder.parameters())+ + list(self.quant_conv.parameters())+ + list(self.post_quant_conv.parameters()), + lr=lr, betas=(0.5, 0.9)) + opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), + lr=lr, betas=(0.5, 0.9)) + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + @torch.no_grad() + def log_images(self, batch, only_inputs=False, **kwargs): + log = dict() + x = self.get_input(batch, self.image_key) + x = x.to(self.device) + if not only_inputs: + xrec, posterior = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log["samples"] = self.decode(torch.randn_like(posterior.sample())) + log["reconstructions"] = xrec + log["inputs"] = x + return log + + def to_rgb(self, x): + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) + x = F.conv2d(x, weight=self.colorize) + x = 2.*(x-x.min())/(x.max()-x.min()) - 1. + return x + + +class IdentityFirstStage(torch.nn.Module): + def __init__(self, *args, vq_interface=False, **kwargs): + self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff + super().__init__() + + def encode(self, x, *args, **kwargs): + return x + + def decode(self, x, *args, **kwargs): + return x + + def quantize(self, x, *args, **kwargs): + if self.vq_interface: + return x, None, [None, None, None] + return x + + def forward(self, x, *args, **kwargs): + return x diff --git a/ldm/models/diffusion/__init__.py b/ldm/models/diffusion/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ldm/models/diffusion/classifier.py b/ldm/models/diffusion/classifier.py new file mode 100644 index 000000000..67e98b9d8 --- /dev/null +++ b/ldm/models/diffusion/classifier.py @@ -0,0 +1,267 @@ +import os +import torch +import pytorch_lightning as pl +from omegaconf import OmegaConf +from torch.nn import functional as F +from torch.optim import AdamW +from torch.optim.lr_scheduler import LambdaLR +from copy import deepcopy +from einops import rearrange +from glob import glob +from natsort import natsorted + +from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel +from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config + +__models__ = { + 'class_label': EncoderUNetModel, + 'segmentation': UNetModel +} + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +class NoisyLatentImageClassifier(pl.LightningModule): + + def __init__(self, + diffusion_path, + num_classes, + ckpt_path=None, + pool='attention', + label_key=None, + diffusion_ckpt_path=None, + scheduler_config=None, + weight_decay=1.e-2, + log_steps=10, + monitor='val/loss', + *args, + **kwargs): + super().__init__(*args, **kwargs) + self.num_classes = num_classes + # get 
latest config of diffusion model + diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1] + self.diffusion_config = OmegaConf.load(diffusion_config).model + self.diffusion_config.params.ckpt_path = diffusion_ckpt_path + self.load_diffusion() + + self.monitor = monitor + self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 + self.log_time_interval = self.diffusion_model.num_timesteps // log_steps + self.log_steps = log_steps + + self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \ + else self.diffusion_model.cond_stage_key + + assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params' + + if self.label_key not in __models__: + raise NotImplementedError() + + self.load_classifier(ckpt_path, pool) + + self.scheduler_config = scheduler_config + self.use_scheduler = self.scheduler_config is not None + self.weight_decay = weight_decay + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( + sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + def load_diffusion(self): + model = instantiate_from_config(self.diffusion_config) + self.diffusion_model = model.eval() + self.diffusion_model.train = disabled_train + for param in self.diffusion_model.parameters(): + param.requires_grad = False + + def load_classifier(self, ckpt_path, pool): + model_config = deepcopy(self.diffusion_config.params.unet_config.params) + model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels + model_config.out_channels = self.num_classes + if self.label_key == 'class_label': + model_config.pool = pool + + self.model = __models__[self.label_key](**model_config) + if ckpt_path is not None: + print('#####################################################################') + print(f'load from ckpt "{ckpt_path}"') + print('#####################################################################') + self.init_from_ckpt(ckpt_path) + + @torch.no_grad() + def get_x_noisy(self, x, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x)) + continuous_sqrt_alpha_cumprod = None + if self.diffusion_model.use_continuous_noise: + continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1) + # todo: make sure t+1 is correct here + + return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise, + continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod) + + def forward(self, x_noisy, t, *args, **kwargs): + return self.model(x_noisy, t) + + @torch.no_grad() + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, 'b h w c -> b c h w') + x = x.to(memory_format=torch.contiguous_format).float() + return x + + @torch.no_grad() + def get_conditioning(self, batch, k=None): + if k is None: + k = self.label_key + assert k is not None, 'Needs to provide label key' + + targets = batch[k].to(self.device) 
+ + if self.label_key == 'segmentation': + targets = rearrange(targets, 'b h w c -> b c h w') + for down in range(self.numd): + h, w = targets.shape[-2:] + targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest') + + # targets = rearrange(targets,'b c h w -> b h w c') + + return targets + + def compute_top_k(self, logits, labels, k, reduction="mean"): + _, top_ks = torch.topk(logits, k, dim=1) + if reduction == "mean": + return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() + elif reduction == "none": + return (top_ks == labels[:, None]).float().sum(dim=-1) + + def on_train_epoch_start(self): + # save some memory + self.diffusion_model.model.to('cpu') + + @torch.no_grad() + def write_logs(self, loss, logits, targets): + log_prefix = 'train' if self.training else 'val' + log = {} + log[f"{log_prefix}/loss"] = loss.mean() + log[f"{log_prefix}/acc@1"] = self.compute_top_k( + logits, targets, k=1, reduction="mean" + ) + log[f"{log_prefix}/acc@5"] = self.compute_top_k( + logits, targets, k=5, reduction="mean" + ) + + self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True) + self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False) + self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True) + lr = self.optimizers().param_groups[0]['lr'] + self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True) + + def shared_step(self, batch, t=None): + x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key) + targets = self.get_conditioning(batch) + if targets.dim() == 4: + targets = targets.argmax(dim=1) + if t is None: + t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long() + else: + t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long() + x_noisy = self.get_x_noisy(x, t) + logits = self(x_noisy, t) + + loss = F.cross_entropy(logits, targets, reduction='none') + + self.write_logs(loss.detach(), logits.detach(), targets.detach()) + + loss = loss.mean() + return loss, logits, x_noisy, targets + + def training_step(self, batch, batch_idx): + loss, *_ = self.shared_step(batch) + return loss + + def reset_noise_accs(self): + self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in + range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)} + + def on_validation_start(self): + self.reset_noise_accs() + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + loss, *_ = self.shared_step(batch) + + for t in self.noisy_acc: + _, logits, _, targets = self.shared_step(batch, t) + self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean')) + self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean')) + + return loss + + def configure_optimizers(self): + optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) + + if self.use_scheduler: + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }] + return [optimizer], scheduler + + return optimizer + + @torch.no_grad() + def log_images(self, batch, N=8, *args, **kwargs): + log = dict() + x = self.get_input(batch, self.diffusion_model.first_stage_key) + log['inputs'] = x + + y = self.get_conditioning(batch) + + if self.label_key == 
'class_label': + y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) + log['labels'] = y + + if ismap(y): + log['labels'] = self.diffusion_model.to_rgb(y) + + for step in range(self.log_steps): + current_time = step * self.log_time_interval + + _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) + + log[f'inputs@t{current_time}'] = x_noisy + + pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes) + pred = rearrange(pred, 'b h w c -> b c h w') + + log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred) + + for key in log: + log[key] = log[key][:N] + + return log diff --git a/ldm/models/diffusion/ddim.py b/ldm/models/diffusion/ddim.py new file mode 100644 index 000000000..fb31215db --- /dev/null +++ b/ldm/models/diffusion/ddim.py @@ -0,0 +1,241 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial + +from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, \ + extract_into_tensor + + +class DDIMSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for DDIM sampling is {size}, eta {eta}') + + samples, intermediates = self.ddim_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + ) + return samples, intermediates + + @torch.no_grad() + def ddim_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None,): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img]} + time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
- mask) * img + + outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning) + img, pred_x0 = outs + if callback: callback(i) + if img_callback: img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None): + b, *_, device = *x.shape, x.device + + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == "eps" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + # direction pointing to x_t + dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + @torch.no_grad() + def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): + # fast, but does not allow for exact reconstruction + # t serves as an index to gather the correct alphas + if use_original_steps: + sqrt_alphas_cumprod = self.sqrt_alphas_cumprod + sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod + else: + sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) + sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas + + if noise is None: + noise = torch.randn_like(x0) + return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise) + + @torch.no_grad() + def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, + use_original_steps=False): + + timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps + timesteps = timesteps[:t_start] + + time_range = np.flip(timesteps) + total_steps = timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='Decoding image', total=total_steps) + x_dec = x_latent + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) + x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning) + return x_dec \ No newline at end of file diff --git a/ldm/models/diffusion/ddpm.py b/ldm/models/diffusion/ddpm.py new file mode 100644 index 000000000..bbedd04cf --- /dev/null +++ b/ldm/models/diffusion/ddpm.py @@ -0,0 +1,1445 @@ +""" +wild mixture of +https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py +https://github.com/CompVis/taming-transformers +-- merci +""" + +import torch +import torch.nn as nn +import numpy as np +import pytorch_lightning as pl +from torch.optim.lr_scheduler import LambdaLR +from einops import rearrange, repeat +from contextlib import contextmanager +from functools import partial +from tqdm import tqdm +from torchvision.utils import make_grid +from pytorch_lightning.utilities.distributed import rank_zero_only + +from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config +from ldm.modules.ema import LitEma +from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution +from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL +from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like +from ldm.models.diffusion.ddim import DDIMSampler + + +__conditioning_keys__ = {'concat': 'c_concat', + 'crossattn': 'c_crossattn', + 'adm': 'y'} + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not 
change anymore.""" + return self + + +def uniform_on_device(r1, r2, shape, device): + return (r1 - r2) * torch.rand(*shape, device=device) + r2 + + +class DDPM(pl.LightningModule): + # classic DDPM with Gaussian diffusion, in image space + def __init__(self, + unet_config, + timesteps=1000, + beta_schedule="linear", + loss_type="l2", + ckpt_path=None, + ignore_keys=[], + load_only_unet=False, + monitor="val/loss", + use_ema=True, + first_stage_key="image", + image_size=256, + channels=3, + log_every_t=100, + clip_denoised=True, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + given_betas=None, + original_elbo_weight=0., + v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta + l_simple_weight=1., + conditioning_key=None, + parameterization="eps", # all assuming fixed variance schedules + scheduler_config=None, + use_positional_encodings=False, + learn_logvar=False, + logvar_init=0., + ): + super().__init__() + assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' + self.parameterization = parameterization + print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") + self.cond_stage_model = None + self.clip_denoised = clip_denoised + self.log_every_t = log_every_t + self.first_stage_key = first_stage_key + self.image_size = image_size # try conv? + self.channels = channels + self.use_positional_encodings = use_positional_encodings + self.model = DiffusionWrapper(unet_config, conditioning_key) + count_params(self.model, verbose=True) + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self.model) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + self.use_scheduler = scheduler_config is not None + if self.use_scheduler: + self.scheduler_config = scheduler_config + + self.v_posterior = v_posterior + self.original_elbo_weight = original_elbo_weight + self.l_simple_weight = l_simple_weight + + if monitor is not None: + self.monitor = monitor + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) + + self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, + linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) + + self.loss_type = loss_type + + self.learn_logvar = learn_logvar + self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) + if self.learn_logvar: + self.logvar = nn.Parameter(self.logvar, requires_grad=True) + + + def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if exists(given_betas): + betas = given_betas + else: + betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, + cosine_s=cosine_s) + alphas = 1. 
- betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer('betas', to_torch(betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( + 1. - alphas_cumprod) + self.v_posterior * betas + # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) + self.register_buffer('posterior_variance', to_torch(posterior_variance)) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) + self.register_buffer('posterior_mean_coef1', to_torch( + betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) + self.register_buffer('posterior_mean_coef2', to_torch( + (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) + + if self.parameterization == "eps": + lvlb_weights = self.betas ** 2 / ( + 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) + elif self.parameterization == "x0": + lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) + else: + raise NotImplementedError("mu not supported") + # TODO how to choose this term + lvlb_weights[0] = lvlb_weights[1] + self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) + assert not torch.isnan(self.lvlb_weights).all() + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.model.parameters()) + self.model_ema.copy_to(self.model) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.model.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( + sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + def q_mean_variance(self, x_start, t): + """ + Get the distribution q(x_t | x_0). + :param x_start: the [N x C x ...] tensor of noiseless inputs. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :return: A tuple (mean, variance, log_variance), all of x_start's shape. + """ + mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) + return mean, variance, log_variance + + def predict_start_from_noise(self, x_t, t, noise): + return ( + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - + extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise + ) + + def q_posterior(self, x_start, x_t, t): + posterior_mean = ( + extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance(self, x, t, clip_denoised: bool): + model_out = self.model(x, t) + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + if clip_denoised: + x_recon.clamp_(-1., 1.) 
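+ # the (optionally clipped) x_0 estimate is plugged into the closed-form DDPM posterior + # q(x_{t-1} | x_t, x_0) = N(coef1 * x_0 + coef2 * x_t, posterior_variance), built from the buffers registered in register_schedule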
+ + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): + b, *_, device = *x.shape, x.device + model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) + noise = noise_like(x.shape, device, repeat_noise) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def p_sample_loop(self, shape, return_intermediates=False): + device = self.betas.device + b = shape[0] + img = torch.randn(shape, device=device) + intermediates = [img] + for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): + img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), + clip_denoised=self.clip_denoised) + if i % self.log_every_t == 0 or i == self.num_timesteps - 1: + intermediates.append(img) + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, batch_size=16, return_intermediates=False): + image_size = self.image_size + channels = self.channels + return self.p_sample_loop((batch_size, channels, image_size, image_size), + return_intermediates=return_intermediates) + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) + + def get_loss(self, pred, target, mean=True): + if self.loss_type == 'l1': + loss = (target - pred).abs() + if mean: + loss = loss.mean() + elif self.loss_type == 'l2': + if mean: + loss = torch.nn.functional.mse_loss(target, pred) + else: + loss = torch.nn.functional.mse_loss(target, pred, reduction='none') + else: + raise NotImplementedError(f"unknown loss type '{self.loss_type}'") + + return loss + + def p_losses(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_out = self.model(x_noisy, t) + + loss_dict = {} + if self.parameterization == "eps": + target = noise + elif self.parameterization == "x0": + target = x_start + else: + raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") + + loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) + + log_prefix = 'train' if self.training else 'val' + + loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) + loss_simple = loss.mean() * self.l_simple_weight + + loss_vlb = (self.lvlb_weights[t] * loss).mean() + loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) + + loss = loss_simple + self.original_elbo_weight * loss_vlb + + loss_dict.update({f'{log_prefix}/loss': loss}) + + return loss, loss_dict + + def forward(self, x, *args, **kwargs): + # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size + # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' + t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() + return self.p_losses(x, t, *args, **kwargs) + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, 'b h w c -> b c h w') + x =
x.to(memory_format=torch.contiguous_format).float() + return x + + def shared_step(self, batch): + x = self.get_input(batch, self.first_stage_key) + loss, loss_dict = self(x) + return loss, loss_dict + + def training_step(self, batch, batch_idx): + loss, loss_dict = self.shared_step(batch) + + self.log_dict(loss_dict, prog_bar=True, + logger=True, on_step=True, on_epoch=True) + + self.log("global_step", self.global_step, + prog_bar=True, logger=True, on_step=True, on_epoch=False) + + if self.use_scheduler: + lr = self.optimizers().param_groups[0]['lr'] + self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) + + return loss + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + _, loss_dict_no_ema = self.shared_step(batch) + with self.ema_scope(): + _, loss_dict_ema = self.shared_step(batch) + loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} + self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) + self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self.model) + + def _get_rows_from_list(self, samples): + n_imgs_per_row = len(samples) + denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): + log = dict() + x = self.get_input(batch, self.first_stage_key) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + x = x.to(self.device)[:N] + log["inputs"] = x + + # get diffusion row + diffusion_row = list() + x_start = x[:n_row] + + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(x_start) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + diffusion_row.append(x_noisy) + + log["diffusion_row"] = self._get_rows_from_list(diffusion_row) + + if sample: + # get denoise row + with self.ema_scope("Plotting"): + samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) + + log["samples"] = samples + log["denoise_row"] = self._get_rows_from_list(denoise_row) + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.learn_logvar: + params = params + [self.logvar] + opt = torch.optim.AdamW(params, lr=lr) + return opt + + +class LatentDiffusion(DDPM): + """main class""" + def __init__(self, + first_stage_config, + cond_stage_config, + num_timesteps_cond=None, + cond_stage_key="image", + cond_stage_trainable=False, + concat_mode=True, + cond_stage_forward=None, + conditioning_key=None, + scale_factor=1.0, + scale_by_std=False, + *args, **kwargs): + self.num_timesteps_cond = default(num_timesteps_cond, 1) + self.scale_by_std = scale_by_std + assert self.num_timesteps_cond <= kwargs['timesteps'] + # for backwards compatibility after implementation of DiffusionWrapper + if conditioning_key is None: + conditioning_key = 'concat' if concat_mode else 'crossattn' + if cond_stage_config == '__is_unconditional__': + 
conditioning_key = None + ckpt_path = kwargs.pop("ckpt_path", None) + ignore_keys = kwargs.pop("ignore_keys", []) + super().__init__(conditioning_key=conditioning_key, *args, **kwargs) + self.concat_mode = concat_mode + self.cond_stage_trainable = cond_stage_trainable + self.cond_stage_key = cond_stage_key + try: + self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 + except: + self.num_downs = 0 + if not scale_by_std: + self.scale_factor = scale_factor + else: + self.register_buffer('scale_factor', torch.tensor(scale_factor)) + self.instantiate_first_stage(first_stage_config) + self.instantiate_cond_stage(cond_stage_config) + self.cond_stage_forward = cond_stage_forward + self.clip_denoised = False + self.bbox_tokenizer = None + + self.restarted_from_ckpt = False + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys) + self.restarted_from_ckpt = True + + def make_cond_schedule(self, ): + self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) + ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() + self.cond_ids[:self.num_timesteps_cond] = ids + + @rank_zero_only + @torch.no_grad() + def on_train_batch_start(self, batch, batch_idx, dataloader_idx): + # only for very first batch + if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: + assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' + # set rescale weight to 1./std of encodings + print("### USING STD-RESCALING ###") + x = super().get_input(batch, self.first_stage_key) + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + del self.scale_factor + self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) + print(f"setting self.scale_factor to {self.scale_factor}") + print("### USING STD-RESCALING ###") + + def register_schedule(self, + given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) + + self.shorten_cond_schedule = self.num_timesteps_cond > 1 + if self.shorten_cond_schedule: + self.make_cond_schedule() + + def instantiate_first_stage(self, config): + model = instantiate_from_config(config) + self.first_stage_model = model.eval() + self.first_stage_model.train = disabled_train + for param in self.first_stage_model.parameters(): + param.requires_grad = False + + def instantiate_cond_stage(self, config): + if not self.cond_stage_trainable: + if config == "__is_first_stage__": + print("Using first stage also as cond stage.") + self.cond_stage_model = self.first_stage_model + elif config == "__is_unconditional__": + print(f"Training {self.__class__.__name__} as an unconditional model.") + self.cond_stage_model = None + # self.be_unconditional = True + else: + model = instantiate_from_config(config) + self.cond_stage_model = model.eval() + self.cond_stage_model.train = disabled_train + for param in self.cond_stage_model.parameters(): + param.requires_grad = False + else: + assert config != '__is_first_stage__' + assert config != '__is_unconditional__' + model = instantiate_from_config(config) + self.cond_stage_model = model + + def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): + denoise_row = [] + for zd in tqdm(samples, desc=desc): + denoise_row.append(self.decode_first_stage(zd.to(self.device), + force_not_quantize=force_no_decoder_quantization)) + n_imgs_per_row = len(denoise_row) + denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W + denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + def get_first_stage_encoding(self, encoder_posterior): + if isinstance(encoder_posterior, DiagonalGaussianDistribution): + z = encoder_posterior.sample() + elif isinstance(encoder_posterior, torch.Tensor): + z = encoder_posterior + else: + raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") + return self.scale_factor * z + + def get_learned_conditioning(self, c): + if self.cond_stage_forward is None: + if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): + c = self.cond_stage_model.encode(c) + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + else: + c = self.cond_stage_model(c) + else: + assert hasattr(self.cond_stage_model, self.cond_stage_forward) + c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) + return c + + def meshgrid(self, h, w): + y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) + x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) + + arr = torch.cat([y, x], dim=-1) + return arr + + def delta_border(self, h, w): + """ + :param h: height + :param w: width + :return: normalized distance to image border, + with min distance = 0 at border and max dist = 0.5 at image center + """ + lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) + arr = self.meshgrid(h, w) / lower_right_corner + dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] + dist_right_down =
torch.min(1 - arr, dim=-1, keepdims=True)[0] + edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] + return edge_dist + + def get_weighting(self, h, w, Ly, Lx, device): + weighting = self.delta_border(h, w) + weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], + self.split_input_params["clip_max_weight"], ) + weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) + + if self.split_input_params["tie_braker"]: + L_weighting = self.delta_border(Ly, Lx) + L_weighting = torch.clip(L_weighting, + self.split_input_params["clip_min_tie_weight"], + self.split_input_params["clip_max_tie_weight"]) + + L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) + weighting = weighting * L_weighting + return weighting + + def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code + """ + :param x: img of size (bs, c, h, w) + :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) + """ + bs, nc, h, w = x.shape + + # number of crops in image + Ly = (h - kernel_size[0]) // stride[0] + 1 + Lx = (w - kernel_size[1]) // stride[1] + 1 + + if uf == 1 and df == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) + + weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) + + elif uf > 1 and df == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), + dilation=1, padding=0, + stride=(stride[0] * uf, stride[1] * uf)) + fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) + + weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) + + elif df > 1 and uf == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), + dilation=1, padding=0, + stride=(stride[0] // df, stride[1] // df)) + fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) + + weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) + + else: + raise NotImplementedError + + return fold, unfold, normalization, weighting + + @torch.no_grad() + def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, + cond_key=None, return_original_cond=False, bs=None): + x = super().get_input(batch, k) + if bs is not None: + x = x[:bs] + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + + if self.model.conditioning_key is not None: + if cond_key is None: + cond_key = 
self.cond_stage_key + if cond_key != self.first_stage_key: + if cond_key in ['caption', 'coordinates_bbox']: + xc = batch[cond_key] + elif cond_key == 'class_label': + xc = batch + else: + xc = super().get_input(batch, cond_key).to(self.device) + else: + xc = x + if not self.cond_stage_trainable or force_c_encode: + if isinstance(xc, dict) or isinstance(xc, list): + # import pudb; pudb.set_trace() + c = self.get_learned_conditioning(xc) + else: + c = self.get_learned_conditioning(xc.to(self.device)) + else: + c = xc + if bs is not None: + c = c[:bs] + + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + ckey = __conditioning_keys__[self.model.conditioning_key] + c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} + + else: + c = None + xc = None + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + c = {'pos_x': pos_x, 'pos_y': pos_y} + out = [z, c] + if return_first_stage_outputs: + xrec = self.decode_first_stage(z) + out.extend([x, xrec]) + if return_original_cond: + out.append(xc) + return out + + @torch.no_grad() + def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, 'b h w c -> b c h w').contiguous() + + z = 1. / self.scale_factor * z + + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + uf = self.split_input_params["vqf"] + bs, nc, h, w = z.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) + + z = unfold(z) # (bn, nc * prod(**ks), L) + # 1. Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + # 2. apply model loop over last dim + if isinstance(self.first_stage_model, VQModelInterface): + output_list = [self.first_stage_model.decode(z[:, :, :, :, i], + force_not_quantize=predict_cids or force_not_quantize) + for i in range(z.shape[-1])] + else: + + output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) + o = o * weighting + # Reverse 1. 
reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization # norm is shape (1, 1, h, w) + return decoded + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + # same as above but without decorator + def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, 'b h w c -> b c h w').contiguous() + + z = 1. / self.scale_factor * z + + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + uf = self.split_input_params["vqf"] + bs, nc, h, w = z.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) + + z = unfold(z) # (bn, nc * prod(**ks), L) + # 1. Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + # 2. apply model loop over last dim + if isinstance(self.first_stage_model, VQModelInterface): + output_list = [self.first_stage_model.decode(z[:, :, :, :, i], + force_not_quantize=predict_cids or force_not_quantize) + for i in range(z.shape[-1])] + else: + + output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) + o = o * weighting + # Reverse 1. reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization # norm is shape (1, 1, h, w) + return decoded + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + @torch.no_grad() + def encode_first_stage(self, x): + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. 
(64, 64) + df = self.split_input_params["vqf"] + self.split_input_params['original_image_size'] = x.shape[-2:] + bs, nc, h, w = x.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) + z = unfold(x) # (bn, nc * prod(**ks), L) + # Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) + o = o * weighting + + # Reverse reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization + return decoded + + else: + return self.first_stage_model.encode(x) + else: + return self.first_stage_model.encode(x) + + def shared_step(self, batch, **kwargs): + x, c = self.get_input(batch, self.first_stage_key) + loss = self(x, c) + return loss + + def forward(self, x, c, *args, **kwargs): + t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() + if self.model.conditioning_key is not None: + assert c is not None + if self.cond_stage_trainable: + c = self.get_learned_conditioning(c) + if self.shorten_cond_schedule: # TODO: drop this option + tc = self.cond_ids[t].to(self.device) + c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) + return self.p_losses(x, c, t, *args, **kwargs) + + def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset + def rescale_bbox(bbox): + x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) + y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) + w = min(bbox[2] / crop_coordinates[2], 1 - x0) + h = min(bbox[3] / crop_coordinates[3], 1 - y0) + return x0, y0, w, h + + return [rescale_bbox(b) for b in bboxes] + + def apply_model(self, x_noisy, t, cond, return_ids=False): + + if isinstance(cond, dict): + # hybrid case, cond is expected to be a dict + pass + else: + if not isinstance(cond, list): + cond = [cond] + key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' + cond = {key: cond} + + if hasattr(self, "split_input_params"): + assert len(cond) == 1 # todo can only deal with one conditioning atm + assert not return_ids + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg.
(64, 64) + + h, w = x_noisy.shape[-2:] + + fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) + + z = unfold(x_noisy) # (bn, nc * prod(**ks), L) + # Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] + + if self.cond_stage_key in ["image", "LR_image", "segmentation", + 'bbox_img'] and self.model.conditioning_key: # todo check for completeness + c_key = next(iter(cond.keys())) # get key + c = next(iter(cond.values())) # get value + assert (len(c) == 1) # todo extend to list with more than one elem + c = c[0] # get element + + c = unfold(c) + c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] + + elif self.cond_stage_key == 'coordinates_bbox': + assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size' + + # assuming padding of unfold is always 0 and its dilation is always 1 + n_patches_per_row = int((w - ks[0]) / stride[0] + 1) + full_img_h, full_img_w = self.split_input_params['original_image_size'] + # as we are operating on latents, we need the factor from the original image size to the + # spatial latent size to properly rescale the crops for regenerating the bbox annotations + num_downs = self.first_stage_model.encoder.num_resolutions - 1 + rescale_latent = 2 ** (num_downs) + + # get the top-left positions of the patches in the form required by the bbox tokenizer; therefore we + # need to rescale the tl patch coordinates to lie in (0, 1) + tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, + rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) + for patch_nr in range(z.shape[-1])] + + # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) + patch_limits = [(x_tl, y_tl, + rescale_latent * ks[0] / full_img_w, + rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] + # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] + + # tokenize crop coordinates for the bounding boxes of the respective patches + patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) + for bbox in patch_limits] # list of length l with tensors of shape (1, 2) + print(patch_limits_tknzd[0].shape) + # cut tknzd crop position from conditioning + assert isinstance(cond, dict), 'cond must be dict to be fed into model' + cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) + print(cut_cond.shape) + + adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) + adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') + print(adapted_cond.shape) + adapted_cond = self.get_learned_conditioning(adapted_cond) + print(adapted_cond.shape) + adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) + print(adapted_cond.shape) + + cond_list = [{'c_crossattn': [e]} for e in adapted_cond] + + else: + cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient + + # apply model by loop over crops + output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] + assert not isinstance(output_list[0], + tuple) # todo can't deal with multiple model outputs, check this never happens + + o = torch.stack(output_list,
axis=-1) + o = o * weighting + # Reverse reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + x_recon = fold(o) / normalization + + else: + x_recon = self.model(x_noisy, t, **cond) + + if isinstance(x_recon, tuple) and not return_ids: + return x_recon[0] + else: + return x_recon + + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ + extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def _prior_bpd(self, x_start): + """ + Get the prior KL term for the variational lower-bound, measured in + bits-per-dim. + This term can't be optimized, as it only depends on the encoder. + :param x_start: the [N x C x ...] tensor of inputs. + :return: a batch of [N] KL values (in bits), one per batch element. + """ + batch_size = x_start.shape[0] + t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) + qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) + kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) + return mean_flat(kl_prior) / np.log(2.0) + + def p_losses(self, x_start, cond, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_output = self.apply_model(x_noisy, t, cond) + + loss_dict = {} + prefix = 'train' if self.training else 'val' + + if self.parameterization == "x0": + target = x_start + elif self.parameterization == "eps": + target = noise + else: + raise NotImplementedError() + + loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) + loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) + + logvar_t = self.logvar[t].to(self.device) + loss = loss_simple / torch.exp(logvar_t) + logvar_t + # loss = loss_simple / torch.exp(self.logvar) + self.logvar + if self.learn_logvar: + loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) + loss_dict.update({'logvar': self.logvar.data.mean()}) + + loss = self.l_simple_weight * loss.mean() + + loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) + loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() + loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) + loss += (self.original_elbo_weight * loss_vlb) + loss_dict.update({f'{prefix}/loss': loss}) + + return loss, loss_dict + + def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, + return_x0=False, score_corrector=None, corrector_kwargs=None): + t_in = t + model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) + + if score_corrector is not None: + assert self.parameterization == "eps" + model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) + + if return_codebook_ids: + model_out, logits = model_out + + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + else: + raise NotImplementedError() + + if clip_denoised: + x_recon.clamp_(-1., 1.) 
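+ # optionally snap the denoised latent onto the first-stage codebook below (only meaningful for VQ first stages)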
+ if quantize_denoised: + x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + if return_codebook_ids: + return model_mean, posterior_variance, posterior_log_variance, logits + elif return_x0: + return model_mean, posterior_variance, posterior_log_variance, x_recon + else: + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, + return_codebook_ids=False, quantize_denoised=False, return_x0=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): + b, *_, device = *x.shape, x.device + outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, + return_codebook_ids=return_codebook_ids, + quantize_denoised=quantize_denoised, + return_x0=return_x0, + score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) + if return_codebook_ids: + raise DeprecationWarning("Support dropped.") + model_mean, _, model_log_variance, logits = outputs + elif return_x0: + model_mean, _, model_log_variance, x0 = outputs + else: + model_mean, _, model_log_variance = outputs + + noise = noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + + if return_codebook_ids: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) + if return_x0: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 + else: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, + img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., + score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, + log_every_t=None): + if not log_every_t: + log_every_t = self.log_every_t + timesteps = self.num_timesteps + if batch_size is not None: + b = batch_size if batch_size is not None else shape[0] + shape = [batch_size] + list(shape) + else: + b = batch_size = shape[0] + if x_T is None: + img = torch.randn(shape, device=self.device) + else: + img = x_T + intermediates = [] + if cond is not None: + if isinstance(cond, dict): + cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else + list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + else: + cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', + total=timesteps) if verbose else reversed( + range(0, timesteps)) + if type(temperature) == float: + temperature = [temperature] * timesteps + + for i in iterator: + ts = torch.full((b,), i, device=self.device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img, x0_partial = self.p_sample(img, cond, ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised, return_x0=True, + temperature=temperature[i], noise_dropout=noise_dropout, + 
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) + if mask is not None: + assert x0 is not None + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1. - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(x0_partial) + if callback: callback(i) + if img_callback: img_callback(img, i) + return img, intermediates + + @torch.no_grad() + def p_sample_loop(self, cond, shape, return_intermediates=False, + x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, start_T=None, + log_every_t=None): + + if not log_every_t: + log_every_t = self.log_every_t + device = self.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + intermediates = [img] + if timesteps is None: + timesteps = self.num_timesteps + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( + range(0, timesteps)) + + if mask is not None: + assert x0 is not None + assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match + + for i in iterator: + ts = torch.full((b,), i, device=device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img = self.p_sample(img, cond, ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised) + if mask is not None: + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1. - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(img) + if callback: callback(i) + if img_callback: img_callback(img, i) + + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, + verbose=True, timesteps=None, quantize_denoised=False, + mask=None, x0=None, shape=None,**kwargs): + if shape is None: + shape = (batch_size, self.channels, self.image_size, self.image_size) + if cond is not None: + if isinstance(cond, dict): + cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else + list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + else: + cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] + return self.p_sample_loop(cond, + shape, + return_intermediates=return_intermediates, x_T=x_T, + verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, + mask=mask, x0=x0) + + @torch.no_grad() + def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): + + if ddim: + ddim_sampler = DDIMSampler(self) + shape = (self.channels, self.image_size, self.image_size) + samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, + shape,cond,verbose=False,**kwargs) + + else: + samples, intermediates = self.sample(cond=cond, batch_size=batch_size, + return_intermediates=True,**kwargs) + + return samples, intermediates + + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, + quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, + plot_diffusion_rows=True, **kwargs): + + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, + 
return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=N) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption"]: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) + log["conditioning"] = xc + elif self.cond_stage_key == 'class_label': + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) + log['conditioning'] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') + diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with self.ema_scope("Plotting"): + samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, + ddim_steps=ddim_steps,eta=ddim_eta) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( + self.first_stage_model, IdentityFirstStage): + # also display when quantizing x0 while sampling + with self.ema_scope("Plotting Quantized Denoised"): + samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, + ddim_steps=ddim_steps,eta=ddim_eta, + quantize_denoised=True) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, + # quantize_denoised=True) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_x0_quantized"] = x_samples + + if inpaint: + # make a simple center square + b, h, w = z.shape[0], z.shape[2], z.shape[3] + mask = torch.ones(N, h, w).to(self.device) + # zeros will be filled in + mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. + mask = mask[:, None, ...] 
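+ # mask == 1 keeps the encoded input; the zeroed center square is re-synthesized, since p_sample_loop blends img_orig * mask + (1. - mask) * img at every step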
+ with self.ema_scope("Plotting Inpaint"): + + samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, + ddim_steps=ddim_steps, x0=z[:N], mask=mask) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_inpainting"] = x_samples + log["mask"] = mask + + # outpaint + with self.ema_scope("Plotting Outpaint"): + samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, + ddim_steps=ddim_steps, x0=z[:N], mask=mask) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_outpainting"] = x_samples + + if plot_progressive_rows: + with self.ema_scope("Plotting Progressives"): + img, progressives = self.progressive_denoising(c, + shape=(self.channels, self.image_size, self.image_size), + batch_size=N) + prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") + log["progressive_row"] = prog_row + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.cond_stage_trainable: + print(f"{self.__class__.__name__}: Also optimizing conditioner params!") + params = params + list(self.cond_stage_model.parameters()) + if self.learn_logvar: + print('Diffusion model optimizing logvar') + params.append(self.logvar) + opt = torch.optim.AdamW(params, lr=lr) + if self.use_scheduler: + assert 'target' in self.scheduler_config + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }] + return [opt], scheduler + return opt + + @torch.no_grad() + def to_rgb(self, x): + x = x.float() + if not hasattr(self, "colorize"): + self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) + x = nn.functional.conv2d(x, weight=self.colorize) + x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. 
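+ # colorize is a fixed random 3-channel projection; the result above is min-max rescaled to [-1, 1] for logging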
+ return x + + +class DiffusionWrapper(pl.LightningModule): + def __init__(self, diff_model_config, conditioning_key): + super().__init__() + self.diffusion_model = instantiate_from_config(diff_model_config) + self.conditioning_key = conditioning_key + assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm'] + + def forward(self, x, t, c_concat: list = None, c_crossattn: list = None): + if self.conditioning_key is None: + out = self.diffusion_model(x, t) + elif self.conditioning_key == 'concat': + xc = torch.cat([x] + c_concat, dim=1) + out = self.diffusion_model(xc, t) + elif self.conditioning_key == 'crossattn': + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(x, t, context=cc) + elif self.conditioning_key == 'hybrid': + xc = torch.cat([x] + c_concat, dim=1) + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(xc, t, context=cc) + elif self.conditioning_key == 'adm': + cc = c_crossattn[0] + out = self.diffusion_model(x, t, y=cc) + else: + raise NotImplementedError() + + return out + + +class Layout2ImgDiffusion(LatentDiffusion): + # TODO: move all layout-specific hacks to this class + def __init__(self, cond_stage_key, *args, **kwargs): + assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' + super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) + + def log_images(self, batch, N=8, *args, **kwargs): + logs = super().log_images(batch=batch, N=N, *args, **kwargs) + + key = 'train' if self.training else 'validation' + dset = self.trainer.datamodule.datasets[key] + mapper = dset.conditional_builders[self.cond_stage_key] + + bbox_imgs = [] + map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno)) + for tknzd_bbox in batch[self.cond_stage_key][:N]: + bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256)) + bbox_imgs.append(bboximg) + + cond_img = torch.stack(bbox_imgs, dim=0) + logs['bbox_image'] = cond_img + return logs diff --git a/ldm/models/diffusion/dpm_solver/__init__.py b/ldm/models/diffusion/dpm_solver/__init__.py new file mode 100644 index 000000000..7427f38c0 --- /dev/null +++ b/ldm/models/diffusion/dpm_solver/__init__.py @@ -0,0 +1 @@ +from .sampler import DPMSolverSampler \ No newline at end of file diff --git a/ldm/models/diffusion/dpm_solver/dpm_solver.py b/ldm/models/diffusion/dpm_solver/dpm_solver.py new file mode 100644 index 000000000..bdb64e0c7 --- /dev/null +++ b/ldm/models/diffusion/dpm_solver/dpm_solver.py @@ -0,0 +1,1184 @@ +import torch +import torch.nn.functional as F +import math + + +class NoiseScheduleVP: + def __init__( + self, + schedule='discrete', + betas=None, + alphas_cumprod=None, + continuous_beta_0=0.1, + continuous_beta_1=20., + ): + """Create a wrapper class for the forward SDE (VP type). + + *** + Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t. + We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images. + *** + + The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ). + We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper). + Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t.
For t in [0, T], we have: + + log_alpha_t = self.marginal_log_mean_coeff(t) + sigma_t = self.marginal_std(t) + lambda_t = self.marginal_lambda(t) + + Moreover, as lambda(t) is an invertible function, we also support its inverse function: + + t = self.inverse_lambda(lambda_t) + + =============================================================== + + We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]). + + 1. For discrete-time DPMs: + + For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by: + t_i = (i + 1) / N + e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1. + We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3. + + Args: + betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details) + alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details) + + Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`. + + **Important**: Please pay special attention to the argument `alphas_cumprod`: + The `alphas_cumprod` is the \hat{alpha_n} array in the notation of DDPM. Specifically, DDPMs assume that + q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ). + Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have + alpha_{t_n} = \sqrt{\hat{alpha_n}}, + and + log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}). + + + 2. For continuous-time DPMs: + + We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise + schedule are the default settings in DDPM and improved-DDPM: + + Args: + beta_min: A `float` number. The smallest beta for the linear schedule. + beta_max: A `float` number. The largest beta for the linear schedule. + cosine_s: A `float` number. The hyperparameter in the cosine schedule. + cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule. + T: A `float` number. The ending time of the forward process. + + =============================================================== + + Args: + schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs, + 'linear' or 'cosine' for continuous-time DPMs. + Returns: + A wrapper object of the forward SDE (VP type). + + =============================================================== + + Example: + + # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1): + >>> ns = NoiseScheduleVP('discrete', betas=betas) + + # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1): + >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod) + + # For continuous-time DPMs (VPSDE), linear schedule: + >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.) + + """ + + if schedule not in ['discrete', 'linear', 'cosine']: + raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule)) + + self.schedule = schedule + if schedule == 'discrete': + if betas is not None: + log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0) + else: + assert alphas_cumprod is not None + log_alphas = 0.5 * torch.log(alphas_cumprod) + self.total_N = len(log_alphas) + self.T = 1.
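+ # keypoints for the piecewise linear interpolation of log(alpha_t), used by marginal_log_mean_coeff and inverse_lambda below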
+ self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)) + self.log_alpha_array = log_alphas.reshape((1, -1,)) + else: + self.total_N = 1000 + self.beta_0 = continuous_beta_0 + self.beta_1 = continuous_beta_1 + self.cosine_s = 0.008 + self.cosine_beta_max = 999. + self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s + self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.)) + self.schedule = schedule + if schedule == 'cosine': + # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T. + # Note that T = 0.9946 may be not the optimal setting. However, we find it works well. + self.T = 0.9946 + else: + self.T = 1. + + def marginal_log_mean_coeff(self, t): + """ + Compute log(alpha_t) of a given continuous-time label t in [0, T]. + """ + if self.schedule == 'discrete': + return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1)) + elif self.schedule == 'linear': + return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0 + elif self.schedule == 'cosine': + log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.)) + log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0 + return log_alpha_t + + def marginal_alpha(self, t): + """ + Compute alpha_t of a given continuous-time label t in [0, T]. + """ + return torch.exp(self.marginal_log_mean_coeff(t)) + + def marginal_std(self, t): + """ + Compute sigma_t of a given continuous-time label t in [0, T]. + """ + return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) + + def marginal_lambda(self, t): + """ + Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. + """ + log_mean_coeff = self.marginal_log_mean_coeff(t) + log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) + return log_mean_coeff - log_std + + def inverse_lambda(self, lamb): + """ + Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t. + """ + if self.schedule == 'linear': + tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) + Delta = self.beta_0**2 + tmp + return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0) + elif self.schedule == 'discrete': + log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb) + t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1])) + return t.reshape((-1,)) + else: + log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) + t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s + t = t_fn(log_alpha) + return t + + +def model_wrapper( + model, + noise_schedule, + model_type="noise", + model_kwargs={}, + guidance_type="uncond", + condition=None, + unconditional_condition=None, + guidance_scale=1., + classifier_fn=None, + classifier_kwargs={}, +): + """Create a wrapper function for the noise prediction model. + + DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to + firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. 
+ + We support four types of the diffusion model by setting `model_type`: + + 1. "noise": noise prediction model. (Trained by predicting noise). + + 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). + + 3. "v": velocity prediction model. (Trained by predicting the velocity). + The "v" prediction is derived in detail in Appendix D of [1], and is used in Imagen-Video [2]. + + [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." + arXiv preprint arXiv:2202.00512 (2022). + [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." + arXiv preprint arXiv:2210.02303 (2022). + + 4. "score": marginal score function. (Trained by denoising score matching). + Note that the score function and the noise prediction model follow a simple relationship: + ``` + noise(x_t, t) = -sigma_t * score(x_t, t) + ``` + + We support three types of guided sampling by DPMs by setting `guidance_type`: + 1. "uncond": unconditional sampling by DPMs. + The input `model` has the following format: + `` + model(x, t_input, **model_kwargs) -> noise | x_start | v | score + `` + + 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. + The input `model` has the following format: + `` + model(x, t_input, **model_kwargs) -> noise | x_start | v | score + `` + + The input `classifier_fn` has the following format: + `` + classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) + `` + + [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," + in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. + + 3. "classifier-free": classifier-free guidance sampling by conditional DPMs. + The input `model` has the following format: + `` + model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score + `` + And if cond == `unconditional_condition`, the model output is the unconditional DPM output. + + [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." + arXiv preprint arXiv:2207.12598 (2022). + + + The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) + or continuous-time labels (i.e. epsilon to T). + + We wrap the model function to accept only `x` and `t_continuous` as inputs, and output the predicted noise: + `` + def model_fn(x, t_continuous) -> noise: + t_input = get_model_input_time(t_continuous) + return noise_pred(model, x, t_input, **model_kwargs) + `` + where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. + + =============================================================== + + Args: + model: A diffusion model with the corresponding format described above. + noise_schedule: A noise schedule object, such as NoiseScheduleVP. + model_type: A `str`. The parameterization type of the diffusion model. + "noise" or "x_start" or "v" or "score". + model_kwargs: A `dict`. A dict for the other inputs of the model function. + guidance_type: A `str`. The type of the guidance for sampling. + "uncond" or "classifier" or "classifier-free". + condition: A pytorch tensor. The condition for the guided sampling. + Only used for "classifier" or "classifier-free" guidance type. + unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. + Only used for "classifier-free" guidance type. + guidance_scale: A `float`. The scale for the guided sampling.
+ classifier_fn: A classifier function. Only used for the classifier guidance. + classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. + Returns: + A noise prediction model that accepts the noised data and the continuous time as the inputs. + """ + + def get_model_input_time(t_continuous): + """ + Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time. + For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N]. + For continuous-time DPMs, we just use `t_continuous`. + """ + if noise_schedule.schedule == 'discrete': + return (t_continuous - 1. / noise_schedule.total_N) * 1000. + else: + return t_continuous + + def noise_pred_fn(x, t_continuous, cond=None): + if t_continuous.reshape((-1,)).shape[0] == 1: + t_continuous = t_continuous.expand((x.shape[0])) + t_input = get_model_input_time(t_continuous) + if cond is None: + output = model(x, t_input, **model_kwargs) + else: + output = model(x, t_input, cond, **model_kwargs) + if model_type == "noise": + return output + elif model_type == "x_start": + alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims) + elif model_type == "v": + alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x + elif model_type == "score": + sigma_t = noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return -expand_dims(sigma_t, dims) * output + + def cond_grad_fn(x, t_input): + """ + Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t). + """ + with torch.enable_grad(): + x_in = x.detach().requires_grad_(True) + log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs) + return torch.autograd.grad(log_prob.sum(), x_in)[0] + + def model_fn(x, t_continuous): + """ + The noise prediction model function that is used for DPM-Solver. + """ + if t_continuous.reshape((-1,)).shape[0] == 1: + t_continuous = t_continuous.expand((x.shape[0])) + if guidance_type == "uncond": + return noise_pred_fn(x, t_continuous) + elif guidance_type == "classifier": + assert classifier_fn is not None + t_input = get_model_input_time(t_continuous) + cond_grad = cond_grad_fn(x, t_input) + sigma_t = noise_schedule.marginal_std(t_continuous) + noise = noise_pred_fn(x, t_continuous) + return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad + elif guidance_type == "classifier-free": + if guidance_scale == 1. or unconditional_condition is None: + return noise_pred_fn(x, t_continuous, cond=condition) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t_continuous] * 2) + c_in = torch.cat([unconditional_condition, condition]) + noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2) + return noise_uncond + guidance_scale * (noise - noise_uncond) + + assert model_type in ["noise", "x_start", "v", "score"] # "score" is also handled by noise_pred_fn above + assert guidance_type in ["uncond", "classifier", "classifier-free"] + return model_fn + + +class DPM_Solver: + def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.): + """Construct a DPM-Solver. + + We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
+ If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver). + If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++). + In that case, we further support the "dynamic thresholding" in [1] when `thresholding` is True. + The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales. + + Args: + model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]): + `` + def model_fn(x, t_continuous): + return noise + `` + noise_schedule: A noise schedule object, such as NoiseScheduleVP. + predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model. + thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1]. + max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding. + + [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b. + """ + self.model = model_fn + self.noise_schedule = noise_schedule + self.predict_x0 = predict_x0 + self.thresholding = thresholding + self.max_val = max_val + + def noise_prediction_fn(self, x, t): + """ + Return the output of the noise prediction model. + """ + return self.model(x, t) + + def data_prediction_fn(self, x, t): + """ + Return the output of the data prediction model (with thresholding). + """ + noise = self.noise_prediction_fn(x, t) + dims = x.dim() + alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t) + x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims) + if self.thresholding: + p = 0.995 # A hyperparameter in the paper of "Imagen" [1]. + s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1) + s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims) + x0 = torch.clamp(x0, -s, s) / s + return x0 + + def model_fn(self, x, t): + """ + Convert the model to the noise prediction model or the data prediction model. + """ + if self.predict_x0: + return self.data_prediction_fn(x, t) + else: + return self.noise_prediction_fn(x, t) + + def get_time_steps(self, skip_type, t_T, t_0, N, device): + """Compute the intermediate time steps for sampling. + + Args: + skip_type: A `str`. The type for the spacing of the time steps. We support three types: + - 'logSNR': uniform logSNR for the time steps. + - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.) + - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.) + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + N: An `int`. The total number of time step intervals. + device: A torch device. + Returns: + A pytorch tensor of the time steps, with the shape (N + 1,).
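+ + Example (a minimal sketch; assumes `solver` is a DPM_Solver instance built on any NoiseScheduleVP): + >>> t = solver.get_time_steps('time_uniform', t_T=1., t_0=1e-3, N=10, device='cpu') + >>> t.shape + torch.Size([11])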
+ """ + if skip_type == 'logSNR': + lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device)) + lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device)) + logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device) + return self.noise_schedule.inverse_lambda(logSNR_steps) + elif skip_type == 'time_uniform': + return torch.linspace(t_T, t_0, N + 1).to(device) + elif skip_type == 'time_quadratic': + t_order = 2 + t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device) + return t + else: + raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type)) + + def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): + """ + Get the order of each step for sampling by the singlestep DPM-Solver. + + We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast". + Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is: + - If order == 1: + We take `steps` of DPM-Solver-1 (i.e. DDIM). + - If order == 2: + - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling. + - If steps % 2 == 0, we use K steps of DPM-Solver-2. + - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1. + - If order == 3: + - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. + - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1. + - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1. + - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2. + + ============================================ + Args: + order: A `int`. The max order for the solver (2 or 3). + steps: A `int`. The total number of function evaluations (NFE). + skip_type: A `str`. The type for the spacing of the time steps. We support three types: + - 'logSNR': uniform logSNR for the time steps. + - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) + - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + device: A torch device. + Returns: + orders: A list of the solver order of each step. + """ + if order == 3: + K = steps // 3 + 1 + if steps % 3 == 0: + orders = [3,] * (K - 2) + [2, 1] + elif steps % 3 == 1: + orders = [3,] * (K - 1) + [1] + else: + orders = [3,] * (K - 1) + [2] + elif order == 2: + if steps % 2 == 0: + K = steps // 2 + orders = [2,] * K + else: + K = steps // 2 + 1 + orders = [2,] * (K - 1) + [1] + elif order == 1: + K = 1 + orders = [1,] * steps + else: + raise ValueError("'order' must be '1' or '2' or '3'.") + if skip_type == 'logSNR': + # To reproduce the results in DPM-Solver paper + timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) + else: + timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders)).to(device)] + return timesteps_outer, orders + + def denoise_to_zero_fn(self, x, s): + """ + Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization. 
+ """ + return self.data_prediction_fn(x, s) + + def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False): + """ + DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s`. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + if self.predict_x0: + phi_1 = torch.expm1(-h) + if model_s is None: + model_s = self.model_fn(x, s) + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + ) + if return_intermediate: + return x_t, {'model_s': model_s} + else: + return x_t + else: + phi_1 = torch.expm1(h) + if model_s is None: + model_s = self.model_fn(x, s) + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + ) + if return_intermediate: + return x_t, {'model_s': model_s} + else: + return x_t + + def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpm_solver'): + """ + Singlestep solver DPM-Solver-2 from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + r1: A `float`. The hyperparameter of the second-order solver. + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. 
+ """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + if r1 is None: + r1 = 0.5 + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + lambda_s1 = lambda_s + r1 * h + s1 = ns.inverse_lambda(lambda_s1) + log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t) + alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t) + + if self.predict_x0: + phi_11 = torch.expm1(-r1 * h) + phi_1 = torch.expm1(-h) + + if model_s is None: + model_s = self.model_fn(x, s) + x_s1 = ( + expand_dims(sigma_s1 / sigma_s, dims) * x + - expand_dims(alpha_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s) + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (model_s1 - model_s) + ) + else: + phi_11 = torch.expm1(r1 * h) + phi_1 = torch.expm1(h) + + if model_s is None: + model_s = self.model_fn(x, s) + x_s1 = ( + expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x + - expand_dims(sigma_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s) + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s) + ) + if return_intermediate: + return x_t, {'model_s': model_s, 'model_s1': model_s1} + else: + return x_t + + def singlestep_dpm_solver_third_update(self, x, s, t, r1=1./3., r2=2./3., model_s=None, model_s1=None, return_intermediate=False, solver_type='dpm_solver'): + """ + Singlestep solver DPM-Solver-3 from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + r1: A `float`. The hyperparameter of the third-order solver. + r2: A `float`. The hyperparameter of the third-order solver. + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`). + If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. 
+ Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + if r1 is None: + r1 = 1. / 3. + if r2 is None: + r2 = 2. / 3. + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + lambda_s1 = lambda_s + r1 * h + lambda_s2 = lambda_s + r2 * h + s1 = ns.inverse_lambda(lambda_s1) + s2 = ns.inverse_lambda(lambda_s2) + log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t) + alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t) + + if self.predict_x0: + phi_11 = torch.expm1(-r1 * h) + phi_12 = torch.expm1(-r2 * h) + phi_1 = torch.expm1(-h) + phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1. + phi_2 = phi_1 / h + 1. + phi_3 = phi_2 / h - 0.5 + + if model_s is None: + model_s = self.model_fn(x, s) + if model_s1 is None: + x_s1 = ( + expand_dims(sigma_s1 / sigma_s, dims) * x + - expand_dims(alpha_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + x_s2 = ( + expand_dims(sigma_s2 / sigma_s, dims) * x + - expand_dims(alpha_s2 * phi_12, dims) * model_s + + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s) + ) + model_s2 = self.model_fn(x_s2, s2) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s) + ) + elif solver_type == 'taylor': + D1_0 = (1. / r1) * (model_s1 - model_s) + D1_1 = (1. / r2) * (model_s2 - model_s) + D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) + D2 = 2. * (D1_1 - D1_0) / (r2 - r1) + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + expand_dims(alpha_t * phi_2, dims) * D1 + - expand_dims(alpha_t * phi_3, dims) * D2 + ) + else: + phi_11 = torch.expm1(r1 * h) + phi_12 = torch.expm1(r2 * h) + phi_1 = torch.expm1(h) + phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1. + phi_2 = phi_1 / h - 1. + phi_3 = phi_2 / h - 0.5 + + if model_s is None: + model_s = self.model_fn(x, s) + if model_s1 is None: + x_s1 = ( + expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x + - expand_dims(sigma_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + x_s2 = ( + expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x + - expand_dims(sigma_s2 * phi_12, dims) * model_s + - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s) + ) + model_s2 = self.model_fn(x_s2, s2) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s) + ) + elif solver_type == 'taylor': + D1_0 = (1. / r1) * (model_s1 - model_s) + D1_1 = (1. / r2) * (model_s2 - model_s) + D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) + D2 = 2. 
* (D1_1 - D1_0) / (r2 - r1) + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - expand_dims(sigma_t * phi_2, dims) * D1 + - expand_dims(sigma_t * phi_3, dims) * D2 + ) + + if return_intermediate: + return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2} + else: + return x_t + + def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): + """ + Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + ns = self.noise_schedule + dims = x.dim() + model_prev_1, model_prev_0 = model_prev_list + t_prev_1, t_prev_0 = t_prev_list + lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) + log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) + sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + h_0 = lambda_prev_0 - lambda_prev_1 + h = lambda_t - lambda_prev_0 + r0 = h_0 / h + D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) + if self.predict_x0: + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 + ) + else: + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 + ) + return x_t + + def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'): + """ + Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. 
We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + ns = self.noise_schedule + dims = x.dim() + model_prev_2, model_prev_1, model_prev_0 = model_prev_list + t_prev_2, t_prev_1, t_prev_0 = t_prev_list + lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) + log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) + sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + h_1 = lambda_prev_1 - lambda_prev_2 + h_0 = lambda_prev_0 - lambda_prev_1 + h = lambda_t - lambda_prev_0 + r0, r1 = h_0 / h, h_1 / h + D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) + D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2) + D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1) + D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1) + if self.predict_x0: + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1 + - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h**2 - 0.5), dims) * D2 + ) + else: + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1 + - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h**2 - 0.5), dims) * D2 + ) + return x_t + + def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, r2=None): + """ + Singlestep DPM-Solver with the order `order` from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. + return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + r1: A `float`. The hyperparameter of the second-order or third-order solver. + r2: A `float`. The hyperparameter of the third-order solver. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if order == 1: + return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate) + elif order == 2: + return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1) + elif order == 3: + return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2) + else: + raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) + + def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'): + """ + Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. 
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if order == 1: + return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1]) + elif order == 2: + return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) + elif order == 3: + return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) + else: + raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) + + def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, solver_type='dpm_solver'): + """ + The adaptive step size solver based on singlestep DPM-Solver. + + Args: + x: A pytorch tensor. The initial value at time `t_T`. + order: A `int`. The (higher) order of the solver. We only support order == 2 or 3. + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + h_init: A `float`. The initial step size (for logSNR). + atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1]. + rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05. + theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1]. + t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the + current time and `t_0` is less than `t_err`. The default setting is 1e-5. + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_0: A pytorch tensor. The approximated solution at time `t_0`. + + [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021. + """ + ns = self.noise_schedule + s = t_T * torch.ones((x.shape[0],)).to(x) + lambda_s = ns.marginal_lambda(s) + lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x)) + h = h_init * torch.ones_like(s).to(x) + x_prev = x + nfe = 0 + if order == 2: + r1 = 0.5 + lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True) + higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs) + elif order == 3: + r1, r2 = 1. / 3., 2. / 3. 
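+ # compare a cheaper lower-order estimate against the higher-order one; their difference drives the step size control in the loop below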
+ lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type) + higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs) + else: + raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order)) + while torch.abs((s - t_0)).mean() > t_err: + t = ns.inverse_lambda(lambda_s + h) + x_lower, lower_noise_kwargs = lower_update(x, s, t) + x_higher = higher_update(x, s, t, **lower_noise_kwargs) + delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev))) + norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True)) + E = norm_fn((x_higher - x_lower) / delta).max() + if torch.all(E <= 1.): + x = x_higher + s = t + x_prev = x_lower + lambda_s = ns.marginal_lambda(s) + h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s) + nfe += order + print('adaptive solver nfe', nfe) + return x + + def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform', + method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', + atol=0.0078, rtol=0.05, + ): + """ + Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`. + + ===================================================== + + We support the following algorithms for both noise prediction model and data prediction model: + - 'singlestep': + Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver. + We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps). + The total number of function evaluations (NFE) == `steps`. + Given a fixed NFE == `steps`, the sampling procedure is: + - If `order` == 1: + - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM). + - If `order` == 2: + - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling. + - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2. + - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. + - If `order` == 3: + - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. + - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. + - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1. + - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2. + - 'multistep': + Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`. + We initialize the first `order` values by lower order multistep solvers. + Given a fixed NFE == `steps`, the sampling procedure is: + Denote K = steps. + - If `order` == 1: + - We use K steps of DPM-Solver-1 (i.e. DDIM). + - If `order` == 2: + - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2. + - If `order` == 3: + - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3. + - 'singlestep_fixed': + Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3). 
+ We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE. + - 'adaptive': + Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper). + We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`. + You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs + (NFE) and the sample quality. + - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2. + - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3. + + ===================================================== + + Some advice on choosing the algorithm: + - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs: + Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`. + e.g. + >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False) + >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3, + skip_type='time_uniform', method='singlestep') + - For **guided sampling with large guidance scale** by DPMs: + Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`. + e.g. + >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True) + >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2, + skip_type='time_uniform', method='multistep') + + We support three types of `skip_type`: + - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolution images.** + - 'time_uniform': uniform time for the time steps. **Recommended for high-resolution images.** + - 'time_quadratic': quadratic time for the time steps. + + ===================================================== + Args: + x: A pytorch tensor. The initial value at time `t_start`. + e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution. + steps: An `int`. The total number of function evaluations (NFE). + t_start: A `float`. The starting time of the sampling. + If `t_start` is None, we use self.noise_schedule.T (default is 1.0). + t_end: A `float`. The ending time of the sampling. + If `t_end` is None, we use 1. / self.noise_schedule.total_N. + e.g. if total_N == 1000, we have `t_end` == 1e-3. + For discrete-time DPMs: + - We recommend `t_end` == 1. / self.noise_schedule.total_N. + For continuous-time DPMs: + - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15. + order: An `int`. The order of DPM-Solver. + skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'. + method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'. + denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step. + Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1). + + This trick was first proposed by DDPM (https://arxiv.org/abs/2006.11239) and + score_sde (https://arxiv.org/abs/2011.13456). It can improve the FID when sampling + diffusion models by diffusion SDEs on low-resolution images + (such as CIFAR-10). However, we observed that this trick does not matter for + high-resolution images. As it needs an additional NFE, we do not recommend + it for high-resolution images. + lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
+ Only valid for `method=multistep` and `steps < 15`. We empirically find that + this trick is a key to stabilizing the sampling by DPM-Solver with very few steps + (especially for steps <= 10). So we recommend to set it to be `True`. + solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`. + atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. + rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. + Returns: + x_end: A pytorch tensor. The approximated solution at time `t_end`. + + """ + t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end + t_T = self.noise_schedule.T if t_start is None else t_start + device = x.device + if method == 'adaptive': + with torch.no_grad(): + x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type) + elif method == 'multistep': + assert steps >= order + timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) + assert timesteps.shape[0] - 1 == steps + with torch.no_grad(): + vec_t = timesteps[0].expand((x.shape[0])) + model_prev_list = [self.model_fn(x, vec_t)] + t_prev_list = [vec_t] + # Init the first `order` values by lower order multistep DPM-Solver. + for init_order in range(1, order): + vec_t = timesteps[init_order].expand(x.shape[0]) + x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, solver_type=solver_type) + model_prev_list.append(self.model_fn(x, vec_t)) + t_prev_list.append(vec_t) + # Compute the remaining values by `order`-th order multistep DPM-Solver. + for step in range(order, steps + 1): + vec_t = timesteps[step].expand(x.shape[0]) + if lower_order_final and steps < 15: + step_order = min(order, steps + 1 - step) + else: + step_order = order + x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, solver_type=solver_type) + for i in range(order - 1): + t_prev_list[i] = t_prev_list[i + 1] + model_prev_list[i] = model_prev_list[i + 1] + t_prev_list[-1] = vec_t + # We do not need to evaluate the final model value. 
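+ # (skipping that evaluation at the last timestep saves one NFE)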
+ if step < steps: + model_prev_list[-1] = self.model_fn(x, vec_t) + elif method in ['singlestep', 'singlestep_fixed']: + if method == 'singlestep': + timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device) + elif method == 'singlestep_fixed': + K = steps // order + orders = [order,] * K + timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device) + for i, order in enumerate(orders): + t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1] + timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), N=order, device=device) + lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner) + vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0]) + h = lambda_inner[-1] - lambda_inner[0] + r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h + r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h + x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2) + if denoise_to_zero: + x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) + return x + + + +############################################################# +# other utility functions +############################################################# + +def interpolate_fn(x, xp, yp): + """ + A piecewise linear function y = f(x), using xp and yp as keypoints. + We implement f(x) in a differentiable way (i.e. applicable for autograd). + The function f(x) is well-defined for all x. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.) + + Args: + x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver). + xp: PyTorch tensor with shape [C, K], where K is the number of keypoints. + yp: PyTorch tensor with shape [C, K]. + Returns: + The function values f(x), with shape [N, C]. + """ + N, K = x.shape[0], xp.shape[1] + all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2) + sorted_all_x, x_indices = torch.sort(all_x, dim=2) + x_idx = torch.argmin(x_indices, dim=2) + cand_start_idx = x_idx - 1 + start_idx = torch.where( + torch.eq(x_idx, 0), + torch.tensor(1, device=x.device), + torch.where( + torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, + ), + ) + end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1) + start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2) + end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2) + start_idx2 = torch.where( + torch.eq(x_idx, 0), + torch.tensor(0, device=x.device), + torch.where( + torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, + ), + ) + y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1) + start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2) + end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2) + cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x) + return cand + + +def expand_dims(v, dims): + """ + Expand the tensor `v` to `dims` dimensions. + + Args: + `v`: a PyTorch tensor with shape [N]. + `dims`: an `int`. The target number of dimensions. + Returns: + a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
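+ + Example: + >>> expand_dims(torch.ones(4), 3).shape + torch.Size([4, 1, 1])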
+ """ + return v[(...,) + (None,)*(dims - 1)] \ No newline at end of file diff --git a/ldm/models/diffusion/dpm_solver/sampler.py b/ldm/models/diffusion/dpm_solver/sampler.py new file mode 100644 index 000000000..2c42d6f96 --- /dev/null +++ b/ldm/models/diffusion/dpm_solver/sampler.py @@ -0,0 +1,82 @@ +"""SAMPLING ONLY.""" + +import torch + +from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver + + +class DPMSolverSampler(object): + def __init__(self, model, **kwargs): + super().__init__() + self.model = model + to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) + self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + + # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') + + device = self.model.betas.device + if x_T is None: + img = torch.randn(size, device=device) + else: + img = x_T + + ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) + + model_fn = model_wrapper( + lambda x, t, c: self.model.apply_model(x, t, c), + ns, + model_type="noise", + guidance_type="classifier-free", + condition=conditioning, + unconditional_condition=unconditional_conditioning, + guidance_scale=unconditional_guidance_scale, + ) + + dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False) + x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True) + + return x.to(device), None diff --git a/ldm/models/diffusion/plms.py b/ldm/models/diffusion/plms.py new file mode 100644 index 000000000..78eeb1003 --- /dev/null +++ b/ldm/models/diffusion/plms.py @@ -0,0 +1,236 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial + +from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like + + +class PLMSSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + if ddim_eta != 0: + raise ValueError('ddim_eta must be 0 for 
PLMS') + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
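+ # note (annotation, not upstream): any extra keyword arguments are accepted for signature compatibility with the other samplers and are ignored here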
+ **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for PLMS sampling is {size}') + + samples, intermediates = self.plms_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + ) + return samples, intermediates + + @torch.no_grad() + def plms_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None,): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img]} + time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running PLMS Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) + old_eps = [] + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
- mask) * img + + outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + old_eps=old_eps, t_next=ts_next) + img, pred_x0, e_t = outs + old_eps.append(e_t) + if len(old_eps) >= 4: + old_eps.pop(0) + if callback: callback(i) + if img_callback: img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None): + b, *_, device = *x.shape, x.device + + def get_model_output(x, t): + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == "eps" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + return e_t + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + + def get_x_prev_and_pred_x0(e_t, index): + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + # direction pointing to x_t + dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + e_t = get_model_output(x, t) + if len(old_eps) == 0: + # Pseudo Improved Euler (2nd order) + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) + e_t_next = get_model_output(x_prev, t_next) + e_t_prime = (e_t + e_t_next) / 2 + elif len(old_eps) == 1: + # 2nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (3 * e_t - old_eps[-1]) / 2 + elif len(old_eps) == 2: + # 3rd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 + elif len(old_eps) >= 3: + # 4th order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 + + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) + + return x_prev, pred_x0, e_t diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py new file mode 100644 index 000000000..f4eff39cc --- /dev/null +++ b/ldm/modules/attention.py @@ -0,0 +1,261 @@ +from inspect import isfunction +import math +import torch +import torch.nn.functional as F +from torch import nn, einsum +from einops import rearrange, repeat + +from ldm.modules.diffusionmodules.util import checkpoint + + +def exists(val): + return val is not None + + +def uniq(arr): + return {el: True for el in arr}.keys() + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def max_neg_value(t): + return -torch.finfo(t.dtype).max + + +def init_(tensor): + dim = tensor.shape[-1] + std = 1 / math.sqrt(dim) + tensor.uniform_(-std, std) + return tensor + + +# feedforward +class GEGLU(nn.Module): + def __init__(self, dim_in, dim_out): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out * 2) + + def forward(self, x): + x, gate = self.proj(x).chunk(2, dim=-1) + return x * F.gelu(gate) + + +class FeedForward(nn.Module): + def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): + super().__init__() + inner_dim = int(dim * mult) + dim_out = default(dim_out, dim) + project_in = nn.Sequential( + nn.Linear(dim, inner_dim), + nn.GELU() + ) if not glu else GEGLU(dim, inner_dim) + + self.net = nn.Sequential( + project_in, + nn.Dropout(dropout), + nn.Linear(inner_dim, dim_out) + ) + + def forward(self, x): + return self.net(x) + + +def zero_module(module): + """ + Zero out the parameters of a module and return it.
+ """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def Normalize(in_channels): + return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) + + +class LinearAttention(nn.Module): + def __init__(self, dim, heads=4, dim_head=32): + super().__init__() + self.heads = heads + hidden_dim = dim_head * heads + self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) + self.to_out = nn.Conv2d(hidden_dim, dim, 1) + + def forward(self, x): + b, c, h, w = x.shape + qkv = self.to_qkv(x) + q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) + k = k.softmax(dim=-1) + context = torch.einsum('bhdn,bhen->bhde', k, v) + out = torch.einsum('bhde,bhdn->bhen', context, q) + out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) + return self.to_out(out) + + +class SpatialSelfAttention(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.k = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.v = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.proj_out = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b,c,h,w = q.shape + q = rearrange(q, 'b c h w -> b (h w) c') + k = rearrange(k, 'b c h w -> b c (h w)') + w_ = torch.einsum('bij,bjk->bik', q, k) + + w_ = w_ * (int(c)**(-0.5)) + w_ = torch.nn.functional.softmax(w_, dim=2) + + # attend to values + v = rearrange(v, 'b c h w -> b c (h w)') + w_ = rearrange(w_, 'b i j -> b j i') + h_ = torch.einsum('bij,bjk->bik', v, w_) + h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) + h_ = self.proj_out(h_) + + return x+h_ + + +class CrossAttention(nn.Module): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): + super().__init__() + inner_dim = dim_head * heads + context_dim = default(context_dim, query_dim) + + self.scale = dim_head ** -0.5 + self.heads = heads + + self.to_q = nn.Linear(query_dim, inner_dim, bias=False) + self.to_k = nn.Linear(context_dim, inner_dim, bias=False) + self.to_v = nn.Linear(context_dim, inner_dim, bias=False) + + self.to_out = nn.Sequential( + nn.Linear(inner_dim, query_dim), + nn.Dropout(dropout) + ) + + def forward(self, x, context=None, mask=None): + h = self.heads + + q = self.to_q(x) + context = default(context, x) + k = self.to_k(context) + v = self.to_v(context) + + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + + sim = einsum('b i d, b j d -> b i j', q, k) * self.scale + + if exists(mask): + mask = rearrange(mask, 'b ... 
-> b (...)') + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, 'b j -> (b h) () j', h=h) + sim.masked_fill_(~mask, max_neg_value) + + # attention, what we cannot get enough of + attn = sim.softmax(dim=-1) + + out = einsum('b i j, b j d -> b i d', attn, v) + out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + return self.to_out(out) + + +class BasicTransformerBlock(nn.Module): + def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True): + super().__init__() + self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is a self-attention + self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) + self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, + heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is None + self.norm1 = nn.LayerNorm(dim) + self.norm2 = nn.LayerNorm(dim) + self.norm3 = nn.LayerNorm(dim) + self.checkpoint = checkpoint + + def forward(self, x, context=None): + return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) + + def _forward(self, x, context=None): + x = self.attn1(self.norm1(x)) + x + x = self.attn2(self.norm2(x), context=context) + x + x = self.ff(self.norm3(x)) + x + return x + + +class SpatialTransformer(nn.Module): + """ + Transformer block for image-like data. + First, project the input (aka embedding) + and reshape to b, t, d. + Then apply standard transformer blocks. + Finally, reshape back to an image. + """ + def __init__(self, in_channels, n_heads, d_head, + depth=1, dropout=0., context_dim=None): + super().__init__() + self.in_channels = in_channels + inner_dim = n_heads * d_head + self.norm = Normalize(in_channels) + + self.proj_in = nn.Conv2d(in_channels, + inner_dim, + kernel_size=1, + stride=1, + padding=0) + + self.transformer_blocks = nn.ModuleList( + [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) + for d in range(depth)] + ) + + self.proj_out = zero_module(nn.Conv2d(inner_dim, + in_channels, + kernel_size=1, + stride=1, + padding=0)) + + def forward(self, x, context=None): + # note: if no context is given, cross-attention defaults to self-attention + b, c, h, w = x.shape + x_in = x + x = self.norm(x) + x = self.proj_in(x) + x = rearrange(x, 'b c h w -> b (h w) c') + for block in self.transformer_blocks: + x = block(x, context=context) + x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w) + x = self.proj_out(x) + return x + x_in \ No newline at end of file diff --git a/ldm/modules/diffusionmodules/__init__.py b/ldm/modules/diffusionmodules/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ldm/modules/diffusionmodules/model.py b/ldm/modules/diffusionmodules/model.py new file mode 100644 index 000000000..533e589a2 --- /dev/null +++ b/ldm/modules/diffusionmodules/model.py @@ -0,0 +1,835 @@ +# pytorch_diffusion + derived encoder decoder +import math +import torch +import torch.nn as nn +import numpy as np +from einops import rearrange + +from ldm.util import instantiate_from_config +from ldm.modules.attention import LinearAttention +from ldm.modules.distributions.distributions import DiagonalGaussianDistribution # used by FirstStagePostProcessor.encode_with_pretrained below; missing from the original imports + + +def get_timestep_embedding(timesteps, embedding_dim): + """ + Build sinusoidal timestep embeddings, as in Denoising Diffusion Probabilistic Models + (ported from the Fairseq implementation). + This matches the implementation in tensor2tensor, but differs slightly + from the description in Section 3.5 of "Attention Is All You Need".
+ """ + assert len(timesteps.shape) == 1 + + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) + emb = emb.to(device=timesteps.device) + emb = timesteps.float()[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0,1,0,0)) + return emb + + +def nonlinearity(x): + # swish + return x*torch.sigmoid(x) + + +def Normalize(in_channels, num_groups=32): + return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) + + +class Upsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") + if self.with_conv: + x = self.conv(x) + return x + + +class Downsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=3, + stride=2, + padding=0) + + def forward(self, x): + if self.with_conv: + pad = (0,1,0,1) + x = torch.nn.functional.pad(x, pad, mode="constant", value=0) + x = self.conv(x) + else: + x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) + return x + + +class ResnetBlock(nn.Module): + def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, + dropout, temb_channels=512): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + + self.norm1 = Normalize(in_channels) + self.conv1 = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + if temb_channels > 0: + self.temb_proj = torch.nn.Linear(temb_channels, + out_channels) + self.norm2 = Normalize(out_channels) + self.dropout = torch.nn.Dropout(dropout) + self.conv2 = torch.nn.Conv2d(out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + self.conv_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + else: + self.nin_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, x, temb): + h = x + h = self.norm1(h) + h = nonlinearity(h) + h = self.conv1(h) + + if temb is not None: + h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] + + h = self.norm2(h) + h = nonlinearity(h) + h = self.dropout(h) + h = self.conv2(h) + + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + x = self.conv_shortcut(x) + else: + x = self.nin_shortcut(x) + + return x+h + + +class LinAttnBlock(LinearAttention): + """to match AttnBlock usage""" + def __init__(self, in_channels): + super().__init__(dim=in_channels, heads=1, dim_head=in_channels) + + +class AttnBlock(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.k = 
torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.v = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.proj_out = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b,c,h,w = q.shape + q = q.reshape(b,c,h*w) + q = q.permute(0,2,1) # b,hw,c + k = k.reshape(b,c,h*w) # b,c,hw + w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] + w_ = w_ * (int(c)**(-0.5)) + w_ = torch.nn.functional.softmax(w_, dim=2) + + # attend to values + v = v.reshape(b,c,h*w) + w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) + h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] + h_ = h_.reshape(b,c,h,w) + + h_ = self.proj_out(h_) + + return x+h_ + + +def make_attn(in_channels, attn_type="vanilla"): + assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown' + print(f"making attention of type '{attn_type}' with {in_channels} in_channels") + if attn_type == "vanilla": + return AttnBlock(in_channels) + elif attn_type == "none": + return nn.Identity(in_channels) + else: + return LinAttnBlock(in_channels) + + +class Model(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = self.ch*4 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + self.use_timestep = use_timestep + if self.use_timestep: + # timestep embedding + self.temb = nn.Module() + self.temb.dense = nn.ModuleList([ + torch.nn.Linear(self.ch, + self.temb_ch), + torch.nn.Linear(self.temb_ch, + self.temb_ch), + ]) + + # downsampling + self.conv_in = torch.nn.Conv2d(in_channels, + self.ch, + kernel_size=3, + stride=1, + padding=1) + + curr_res = resolution + in_ch_mult = (1,)+tuple(ch_mult) + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch*in_ch_mult[i_level] + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions-1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch*ch_mult[i_level] + skip_in = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks+1): + 
if i_block == self.num_res_blocks: + skip_in = ch*in_ch_mult[i_level] + block.append(ResnetBlock(in_channels=block_in+skip_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_ch, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x, t=None, context=None): + #assert x.shape[2] == x.shape[3] == self.resolution + if context is not None: + # assume aligned context, cat along channel axis + x = torch.cat((x, context), dim=1) + if self.use_timestep: + # timestep embedding + assert t is not None + temb = get_timestep_embedding(t, self.ch) + temb = self.temb.dense[0](temb) + temb = nonlinearity(temb) + temb = self.temb.dense[1](temb) + else: + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions-1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks+1): + h = self.up[i_level].block[i_block]( + torch.cat([h, hs.pop()], dim=1), temb) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + def get_last_layer(self): + return self.conv_out.weight + + +class Encoder(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla", + **ignore_kwargs): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + # downsampling + self.conv_in = torch.nn.Conv2d(in_channels, + self.ch, + kernel_size=3, + stride=1, + padding=1) + + curr_res = resolution + in_ch_mult = (1,)+tuple(ch_mult) + self.in_ch_mult = in_ch_mult + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch*in_ch_mult[i_level] + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions-1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + 
self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + 2*z_channels if double_z else z_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + # timestep embedding + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions-1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class Decoder(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, + attn_type="vanilla", **ignorekwargs): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + self.give_pre_end = give_pre_end + self.tanh_out = tanh_out + + # compute in_ch_mult, block_in and curr_res at lowest res + in_ch_mult = (1,)+tuple(ch_mult) + block_in = ch*ch_mult[self.num_resolutions-1] + curr_res = resolution // 2**(self.num_resolutions-1) + self.z_shape = (1,z_channels,curr_res,curr_res) + print("Working with z of shape {} = {} dimensions.".format( + self.z_shape, np.prod(self.z_shape))) + + # z to block_in + self.conv_in = torch.nn.Conv2d(z_channels, + block_in, + kernel_size=3, + stride=1, + padding=1) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks+1): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_ch, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, z): + #assert z.shape[1:] == self.z_shape[1:] + self.last_z_shape = z.shape + + # timestep embedding + temb = None + + # z to block_in + h = 
self.conv_in(z) + + # middle + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks+1): + h = self.up[i_level].block[i_block](h, temb) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + if self.give_pre_end: + return h + + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + if self.tanh_out: + h = torch.tanh(h) + return h + + +class SimpleDecoder(nn.Module): + def __init__(self, in_channels, out_channels, *args, **kwargs): + super().__init__() + self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), + ResnetBlock(in_channels=in_channels, + out_channels=2 * in_channels, + temb_channels=0, dropout=0.0), + ResnetBlock(in_channels=2 * in_channels, + out_channels=4 * in_channels, + temb_channels=0, dropout=0.0), + ResnetBlock(in_channels=4 * in_channels, + out_channels=2 * in_channels, + temb_channels=0, dropout=0.0), + nn.Conv2d(2*in_channels, in_channels, 1), + Upsample(in_channels, with_conv=True)]) + # end + self.norm_out = Normalize(in_channels) + self.conv_out = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + for i, layer in enumerate(self.model): + if i in [1,2,3]: + x = layer(x, None) + else: + x = layer(x) + + h = self.norm_out(x) + h = nonlinearity(h) + x = self.conv_out(h) + return x + + +class UpsampleDecoder(nn.Module): + def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, + ch_mult=(2,2), dropout=0.0): + super().__init__() + # upsampling + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + block_in = in_channels + curr_res = resolution // 2 ** (self.num_resolutions - 1) + self.res_blocks = nn.ModuleList() + self.upsample_blocks = nn.ModuleList() + for i_level in range(self.num_resolutions): + res_block = [] + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + res_block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + self.res_blocks.append(nn.ModuleList(res_block)) + if i_level != self.num_resolutions - 1: + self.upsample_blocks.append(Upsample(block_in, True)) + curr_res = curr_res * 2 + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + # upsampling + h = x + for k, i_level in enumerate(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.res_blocks[i_level][i_block](h, None) + if i_level != self.num_resolutions - 1: + h = self.upsample_blocks[k](h) + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class LatentRescaler(nn.Module): + def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): + super().__init__() + # residual block, interpolate, residual block + self.factor = factor + self.conv_in = nn.Conv2d(in_channels, + mid_channels, + kernel_size=3, + stride=1, + padding=1) + self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0) for _ in range(depth)]) + self.attn = AttnBlock(mid_channels) + self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, 
+ out_channels=mid_channels, + temb_channels=0, + dropout=0.0) for _ in range(depth)]) + + self.conv_out = nn.Conv2d(mid_channels, + out_channels, + kernel_size=1, + ) + + def forward(self, x): + x = self.conv_in(x) + for block in self.res_block1: + x = block(x, None) + x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor)))) + x = self.attn(x) + for block in self.res_block2: + x = block(x, None) + x = self.conv_out(x) + return x + + +class MergedRescaleEncoder(nn.Module): + def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, + ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1): + super().__init__() + intermediate_chn = ch * ch_mult[-1] + self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult, + z_channels=intermediate_chn, double_z=False, resolution=resolution, + attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv, + out_ch=None) + self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn, + mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth) + + def forward(self, x): + x = self.encoder(x) + x = self.rescaler(x) + return x + + +class MergedRescaleDecoder(nn.Module): + def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8), + dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1): + super().__init__() + tmp_chn = z_channels*ch_mult[-1] + self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout, + resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks, + ch_mult=ch_mult, resolution=resolution, ch=ch) + self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn, + out_channels=tmp_chn, depth=rescale_module_depth) + + def forward(self, x): + x = self.rescaler(x) + x = self.decoder(x) + return x + + +class Upsampler(nn.Module): + def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2): + super().__init__() + assert out_size >= in_size + num_blocks = int(np.log2(out_size//in_size))+1 + factor_up = 1.+ (out_size % in_size) + print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}") + self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels, + out_channels=in_channels) + self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2, + attn_resolutions=[], in_channels=None, ch=in_channels, + ch_mult=[ch_mult for _ in range(num_blocks)]) + + def forward(self, x): + x = self.rescaler(x) + x = self.decoder(x) + return x + + +class Resize(nn.Module): + def __init__(self, in_channels=None, learned=False, mode="bilinear"): + super().__init__() + self.with_conv = learned + self.mode = mode + if self.with_conv: + print(f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode") + raise NotImplementedError() + assert in_channels is not None + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=4, + stride=2, + padding=1) + + def forward(self, x, scale_factor=1.0): + if scale_factor==1.0: + return x + else: + x = torch.nn.functional.interpolate(x,
mode=self.mode, align_corners=False, scale_factor=scale_factor) + return x + +class FirstStagePostProcessor(nn.Module): + + def __init__(self, ch_mult:list, in_channels, + pretrained_model:nn.Module=None, + reshape=False, + n_channels=None, + dropout=0., + pretrained_config=None): + super().__init__() + if pretrained_config is None: + assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' + self.pretrained_model = pretrained_model + else: + assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' + self.instantiate_pretrained(pretrained_config) + + self.do_reshape = reshape + + if n_channels is None: + n_channels = self.pretrained_model.encoder.ch + + self.proj_norm = Normalize(in_channels,num_groups=in_channels//2) + self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3, + stride=1,padding=1) + + blocks = [] + downs = [] + ch_in = n_channels + for m in ch_mult: + blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout)) + ch_in = m * n_channels + downs.append(Downsample(ch_in, with_conv=False)) + + self.model = nn.ModuleList(blocks) + self.downsampler = nn.ModuleList(downs) + + + def instantiate_pretrained(self, config): + model = instantiate_from_config(config) + self.pretrained_model = model.eval() + # self.pretrained_model.train = False + for param in self.pretrained_model.parameters(): + param.requires_grad = False + + + @torch.no_grad() + def encode_with_pretrained(self,x): + c = self.pretrained_model.encode(x) + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + return c + + def forward(self,x): + z_fs = self.encode_with_pretrained(x) + z = self.proj_norm(z_fs) + z = self.proj(z) + z = nonlinearity(z) + + for submodel, downmodel in zip(self.model,self.downsampler): + z = submodel(z,temb=None) + z = downmodel(z) + + if self.do_reshape: + z = rearrange(z,'b c h w -> b (h w) c') + return z + diff --git a/ldm/modules/diffusionmodules/openaimodel.py b/ldm/modules/diffusionmodules/openaimodel.py new file mode 100644 index 000000000..fcf95d1ea --- /dev/null +++ b/ldm/modules/diffusionmodules/openaimodel.py @@ -0,0 +1,961 @@ +from abc import abstractmethod +from functools import partial +import math +from typing import Iterable + +import numpy as np +import torch as th +import torch.nn as nn +import torch.nn.functional as F + +from ldm.modules.diffusionmodules.util import ( + checkpoint, + conv_nd, + linear, + avg_pool_nd, + zero_module, + normalization, + timestep_embedding, +) +from ldm.modules.attention import SpatialTransformer + + +# dummy replace +def convert_module_to_f16(x): + pass + +def convert_module_to_f32(x): + pass + + +## go +class AttentionPool2d(nn.Module): + """ + Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py + """ + + def __init__( + self, + spacial_dim: int, + embed_dim: int, + num_heads_channels: int, + output_dim: int = None, + ): + super().__init__() + self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) + self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) + self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) + self.num_heads = embed_dim // num_heads_channels + self.attention = QKVAttention(self.num_heads) + + def forward(self, x): + b, c, *_spatial = x.shape + x = x.reshape(b, c, -1) # NC(HW) + x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) + x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) 
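+ # note (annotation, not upstream): attention below runs over all HW+1 tokens; only the first (mean-pooled) token is returned as the pooled feature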
+ x = self.qkv_proj(x) + x = self.attention(x) + x = self.c_proj(x) + return x[:, :, 0] + + +class TimestepBlock(nn.Module): + """ + Any module where forward() takes timestep embeddings as a second argument. + """ + + @abstractmethod + def forward(self, x, emb): + """ + Apply the module to `x` given `emb` timestep embeddings. + """ + + +class TimestepEmbedSequential(nn.Sequential, TimestepBlock): + """ + A sequential module that passes timestep embeddings to the children that + support it as an extra input. + """ + + def forward(self, x, emb, context=None): + for layer in self: + if isinstance(layer, TimestepBlock): + x = layer(x, emb) + elif isinstance(layer, SpatialTransformer): + x = layer(x, context) + else: + x = layer(x) + return x + + +class Upsample(nn.Module): + """ + An upsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + upsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + if use_conv: + self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) + + def forward(self, x): + assert x.shape[1] == self.channels + if self.dims == 3: + x = F.interpolate( + x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" + ) + else: + x = F.interpolate(x, scale_factor=2, mode="nearest") + if self.use_conv: + x = self.conv(x) + return x + +class TransposedUpsample(nn.Module): + 'Learned 2x upsampling without padding' + def __init__(self, channels, out_channels=None, ks=5): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + + self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) + + def forward(self,x): + return self.up(x) + + +class Downsample(nn.Module): + """ + A downsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + downsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + stride = 2 if dims != 3 else (1, 2, 2) + if use_conv: + self.op = conv_nd( + dims, self.channels, self.out_channels, 3, stride=stride, padding=padding + ) + else: + assert self.channels == self.out_channels + self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) + + def forward(self, x): + assert x.shape[1] == self.channels + return self.op(x) + + +class ResBlock(TimestepBlock): + """ + A residual block that can optionally change the number of channels. + :param channels: the number of input channels. + :param emb_channels: the number of timestep embedding channels. + :param dropout: the rate of dropout. + :param out_channels: if specified, the number of out channels. + :param use_conv: if True and out_channels is specified, use a spatial + convolution instead of a smaller 1x1 convolution to change the + channels in the skip connection. + :param dims: determines if the signal is 1D, 2D, or 3D. 
+ :param use_checkpoint: if True, use gradient checkpointing on this module. + :param up: if True, use this block for upsampling. + :param down: if True, use this block for downsampling. + """ + + def __init__( + self, + channels, + emb_channels, + dropout, + out_channels=None, + use_conv=False, + use_scale_shift_norm=False, + dims=2, + use_checkpoint=False, + up=False, + down=False, + ): + super().__init__() + self.channels = channels + self.emb_channels = emb_channels + self.dropout = dropout + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_checkpoint = use_checkpoint + self.use_scale_shift_norm = use_scale_shift_norm + + self.in_layers = nn.Sequential( + normalization(channels), + nn.SiLU(), + conv_nd(dims, channels, self.out_channels, 3, padding=1), + ) + + self.updown = up or down + + if up: + self.h_upd = Upsample(channels, False, dims) + self.x_upd = Upsample(channels, False, dims) + elif down: + self.h_upd = Downsample(channels, False, dims) + self.x_upd = Downsample(channels, False, dims) + else: + self.h_upd = self.x_upd = nn.Identity() + + self.emb_layers = nn.Sequential( + nn.SiLU(), + linear( + emb_channels, + 2 * self.out_channels if use_scale_shift_norm else self.out_channels, + ), + ) + self.out_layers = nn.Sequential( + normalization(self.out_channels), + nn.SiLU(), + nn.Dropout(p=dropout), + zero_module( + conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) + ), + ) + + if self.out_channels == channels: + self.skip_connection = nn.Identity() + elif use_conv: + self.skip_connection = conv_nd( + dims, channels, self.out_channels, 3, padding=1 + ) + else: + self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) + + def forward(self, x, emb): + """ + Apply the block to a Tensor, conditioned on a timestep embedding. + :param x: an [N x C x ...] Tensor of features. + :param emb: an [N x emb_channels] Tensor of timestep embeddings. + :return: an [N x C x ...] Tensor of outputs. + """ + return checkpoint( + self._forward, (x, emb), self.parameters(), self.use_checkpoint + ) + + + def _forward(self, x, emb): + if self.updown: + in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] + h = in_rest(x) + h = self.h_upd(h) + x = self.x_upd(x) + h = in_conv(h) + else: + h = self.in_layers(x) + emb_out = self.emb_layers(emb).type(h.dtype) + while len(emb_out.shape) < len(h.shape): + emb_out = emb_out[..., None] + if self.use_scale_shift_norm: + out_norm, out_rest = self.out_layers[0], self.out_layers[1:] + scale, shift = th.chunk(emb_out, 2, dim=1) + h = out_norm(h) * (1 + scale) + shift + h = out_rest(h) + else: + h = h + emb_out + h = self.out_layers(h) + return self.skip_connection(x) + h + + +class AttentionBlock(nn.Module): + """ + An attention block that allows spatial positions to attend to each other. + Originally ported from here, but adapted to the N-d case. + https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. 
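+ Here the qkv and output projections are 1x1 convolutions over spatial positions flattened to [N x C x (H*W)].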
+ """ + + def __init__( + self, + channels, + num_heads=1, + num_head_channels=-1, + use_checkpoint=False, + use_new_attention_order=False, + ): + super().__init__() + self.channels = channels + if num_head_channels == -1: + self.num_heads = num_heads + else: + assert ( + channels % num_head_channels == 0 + ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" + self.num_heads = channels // num_head_channels + self.use_checkpoint = use_checkpoint + self.norm = normalization(channels) + self.qkv = conv_nd(1, channels, channels * 3, 1) + if use_new_attention_order: + # split qkv before split heads + self.attention = QKVAttention(self.num_heads) + else: + # split heads before split qkv + self.attention = QKVAttentionLegacy(self.num_heads) + + self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) + + def forward(self, x): + return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! + #return pt_checkpoint(self._forward, x) # pytorch + + def _forward(self, x): + b, c, *spatial = x.shape + x = x.reshape(b, c, -1) + qkv = self.qkv(self.norm(x)) + h = self.attention(qkv) + h = self.proj_out(h) + return (x + h).reshape(b, c, *spatial) + + +def count_flops_attn(model, _x, y): + """ + A counter for the `thop` package to count the operations in an + attention operation. + Meant to be used like: + macs, params = thop.profile( + model, + inputs=(inputs, timestamps), + custom_ops={QKVAttention: QKVAttention.count_flops}, + ) + """ + b, c, *spatial = y[0].shape + num_spatial = int(np.prod(spatial)) + # We perform two matmuls with the same number of ops. + # The first computes the weight matrix, the second computes + # the combination of the value vectors. + matmul_ops = 2 * b * (num_spatial ** 2) * c + model.total_ops += th.DoubleTensor([matmul_ops]) + + +class QKVAttentionLegacy(nn.Module): + """ + A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. + """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", q * scale, k * scale + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class QKVAttention(nn.Module): + """ + A module which performs QKV attention and splits in a different order. + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. 
+ """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.chunk(3, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", + (q * scale).view(bs * self.n_heads, ch, length), + (k * scale).view(bs * self.n_heads, ch, length), + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class UNetModel(nn.Module): + """ + The full UNet model with attention and timestep embedding. + :param in_channels: channels in the input Tensor. + :param model_channels: base channel count for the model. + :param out_channels: channels in the output Tensor. + :param num_res_blocks: number of residual blocks per downsample. + :param attention_resolutions: a collection of downsample rates at which + attention will take place. May be a set, list, or tuple. + For example, if this contains 4, then at 4x downsampling, attention + will be used. + :param dropout: the dropout probability. + :param channel_mult: channel multiplier for each level of the UNet. + :param conv_resample: if True, use learned convolutions for upsampling and + downsampling. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param num_classes: if specified (as an int), then this model will be + class-conditional with `num_classes` classes. + :param use_checkpoint: use gradient checkpointing to reduce memory usage. + :param num_heads: the number of attention heads in each attention layer. + :param num_heads_channels: if specified, ignore num_heads and instead use + a fixed channel width per attention head. + :param num_heads_upsample: works with num_heads to set a different number + of heads for upsampling. Deprecated. + :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. + :param resblock_updown: use residual blocks for up/downsampling. + :param use_new_attention_order: use a different attention pattern for potentially + increased efficiency. + """ + + def __init__( + self, + image_size, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + num_classes=None, + use_checkpoint=False, + use_fp16=False, + num_heads=-1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + use_spatial_transformer=False, # custom transformer support + transformer_depth=1, # custom transformer support + context_dim=None, # custom transformer support + n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model + legacy=True, + ): + super().__init__() + if use_spatial_transformer: + assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' + + if context_dim is not None: + assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
+ from omegaconf.listconfig import ListConfig + if type(context_dim) == ListConfig: + context_dim = list(context_dim) + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + if num_heads == -1: + assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' + + if num_head_channels == -1: + assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' + + self.image_size = image_size + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + self.num_res_blocks = num_res_blocks + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.num_classes = num_classes + self.use_checkpoint = use_checkpoint + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + self.predict_codebook_ids = n_embed is not None + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + if self.num_classes is not None: + self.label_emb = nn.Embedding(num_classes, time_embed_dim) + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for _ in range(num_res_blocks): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + 
num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + + self.output_blocks = nn.ModuleList([]) + for level, mult in list(enumerate(channel_mult))[::-1]: + for i in range(num_res_blocks + 1): + ich = input_block_chans.pop() + layers = [ + ResBlock( + ch + ich, + time_embed_dim, + dropout, + out_channels=model_channels * mult, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = model_channels * mult + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads_upsample, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim + ) + ) + if level and i == num_res_blocks: + out_ch = ch + layers.append( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + up=True, + ) + if resblock_updown + else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) + ) + ds //= 2 + self.output_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), + ) + if self.predict_codebook_ids: + self.id_predictor = nn.Sequential( + normalization(ch), + conv_nd(dims, model_channels, n_embed, 1), + #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits + ) + + def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + self.output_blocks.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + self.output_blocks.apply(convert_module_to_f32) + + def forward(self, x, timesteps=None, context=None, y=None,**kwargs): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :param context: conditioning plugged in via crossattn + :param y: an [N] Tensor of labels, if class-conditional. + :return: an [N x C x ...] Tensor of outputs. 
+ """ + assert (y is not None) == ( + self.num_classes is not None + ), "must specify y if and only if the model is class-conditional" + hs = [] + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + emb = self.time_embed(t_emb) + + if self.num_classes is not None: + assert y.shape == (x.shape[0],) + emb = emb + self.label_emb(y) + + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb, context) + hs.append(h) + h = self.middle_block(h, emb, context) + for module in self.output_blocks: + h = th.cat([h, hs.pop()], dim=1) + h = module(h, emb, context) + h = h.type(x.dtype) + if self.predict_codebook_ids: + return self.id_predictor(h) + else: + return self.out(h) + + +class EncoderUNetModel(nn.Module): + """ + The half UNet model with attention and timestep embedding. + For usage, see UNet. + """ + + def __init__( + self, + image_size, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + use_checkpoint=False, + use_fp16=False, + num_heads=1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + pool="adaptive", + *args, + **kwargs + ): + super().__init__() + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + self.num_res_blocks = num_res_blocks + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.use_checkpoint = use_checkpoint + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for _ in range(num_res_blocks): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=num_head_channels, + use_new_attention_order=use_new_attention_order, + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + 
dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=num_head_channels, + use_new_attention_order=use_new_attention_order, + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + self.pool = pool + if pool == "adaptive": + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + nn.AdaptiveAvgPool2d((1, 1)), + zero_module(conv_nd(dims, ch, out_channels, 1)), + nn.Flatten(), + ) + elif pool == "attention": + assert num_head_channels != -1 + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + AttentionPool2d( + (image_size // ds), ch, num_head_channels, out_channels + ), + ) + elif pool == "spatial": + self.out = nn.Sequential( + nn.Linear(self._feature_size, 2048), + nn.ReLU(), + nn.Linear(2048, self.out_channels), + ) + elif pool == "spatial_v2": + self.out = nn.Sequential( + nn.Linear(self._feature_size, 2048), + normalization(2048), + nn.SiLU(), + nn.Linear(2048, self.out_channels), + ) + else: + raise NotImplementedError(f"Unexpected {pool} pooling") + + def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + + def forward(self, x, timesteps): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :return: an [N x K] Tensor of outputs. + """ + emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) + + results = [] + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb) + if self.pool.startswith("spatial"): + results.append(h.type(x.dtype).mean(dim=(2, 3))) + h = self.middle_block(h, emb) + if self.pool.startswith("spatial"): + results.append(h.type(x.dtype).mean(dim=(2, 3))) + h = th.cat(results, axis=-1) + return self.out(h) + else: + h = h.type(x.dtype) + return self.out(h) + diff --git a/ldm/modules/diffusionmodules/util.py b/ldm/modules/diffusionmodules/util.py new file mode 100644 index 000000000..a952e6c40 --- /dev/null +++ b/ldm/modules/diffusionmodules/util.py @@ -0,0 +1,267 @@ +# adopted from +# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py +# and +# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +# and +# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py +# +# thanks! 
+
+
+import os
+import math
+import torch
+import torch.nn as nn
+import numpy as np
+from einops import repeat
+
+from ldm.util import instantiate_from_config
+
+
+def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
+    if schedule == "linear":
+        betas = (
+                torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
+        )
+
+    elif schedule == "cosine":
+        timesteps = (
+                torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
+        )
+        alphas = timesteps / (1 + cosine_s) * np.pi / 2
+        alphas = torch.cos(alphas).pow(2)
+        alphas = alphas / alphas[0]
+        betas = 1 - alphas[1:] / alphas[:-1]
+        betas = np.clip(betas, a_min=0, a_max=0.999)
+
+    elif schedule == "sqrt_linear":
+        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
+    elif schedule == "sqrt":
+        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
+    else:
+        raise ValueError(f"schedule '{schedule}' unknown.")
+    return betas.numpy()
+
+
+def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
+    if ddim_discr_method == 'uniform':
+        c = num_ddpm_timesteps // num_ddim_timesteps
+        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
+    elif ddim_discr_method == 'quad':
+        ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
+    else:
+        raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
+
+    # assert ddim_timesteps.shape[0] == num_ddim_timesteps
+    # add one to get the final alpha values right (the ones from first scale to data during sampling)
+    steps_out = ddim_timesteps + 1
+    if verbose:
+        print(f'Selected timesteps for ddim sampler: {steps_out}')
+    return steps_out
+
+
+def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
+    # select alphas for computing the variance schedule
+    alphas = alphacums[ddim_timesteps]
+    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
+
+    # according to the formula provided in https://arxiv.org/abs/2010.02502
+    sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
+    if verbose:
+        print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
+        print(f'For the chosen value of eta, which is {eta}, '
+              f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
+    return sigmas, alphas, alphas_prev
+
+
+def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
+    """
+    Create a beta schedule that discretizes the given alpha_t_bar function,
+    which defines the cumulative product of (1-beta) over time from t = [0,1].
+    :param num_diffusion_timesteps: the number of betas to produce.
+    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
+                      produces the cumulative product of (1-beta) up to that
+                      part of the diffusion process.
+    :param max_beta: the maximum beta to use; use values lower than 1 to
+                     prevent singularities.
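+    Example (annotation, not upstream): the cosine schedule from the
+    improved-diffusion codebase corresponds to
+        betas_for_alpha_bar(1000, lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2)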
+ """ + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return np.array(betas) + + +def extract_into_tensor(a, t, x_shape): + b, *_ = t.shape + out = a.gather(-1, t) + return out.reshape(b, *((1,) * (len(x_shape) - 1))) + + +def checkpoint(func, inputs, params, flag): + """ + Evaluate a function without caching intermediate activations, allowing for + reduced memory at the expense of extra compute in the backward pass. + :param func: the function to evaluate. + :param inputs: the argument sequence to pass to `func`. + :param params: a sequence of parameters `func` depends on but does not + explicitly take as arguments. + :param flag: if False, disable gradient checkpointing. + """ + if flag: + args = tuple(inputs) + tuple(params) + return CheckpointFunction.apply(func, len(inputs), *args) + else: + return func(*inputs) + + +class CheckpointFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, run_function, length, *args): + ctx.run_function = run_function + ctx.input_tensors = list(args[:length]) + ctx.input_params = list(args[length:]) + + with torch.no_grad(): + output_tensors = ctx.run_function(*ctx.input_tensors) + return output_tensors + + @staticmethod + def backward(ctx, *output_grads): + ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] + with torch.enable_grad(): + # Fixes a bug where the first op in run_function modifies the + # Tensor storage in place, which is not allowed for detach()'d + # Tensors. + shallow_copies = [x.view_as(x) for x in ctx.input_tensors] + output_tensors = ctx.run_function(*shallow_copies) + input_grads = torch.autograd.grad( + output_tensors, + ctx.input_tensors + ctx.input_params, + output_grads, + allow_unused=True, + ) + del ctx.input_tensors + del ctx.input_params + del output_tensors + return (None, None) + input_grads + + +def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): + """ + Create sinusoidal timestep embeddings. + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an [N x dim] Tensor of positional embeddings. + """ + if not repeat_only: + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=timesteps.device) + args = timesteps[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + else: + embedding = repeat(timesteps, 'b -> b d', d=dim) + return embedding + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def scale_module(module, scale): + """ + Scale the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().mul_(scale) + return module + + +def mean_flat(tensor): + """ + Take the mean over all non-batch dimensions. + """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def normalization(channels): + """ + Make a standard normalization layer. + :param channels: number of input channels. + :return: an nn.Module for normalization. 
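+    Example (annotation, not upstream): normalization(320) returns
+    GroupNorm32(32, 320), i.e. GroupNorm over 32 groups computed in float32
+    (see GroupNorm32 below).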
+ """ + return GroupNorm32(32, channels) + + +# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. +class SiLU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(x) + + +class GroupNorm32(nn.GroupNorm): + def forward(self, x): + return super().forward(x.float()).type(x.dtype) + +def conv_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D convolution module. + """ + if dims == 1: + return nn.Conv1d(*args, **kwargs) + elif dims == 2: + return nn.Conv2d(*args, **kwargs) + elif dims == 3: + return nn.Conv3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +def linear(*args, **kwargs): + """ + Create a linear module. + """ + return nn.Linear(*args, **kwargs) + + +def avg_pool_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D average pooling module. + """ + if dims == 1: + return nn.AvgPool1d(*args, **kwargs) + elif dims == 2: + return nn.AvgPool2d(*args, **kwargs) + elif dims == 3: + return nn.AvgPool3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +class HybridConditioner(nn.Module): + + def __init__(self, c_concat_config, c_crossattn_config): + super().__init__() + self.concat_conditioner = instantiate_from_config(c_concat_config) + self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) + + def forward(self, c_concat, c_crossattn): + c_concat = self.concat_conditioner(c_concat) + c_crossattn = self.crossattn_conditioner(c_crossattn) + return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} + + +def noise_like(shape, device, repeat=False): + repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) + noise = lambda: torch.randn(shape, device=device) + return repeat_noise() if repeat else noise() \ No newline at end of file diff --git a/ldm/modules/distributions/__init__.py b/ldm/modules/distributions/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ldm/modules/distributions/distributions.py b/ldm/modules/distributions/distributions.py new file mode 100644 index 000000000..f2b8ef901 --- /dev/null +++ b/ldm/modules/distributions/distributions.py @@ -0,0 +1,92 @@ +import torch +import numpy as np + + +class AbstractDistribution: + def sample(self): + raise NotImplementedError() + + def mode(self): + raise NotImplementedError() + + +class DiracDistribution(AbstractDistribution): + def __init__(self, value): + self.value = value + + def sample(self): + return self.value + + def mode(self): + return self.value + + +class DiagonalGaussianDistribution(object): + def __init__(self, parameters, deterministic=False): + self.parameters = parameters + self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) + + def sample(self): + x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) + return x + + def kl(self, other=None): + if self.deterministic: + return torch.Tensor([0.]) + else: + if other is None: + return 0.5 * torch.sum(torch.pow(self.mean, 2) + + self.var - 1.0 - self.logvar, + dim=[1, 2, 3]) + else: + return 0.5 * torch.sum( + torch.pow(self.mean - other.mean, 2) / other.var + + self.var / other.var - 1.0 - self.logvar + other.logvar, + dim=[1, 2, 3]) + + def nll(self, sample, dims=[1,2,3]): + if 
self.deterministic: + return torch.Tensor([0.]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum( + logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims) + + def mode(self): + return self.mean + + +def normal_kl(mean1, logvar1, mean2, logvar2): + """ + source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12 + Compute the KL divergence between two gaussians. + Shapes are automatically broadcasted, so batches can be compared to + scalars, among other use cases. + """ + tensor = None + for obj in (mean1, logvar1, mean2, logvar2): + if isinstance(obj, torch.Tensor): + tensor = obj + break + assert tensor is not None, "at least one argument must be a Tensor" + + # Force variances to be Tensors. Broadcasting helps convert scalars to + # Tensors, but it does not work for torch.exp(). + logvar1, logvar2 = [ + x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) + for x in (logvar1, logvar2) + ] + + return 0.5 * ( + -1.0 + + logvar2 + - logvar1 + + torch.exp(logvar1 - logvar2) + + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) + ) diff --git a/ldm/modules/ema.py b/ldm/modules/ema.py new file mode 100644 index 000000000..c8c75af43 --- /dev/null +++ b/ldm/modules/ema.py @@ -0,0 +1,76 @@ +import torch +from torch import nn + + +class LitEma(nn.Module): + def __init__(self, model, decay=0.9999, use_num_upates=True): + super().__init__() + if decay < 0.0 or decay > 1.0: + raise ValueError('Decay must be between 0 and 1') + + self.m_name2s_name = {} + self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) + self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates + else torch.tensor(-1,dtype=torch.int)) + + for name, p in model.named_parameters(): + if p.requires_grad: + #remove as '.'-character is not allowed in buffers + s_name = name.replace('.','') + self.m_name2s_name.update({name:s_name}) + self.register_buffer(s_name,p.clone().detach().data) + + self.collected_params = [] + + def forward(self,model): + decay = self.decay + + if self.num_updates >= 0: + self.num_updates += 1 + decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates)) + + one_minus_decay = 1.0 - decay + + with torch.no_grad(): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + + for key in m_param: + if m_param[key].requires_grad: + sname = self.m_name2s_name[key] + shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) + shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) + else: + assert not key in self.m_name2s_name + + def copy_to(self, model): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + for key in m_param: + if m_param[key].requires_grad: + m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) + else: + assert not key in self.m_name2s_name + + def store(self, parameters): + """ + Save the current parameters for restoring later. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + temporarily stored. + """ + self.collected_params = [param.clone() for param in parameters] + + def restore(self, parameters): + """ + Restore the parameters stored with the `store` method. + Useful to validate the model with EMA parameters without affecting the + original optimization process. Store the parameters before the + `copy_to` method. 
After validation (or model saving), use this to restore the former parameters.
+        Args:
+          parameters: Iterable of `torch.nn.Parameter`; the parameters to be
+            updated with the stored parameters.
+        """
+        for c_param, param in zip(self.collected_params, parameters):
+            param.data.copy_(c_param.data)
diff --git a/ldm/modules/encoders/__init__.py b/ldm/modules/encoders/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ldm/modules/encoders/modules.py b/ldm/modules/encoders/modules.py
new file mode 100644
index 000000000..ededbe43e
--- /dev/null
+++ b/ldm/modules/encoders/modules.py
@@ -0,0 +1,234 @@
+import torch
+import torch.nn as nn
+from functools import partial
+import clip
+from einops import rearrange, repeat
+from transformers import CLIPTokenizer, CLIPTextModel
+import kornia
+
+from ldm.modules.x_transformer import Encoder, TransformerWrapper  # TODO: can we directly rely on lucidrains code and simply add this as a requirement? --> test
+
+
+class AbstractEncoder(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def encode(self, *args, **kwargs):
+        raise NotImplementedError
+
+
+class ClassEmbedder(nn.Module):
+    def __init__(self, embed_dim, n_classes=1000, key='class'):
+        super().__init__()
+        self.key = key
+        self.embedding = nn.Embedding(n_classes, embed_dim)
+
+    def forward(self, batch, key=None):
+        if key is None:
+            key = self.key
+        # this is for use in crossattn
+        c = batch[key][:, None]
+        c = self.embedding(c)
+        return c
+
+
+class TransformerEmbedder(AbstractEncoder):
+    """Some transformer encoder layers"""
+    def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
+        super().__init__()
+        self.device = device
+        self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
+                                              attn_layers=Encoder(dim=n_embed, depth=n_layer))
+
+    def forward(self, tokens):
+        tokens = tokens.to(self.device)  # meh
+        z = self.transformer(tokens, return_embeddings=True)
+        return z
+
+    def encode(self, x):
+        return self(x)
+
+
+class BERTTokenizer(AbstractEncoder):
+    """Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)"""
+    def __init__(self, device="cuda", vq_interface=True, max_length=77):
+        super().__init__()
+        from transformers import BertTokenizerFast  # TODO: add to requirements
+        self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
+        self.device = device
+        self.vq_interface = vq_interface
+        self.max_length = max_length
+
+    def forward(self, text):
+        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
+                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
+        tokens = batch_encoding["input_ids"].to(self.device)
+        return tokens
+
+    @torch.no_grad()
+    def encode(self, text):
+        tokens = self(text)
+        if not self.vq_interface:
+            return tokens
+        return None, None, [None, None, tokens]
+
+    def decode(self, text):
+        return text
+
+
+class BERTEmbedder(AbstractEncoder):
+    """Uses the BERT tokenizer model and adds some transformer encoder layers"""
+    def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77,
+                 device="cuda", use_tokenizer=True, embedding_dropout=0.0):
+        super().__init__()
+        self.use_tknz_fn = use_tokenizer
+        if self.use_tknz_fn:
+            self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len)
+        self.device = device
+        self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
+                                              attn_layers=Encoder(dim=n_embed, depth=n_layer),
+                                              emb_dropout=embedding_dropout)
+
+    def forward(self, text):
+        if self.use_tknz_fn:
+            tokens = self.tknz_fn(text)  # .to(self.device)
+        else:
+            tokens = text
+        z = self.transformer(tokens, return_embeddings=True)
+        return z
+
+    def encode(self, text):
+        # output of length 77
+        return self(text)
+
+
+class SpatialRescaler(nn.Module):
+    def __init__(self,
+                 n_stages=1,
+                 method='bilinear',
+                 multiplier=0.5,
+                 in_channels=3,
+                 out_channels=None,
+                 bias=False):
+        super().__init__()
+        self.n_stages = n_stages
+        assert self.n_stages >= 0
+        assert method in ['nearest', 'linear', 'bilinear', 'trilinear', 'bicubic', 'area']
+        self.multiplier = multiplier
+        self.interpolator = partial(torch.nn.functional.interpolate, mode=method)
+        self.remap_output = out_channels is not None
+        if self.remap_output:
+            print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.')
+            self.channel_mapper = nn.Conv2d(in_channels, out_channels, 1, bias=bias)
+
+    def forward(self, x):
+        for stage in range(self.n_stages):
+            x = self.interpolator(x, scale_factor=self.multiplier)
+
+        if self.remap_output:
+            x = self.channel_mapper(x)
+        return x
+
+    def encode(self, x):
+        return self(x)
+
+
+class FrozenCLIPEmbedder(AbstractEncoder):
+    """Uses the CLIP transformer encoder for text (from Hugging Face)"""
+    def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77):
+        super().__init__()
+        self.tokenizer = CLIPTokenizer.from_pretrained(version)
+        self.transformer = CLIPTextModel.from_pretrained(version)
+        self.device = device
+        self.max_length = max_length
+        self.freeze()
+
+    def freeze(self):
+        self.transformer = self.transformer.eval()
+        for param in self.parameters():
+            param.requires_grad = False
+
+    def forward(self, text):
+        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
+                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
+        tokens = batch_encoding["input_ids"].to(self.device)
+        outputs = self.transformer(input_ids=tokens)
+
+        z = outputs.last_hidden_state
+        return z
+
+    def encode(self, text):
+        return self(text)
+
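+
+# Illustrative usage sketch (annotation, not part of the upstream module). With
+# the default openai/clip-vit-large-patch14 weights, encode() maps a list of B
+# prompts to a [B, max_length, 768] tensor of per-token hidden states:
+#
+#     embedder = FrozenCLIPEmbedder(device="cpu")
+#     z = embedder.encode(["a photograph of an astronaut riding a horse"])
+#     assert z.shape == (1, 77, 768)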
+ +class FrozenCLIPTextEmbedder(nn.Module): + """ + Uses the CLIP transformer encoder for text. + """ + def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True): + super().__init__() + self.model, _ = clip.load(version, jit=False, device="cpu") + self.device = device + self.max_length = max_length + self.n_repeat = n_repeat + self.normalize = normalize + + def freeze(self): + self.model = self.model.eval() + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text): + tokens = clip.tokenize(text).to(self.device) + z = self.model.encode_text(tokens) + if self.normalize: + z = z / torch.linalg.norm(z, dim=1, keepdim=True) + return z + + def encode(self, text): + z = self(text) + if z.ndim==2: + z = z[:, None, :] + z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat) + return z + + +class FrozenClipImageEmbedder(nn.Module): + """ + Uses the CLIP image encoder. + """ + def __init__( + self, + model, + jit=False, + device='cuda' if torch.cuda.is_available() else 'cpu', + antialias=False, + ): + super().__init__() + self.model, _ = clip.load(name=model, device=device, jit=jit) + + self.antialias = antialias + + self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) + self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) + + def preprocess(self, x): + # normalize to [0,1] + x = kornia.geometry.resize(x, (224, 224), + interpolation='bicubic',align_corners=True, + antialias=self.antialias) + x = (x + 1.) / 2. + # renormalize according to clip + x = kornia.enhance.normalize(x, self.mean, self.std) + return x + + def forward(self, x): + # x is assumed to be in range [-1,1] + return self.model.encode_image(self.preprocess(x)) + + +if __name__ == "__main__": + from ldm.util import count_params + model = FrozenCLIPEmbedder() + count_params(model, verbose=True) \ No newline at end of file diff --git a/ldm/modules/encoders/xlmr.py b/ldm/modules/encoders/xlmr.py new file mode 100644 index 000000000..beab3fdf5 --- /dev/null +++ b/ldm/modules/encoders/xlmr.py @@ -0,0 +1,137 @@ +from transformers import BertPreTrainedModel,BertModel,BertConfig +import torch.nn as nn +import torch +from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig +from transformers import XLMRobertaModel,XLMRobertaTokenizer +from typing import Optional + +class BertSeriesConfig(BertConfig): + def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None,project_dim=512, pooler_fn="average",learn_encoder=False,model_type='bert',**kwargs): + + super().__init__(vocab_size, hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, hidden_act, hidden_dropout_prob, attention_probs_dropout_prob, max_position_embeddings, type_vocab_size, initializer_range, layer_norm_eps, pad_token_id, position_embedding_type, use_cache, classifier_dropout, **kwargs) + self.project_dim = project_dim + self.pooler_fn = pooler_fn + self.learn_encoder = learn_encoder + +class RobertaSeriesConfig(XLMRobertaConfig): + def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2,project_dim=512,pooler_fn='cls',learn_encoder=False, **kwargs): + 
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + self.project_dim = project_dim + self.pooler_fn = pooler_fn + self.learn_encoder = learn_encoder + + +class BertSeriesModelWithTransformation(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + config_class = BertSeriesConfig + + def __init__(self, config=None, **kargs): + # modify initialization for autoloading + if config is None: + config = XLMRobertaConfig() + config.attention_probs_dropout_prob= 0.1 + config.bos_token_id=0 + config.eos_token_id=2 + config.hidden_act='gelu' + config.hidden_dropout_prob=0.1 + config.hidden_size=1024 + config.initializer_range=0.02 + config.intermediate_size=4096 + config.layer_norm_eps=1e-05 + config.max_position_embeddings=514 + + config.num_attention_heads=16 + config.num_hidden_layers=24 + config.output_past=True + config.pad_token_id=1 + config.position_embedding_type= "absolute" + + config.type_vocab_size= 1 + config.use_cache=True + config.vocab_size= 250002 + config.project_dim = 768 + config.learn_encoder = False + super().__init__(config) + self.roberta = XLMRobertaModel(config) + self.transformation = nn.Linear(config.hidden_size,config.project_dim) + self.pre_LN=nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large') + self.pooler = lambda x: x[:,0] + self.post_init() + + def encode(self,c): + device = next(self.parameters()).device + text = self.tokenizer(c, + truncation=True, + max_length=77, + return_length=False, + return_overflowing_tokens=False, + padding="max_length", + return_tensors="pt") + text["input_ids"] = torch.tensor(text["input_ids"]).to(device) + text["attention_mask"] = torch.tensor( + text['attention_mask']).to(device) + features = self(**text) + return features['projection_state'] + + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + ) : + r""" + """ + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + + outputs = self.roberta( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=True, + return_dict=return_dict, + ) + + # last module outputs + sequence_output = outputs[0] + + + # project every module + sequence_output_ln = self.pre_LN(sequence_output) + + # pooler + pooler_output = self.pooler(sequence_output_ln) + pooler_output = self.transformation(pooler_output) + projection_state = self.transformation(outputs.last_hidden_state) + + return { + 'pooler_output':pooler_output, + 'last_hidden_state':outputs.last_hidden_state, + 'hidden_states':outputs.hidden_states, + 'attentions':outputs.attentions, + 
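+            # Annotation: 'projection_state' is what encode() consumes -- the
+            # per-token projection of last_hidden_state into project_dim (768),
+            # i.e. the [B, 77, 768] sequence used as cross-attention context.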
'projection_state':projection_state, + 'sequence_out': sequence_output + } + + +class RobertaSeriesModelWithTransformation(BertSeriesModelWithTransformation): + base_model_prefix = 'roberta' + config_class= RobertaSeriesConfig \ No newline at end of file diff --git a/ldm/modules/image_degradation/__init__.py b/ldm/modules/image_degradation/__init__.py new file mode 100644 index 000000000..7836cada8 --- /dev/null +++ b/ldm/modules/image_degradation/__init__.py @@ -0,0 +1,2 @@ +from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr +from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light diff --git a/ldm/modules/image_degradation/bsrgan.py b/ldm/modules/image_degradation/bsrgan.py new file mode 100644 index 000000000..32ef56169 --- /dev/null +++ b/ldm/modules/image_degradation/bsrgan.py @@ -0,0 +1,730 @@ +# -*- coding: utf-8 -*- +""" +# -------------------------------------------- +# Super-Resolution +# -------------------------------------------- +# +# Kai Zhang (cskaizhang@gmail.com) +# https://github.com/cszn +# From 2019/03--2021/08 +# -------------------------------------------- +""" + +import numpy as np +import cv2 +import torch + +from functools import partial +import random +from scipy import ndimage +import scipy +import scipy.stats as ss +from scipy.interpolate import interp2d +from scipy.linalg import orth +import albumentations + +import ldm.modules.image_degradation.utils_image as util + + +def modcrop_np(img, sf): + ''' + Args: + img: numpy image, WxH or WxHxC + sf: scale factor + Return: + cropped image + ''' + w, h = img.shape[:2] + im = np.copy(img) + return im[:w - w % sf, :h - h % sf, ...] + + +""" +# -------------------------------------------- +# anisotropic Gaussian kernels +# -------------------------------------------- +""" + + +def analytic_kernel(k): + """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" + k_size = k.shape[0] + # Calculate the big kernels size + big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) + # Loop over the small kernel to fill the big one + for r in range(k_size): + for c in range(k_size): + big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k + # Crop the edges of the big kernel to ignore very small values and increase run time of SR + crop = k_size // 2 + cropped_big_k = big_k[crop:-crop, crop:-crop] + # Normalize to 1 + return cropped_big_k / cropped_big_k.sum() + + +def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): + """ generate an anisotropic Gaussian kernel + Args: + ksize : e.g., 15, kernel size + theta : [0, pi], rotation angle range + l1 : [0.1,50], scaling of eigenvalues + l2 : [0.1,l1], scaling of eigenvalues + If l1 = l2, will get an isotropic Gaussian kernel. 
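+        Example (annotation): anisotropic_Gaussian(ksize=15, theta=np.pi / 4, l1=10, l2=2)
+        yields a 15x15 kernel elongated along the 45-degree direction.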
+ Returns: + k : kernel + """ + + v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) + V = np.array([[v[0], v[1]], [v[1], -v[0]]]) + D = np.array([[l1, 0], [0, l2]]) + Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) + k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) + + return k + + +def gm_blur_kernel(mean, cov, size=15): + center = size / 2.0 + 0.5 + k = np.zeros([size, size]) + for y in range(size): + for x in range(size): + cy = y - center + 1 + cx = x - center + 1 + k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) + + k = k / np.sum(k) + return k + + +def shift_pixel(x, sf, upper_left=True): + """shift pixel for super-resolution with different scale factors + Args: + x: WxHxC or WxH + sf: scale factor + upper_left: shift direction + """ + h, w = x.shape[:2] + shift = (sf - 1) * 0.5 + xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) + if upper_left: + x1 = xv + shift + y1 = yv + shift + else: + x1 = xv - shift + y1 = yv - shift + + x1 = np.clip(x1, 0, w - 1) + y1 = np.clip(y1, 0, h - 1) + + if x.ndim == 2: + x = interp2d(xv, yv, x)(x1, y1) + if x.ndim == 3: + for i in range(x.shape[-1]): + x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) + + return x + + +def blur(x, k): + ''' + x: image, NxcxHxW + k: kernel, Nx1xhxw + ''' + n, c = x.shape[:2] + p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 + x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') + k = k.repeat(1, c, 1, 1) + k = k.view(-1, 1, k.shape[2], k.shape[3]) + x = x.view(1, -1, x.shape[2], x.shape[3]) + x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) + x = x.view(n, c, x.shape[2], x.shape[3]) + + return x + + +def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): + """" + # modified version of https://github.com/assafshocher/BlindSR_dataset_generator + # Kai Zhang + # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var + # max_var = 2.5 * sf + """ + # Set random eigen-vals (lambdas) and angle (theta) for COV matrix + lambda_1 = min_var + np.random.rand() * (max_var - min_var) + lambda_2 = min_var + np.random.rand() * (max_var - min_var) + theta = np.random.rand() * np.pi # random theta + noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 + + # Set COV matrix using Lambdas and Theta + LAMBDA = np.diag([lambda_1, lambda_2]) + Q = np.array([[np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)]]) + SIGMA = Q @ LAMBDA @ Q.T + INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] + + # Set expectation position (shifting kernel for aligned image) + MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) + MU = MU[None, None, :, None] + + # Create meshgrid for Gaussian + [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) + Z = np.stack([X, Y], 2)[:, :, :, None] + + # Calcualte Gaussian for every pixel of the kernel + ZZ = Z - MU + ZZ_t = ZZ.transpose(0, 1, 3, 2) + raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) + + # shift the kernel so it will be centered + # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) + + # Normalize the kernel and return + # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) + kernel = raw_kernel / np.sum(raw_kernel) + return kernel + + +def fspecial_gaussian(hsize, sigma): + hsize = [hsize, hsize] + siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] + std = 
sigma + [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) + arg = -(x * x + y * y) / (2 * std * std) + h = np.exp(arg) + h[h < scipy.finfo(float).eps * h.max()] = 0 + sumh = h.sum() + if sumh != 0: + h = h / sumh + return h + + +def fspecial_laplacian(alpha): + alpha = max([0, min([alpha, 1])]) + h1 = alpha / (alpha + 1) + h2 = (1 - alpha) / (alpha + 1) + h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] + h = np.array(h) + return h + + +def fspecial(filter_type, *args, **kwargs): + ''' + python code from: + https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py + ''' + if filter_type == 'gaussian': + return fspecial_gaussian(*args, **kwargs) + if filter_type == 'laplacian': + return fspecial_laplacian(*args, **kwargs) + + +""" +# -------------------------------------------- +# degradation models +# -------------------------------------------- +""" + + +def bicubic_degradation(x, sf=3): + ''' + Args: + x: HxWxC image, [0, 1] + sf: down-scale factor + Return: + bicubicly downsampled LR image + ''' + x = util.imresize_np(x, scale=1 / sf) + return x + + +def srmd_degradation(x, k, sf=3): + ''' blur + bicubic downsampling + Args: + x: HxWxC image, [0, 1] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + Reference: + @inproceedings{zhang2018learning, + title={Learning a single convolutional super-resolution network for multiple degradations}, + author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + pages={3262--3271}, + year={2018} + } + ''' + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' + x = bicubic_degradation(x, sf=sf) + return x + + +def dpsr_degradation(x, k, sf=3): + ''' bicubic downsampling + blur + Args: + x: HxWxC image, [0, 1] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + Reference: + @inproceedings{zhang2019deep, + title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, + author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + pages={1671--1681}, + year={2019} + } + ''' + x = bicubic_degradation(x, sf=sf) + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + return x + + +def classical_degradation(x, k, sf=3): + ''' blur + downsampling + Args: + x: HxWxC image, [0, 1]/[0, 255] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + ''' + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) + st = 0 + return x[st::sf, st::sf, ...] + + +def add_sharpening(img, weight=0.5, radius=50, threshold=10): + """USM sharpening. borrowed from real-ESRGAN + Input image: I; Blurry image: B. + 1. K = I + weight * (I - B) + 2. Mask = 1 if abs(I - B) > threshold, else: 0 + 3. Blur mask: + 4. Out = Mask * K + (1 - Mask) * I + Args: + img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. + weight (float): Sharp weight. Default: 1. + radius (float): Kernel size of Gaussian blur. Default: 50. 
+ threshold (int): + """ + if radius % 2 == 0: + radius += 1 + blur = cv2.GaussianBlur(img, (radius, radius), 0) + residual = img - blur + mask = np.abs(residual) * 255 > threshold + mask = mask.astype('float32') + soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) + + K = img + weight * residual + K = np.clip(K, 0, 1) + return soft_mask * K + (1 - soft_mask) * img + + +def add_blur(img, sf=4): + wd2 = 4.0 + sf + wd = 2.0 + 0.2 * sf + if random.random() < 0.5: + l1 = wd2 * random.random() + l2 = wd2 * random.random() + k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) + else: + k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random()) + img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') + + return img + + +def add_resize(img, sf=4): + rnum = np.random.rand() + if rnum > 0.8: # up + sf1 = random.uniform(1, 2) + elif rnum < 0.7: # down + sf1 = random.uniform(0.5 / sf, 1) + else: + sf1 = 1.0 + img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + return img + + +# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): +# noise_level = random.randint(noise_level1, noise_level2) +# rnum = np.random.rand() +# if rnum > 0.6: # add color Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) +# elif rnum < 0.4: # add grayscale Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) +# else: # add noise +# L = noise_level2 / 255. +# D = np.diag(np.random.rand(3)) +# U = orth(np.random.rand(3, 3)) +# conv = np.dot(np.dot(np.transpose(U), D), U) +# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) +# img = np.clip(img, 0.0, 1.0) +# return img + +def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + rnum = np.random.rand() + if rnum > 0.6: # add color Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: # add grayscale Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: # add noise + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_speckle_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + img = np.clip(img, 0.0, 1.0) + rnum = random.random() + if rnum > 0.6: + img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: + img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_Poisson_noise(img): + img = np.clip((img * 255.0).round(), 0, 255) / 255. 
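+    # Annotation: the round-trip above snaps intensities to the 256-level uint8
+    # grid so the Poisson rates below are computed on quantized values.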
+ vals = 10 ** (2 * random.random() + 2.0) # [2, 4] + if random.random() < 0.5: + img = np.random.poisson(img * vals).astype(np.float32) / vals + else: + img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) + img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. + noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray + img += noise_gray[:, :, np.newaxis] + img = np.clip(img, 0.0, 1.0) + return img + + +def add_JPEG_noise(img): + quality_factor = random.randint(30, 95) + img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) + result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) + img = cv2.imdecode(encimg, 1) + img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) + return img + + +def random_crop(lq, hq, sf=4, lq_patchsize=64): + h, w = lq.shape[:2] + rnd_h = random.randint(0, h - lq_patchsize) + rnd_w = random.randint(0, w - lq_patchsize) + lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] + + rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) + hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] + return lq, hq + + +def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = img.shape[:2] + img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = img.shape[:2] + + if h < lq_patchsize * sf or w < lq_patchsize * sf: + raise ValueError(f'img size ({h1}X{w1}) is too small!') + + hq = img.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + img = util.imresize_np(img, 1 / 2, True) + img = np.clip(img, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + img = add_blur(img, sf=sf) + + elif i == 1: + img = add_blur(img, sf=sf) + + elif i == 2: + a, b = img.shape[1], img.shape[0] + # downsample2 + if random.random() < 0.75: + sf1 = random.uniform(1, 2 * sf) + img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') + img = img[0::sf, 0::sf, ...] 
# nearest downsampling + img = np.clip(img, 0.0, 1.0) + + elif i == 3: + # downsample3 + img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + img = add_JPEG_noise(img) + + elif i == 6: + # add processed camera sensor noise + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + img = add_JPEG_noise(img) + + # random crop + img, hq = random_crop(img, hq, sf_ori, lq_patchsize) + + return img, hq + + +# todo no isp_model? +def degradation_bsrgan_variant(image, sf=4, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + image = util.uint2single(image) + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = image.shape[:2] + image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = image.shape[:2] + + hq = image.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + image = util.imresize_np(image, 1 / 2, True) + image = np.clip(image, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + image = add_blur(image, sf=sf) + + elif i == 1: + image = add_blur(image, sf=sf) + + elif i == 2: + a, b = image.shape[1], image.shape[0] + # downsample2 + if random.random() < 0.75: + sf1 = random.uniform(1, 2 * sf) + image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') + image = image[0::sf, 0::sf, ...] 
# nearest downsampling + image = np.clip(image, 0.0, 1.0) + + elif i == 3: + # downsample3 + image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + image = np.clip(image, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + image = add_JPEG_noise(image) + + # elif i == 6: + # # add processed camera sensor noise + # if random.random() < isp_prob and isp_model is not None: + # with torch.no_grad(): + # img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + image = add_JPEG_noise(image) + image = util.single2uint(image) + example = {"image":image} + return example + + +# TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc... +def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None): + """ + This is an extended degradation model by combining + the degradation models of BSRGAN and Real-ESRGAN + ---------- + img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) + sf: scale factor + use_shuffle: the degradation shuffle + use_sharp: sharpening the img + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + + h1, w1 = img.shape[:2] + img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = img.shape[:2] + + if h < lq_patchsize * sf or w < lq_patchsize * sf: + raise ValueError(f'img size ({h1}X{w1}) is too small!') + + if use_sharp: + img = add_sharpening(img) + hq = img.copy() + + if random.random() < shuffle_prob: + shuffle_order = random.sample(range(13), 13) + else: + shuffle_order = list(range(13)) + # local shuffle for noise, JPEG is always the last one + shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6))) + shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13))) + + poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1 + + for i in shuffle_order: + if i == 0: + img = add_blur(img, sf=sf) + elif i == 1: + img = add_resize(img, sf=sf) + elif i == 2: + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + elif i == 3: + if random.random() < poisson_prob: + img = add_Poisson_noise(img) + elif i == 4: + if random.random() < speckle_prob: + img = add_speckle_noise(img) + elif i == 5: + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + elif i == 6: + img = add_JPEG_noise(img) + elif i == 7: + img = add_blur(img, sf=sf) + elif i == 8: + img = add_resize(img, sf=sf) + elif i == 9: + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + elif i == 10: + if random.random() < poisson_prob: + img = add_Poisson_noise(img) + elif i == 11: + if random.random() < speckle_prob: + img = add_speckle_noise(img) + elif i == 12: + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + else: + print('check the shuffle!') + + # resize to desired size + img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])), + interpolation=random.choice([1, 2, 3])) + + # add final JPEG compression noise + img = add_JPEG_noise(img) + + # random crop + img, hq = random_crop(img, hq, sf, lq_patchsize) + 
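+    # Annotation: random_crop crops lq and hq jointly (hq offsets scaled by sf),
+    # so the returned patch pair stays spatially aligned.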
+
+
+if __name__ == '__main__':
+    print("hey")
+    img = util.imread_uint('utils/test.png', 3)
+    print(img)
+    img = img[:448, :448]
+    h = img.shape[0] // 4
+    print("resizing to", h)
+    sf = 4
+    deg_fn = partial(degradation_bsrgan_variant, sf=sf)
+    for i in range(20):
+        print(i)
+        img_hq = img  # keep the uint8 high-quality reference
+        img_lq = deg_fn(img)["image"]  # degradation_bsrgan_variant returns a dict
+        img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
+        print(img_lq)
+        img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
+        print(img_lq.shape)
+        print("bicubic", img_lq_bicubic.shape)
+        print(img_hq.shape)
+        lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+                                interpolation=0)
+        lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+                                        interpolation=0)
+        img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
+        util.imsave(img_concat, str(i) + '.png')
+
+
diff --git a/ldm/modules/image_degradation/bsrgan_light.py b/ldm/modules/image_degradation/bsrgan_light.py
new file mode 100644
index 000000000..9e1f82399
--- /dev/null
+++ b/ldm/modules/image_degradation/bsrgan_light.py
@@ -0,0 +1,650 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+import cv2
+import torch
+
+from functools import partial
+import random
+from scipy import ndimage
+import scipy
+import scipy.stats as ss
+from scipy.interpolate import interp2d
+from scipy.linalg import orth
+import albumentations
+
+import ldm.modules.image_degradation.utils_image as util
+
+"""
+# --------------------------------------------
+# Super-Resolution
+# --------------------------------------------
+#
+# Kai Zhang (cskaizhang@gmail.com)
+# https://github.com/cszn
+# From 2019/03--2021/08
+# --------------------------------------------
+"""
+
+
+def modcrop_np(img, sf):
+    '''
+    Args:
+        img: numpy image, HxW or HxWxC
+        sf: scale factor
+    Return:
+        cropped image
+    '''
+    h, w = img.shape[:2]
+    im = np.copy(img)
+    return im[:h - h % sf, :w - w % sf, ...]
+
+
+"""
+# --------------------------------------------
+# anisotropic Gaussian kernels
+# --------------------------------------------
+"""
+
+
+def analytic_kernel(k):
+    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
+    k_size = k.shape[0]
+    # Calculate the big kernel's size
+    big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
+    # Loop over the small kernel to fill the big one
+    for r in range(k_size):
+        for c in range(k_size):
+            big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
+    # Crop the edges of the big kernel to ignore very small values and increase run time of SR
+    crop = k_size // 2
+    cropped_big_k = big_k[crop:-crop, crop:-crop]
+    # Normalize to 1
+    return cropped_big_k / cropped_big_k.sum()
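+
+
+# Minimal sketch (not part of the upstream code): lifting an X2 kernel to its
+# X4 analytic equivalent. Assumes `k2` is a square, normalized kernel, e.g.
+# the Gaussian produced by fspecial() further below.
+#
+#     k2 = fspecial('gaussian', 15, 2.0)  # 15x15 kernel for the X2 model
+#     k4 = analytic_kernel(k2)            # 29x29 kernel for X4, sums to 1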
+
+
+def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
+    """ generate an anisotropic Gaussian kernel
+    Args:
+        ksize : e.g., 15, kernel size
+        theta : [0, pi], rotation angle
+        l1    : [0.1, 50], scaling of eigenvalues
+        l2    : [0.1, l1], scaling of eigenvalues
+        If l1 = l2, the result is an isotropic Gaussian kernel.
+    Returns:
+        k     : kernel
+    """
+
+    v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
+    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
+    D = np.array([[l1, 0], [0, l2]])
+    Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
+    k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
+
+    return k
+
+
+def gm_blur_kernel(mean, cov, size=15):
+    center = size / 2.0 + 0.5
+    k = np.zeros([size, size])
+    for y in range(size):
+        for x in range(size):
+            cy = y - center + 1
+            cx = x - center + 1
+            k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
+
+    k = k / np.sum(k)
+    return k
+
+
+def shift_pixel(x, sf, upper_left=True):
+    """shift pixel for super-resolution with different scale factors
+    Args:
+        x: HxWxC or HxW image
+        sf: scale factor
+        upper_left: shift direction
+    """
+    h, w = x.shape[:2]
+    shift = (sf - 1) * 0.5
+    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
+    if upper_left:
+        x1 = xv + shift
+        y1 = yv + shift
+    else:
+        x1 = xv - shift
+        y1 = yv - shift
+
+    x1 = np.clip(x1, 0, w - 1)
+    y1 = np.clip(y1, 0, h - 1)
+
+    if x.ndim == 2:
+        x = interp2d(xv, yv, x)(x1, y1)
+    if x.ndim == 3:
+        for i in range(x.shape[-1]):
+            x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
+
+    return x
+
+
+def blur(x, k):
+    '''
+    x: image, NxCxHxW
+    k: kernel, Nx1xhxw
+    '''
+    n, c = x.shape[:2]
+    p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
+    x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
+    k = k.repeat(1, c, 1, 1)
+    k = k.view(-1, 1, k.shape[2], k.shape[3])
+    x = x.view(1, -1, x.shape[2], x.shape[3])
+    x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
+    x = x.view(n, c, x.shape[2], x.shape[3])
+
+    return x
+
+
+def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
+    """
+    # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
+    # Kai Zhang
+    # min_var = 0.175 * sf  # variance of the gaussian kernel will be sampled between min_var and max_var
+    # max_var = 2.5 * sf
+    """
+    # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
+    lambda_1 = min_var + np.random.rand() * (max_var - min_var)
+    lambda_2 = min_var + np.random.rand() * (max_var - min_var)
+    theta = np.random.rand() * np.pi  # random theta
+    noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
+
+    # Set COV matrix using Lambdas and Theta
+    LAMBDA = np.diag([lambda_1, lambda_2])
+    Q = np.array([[np.cos(theta), -np.sin(theta)],
+                  [np.sin(theta), np.cos(theta)]])
+    SIGMA = Q @ LAMBDA @ Q.T
+    INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
+
+    # Set expectation position (shifting kernel for aligned image)
+    MU = k_size // 2 - 0.5 * (scale_factor - 1)  # - 0.5 * (scale_factor - k_size % 2)
+    MU = MU[None, None, :, None]
+
+    # Create meshgrid for Gaussian
+    [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
+    Z = np.stack([X, Y], 2)[:, :, :, None]
+
+    # Calculate Gaussian for every pixel of the kernel
+    ZZ = Z - MU
+    ZZ_t = ZZ.transpose(0, 1, 3, 2)
+    raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
+
+    # shift the kernel so it will be centered
+    # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
+
+    # Normalize the kernel and return
+    # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
+    kernel = raw_kernel / np.sum(raw_kernel)
+    return kernel
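+
+
+# Sketch (not part of the upstream code): batched blurring with a sampled SR
+# kernel via blur(). `xt` is an assumed dummy NxCxHxW tensor; gen_kernel()
+# returns a normalized 15x15 numpy kernel.
+#
+#     k = gen_kernel()                              # 15x15, sums to 1
+#     kt = torch.from_numpy(k).float()[None, None]  # -> 1x1x15x15
+#     xt = torch.rand(1, 3, 64, 64)                 # dummy image batch
+#     yt = blur(xt, kt)                             # blurred, still 1x3x64x64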
+
+
+def fspecial_gaussian(hsize, sigma):
+    hsize = [hsize, hsize]
+    siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
+    std = sigma
+    [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
+    arg = -(x * x + y * y) / (2 * std * std)
+    h = np.exp(arg)
+    h[h < np.finfo(float).eps * h.max()] = 0
+    sumh = h.sum()
+    if sumh != 0:
+        h = h / sumh
+    return h
+
+
+def fspecial_laplacian(alpha):
+    alpha = max([0, min([alpha, 1])])
+    h1 = alpha / (alpha + 1)
+    h2 = (1 - alpha) / (alpha + 1)
+    h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
+    h = np.array(h)
+    return h
+
+
+def fspecial(filter_type, *args, **kwargs):
+    '''
+    python code from:
+    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
+    '''
+    if filter_type == 'gaussian':
+        return fspecial_gaussian(*args, **kwargs)
+    if filter_type == 'laplacian':
+        return fspecial_laplacian(*args, **kwargs)
+
+
+"""
+# --------------------------------------------
+# degradation models
+# --------------------------------------------
+"""
+
+
+def bicubic_degradation(x, sf=3):
+    '''
+    Args:
+        x: HxWxC image, [0, 1]
+        sf: down-scale factor
+    Return:
+        bicubicly downsampled LR image
+    '''
+    x = util.imresize_np(x, scale=1 / sf)
+    return x
+
+
+def srmd_degradation(x, k, sf=3):
+    ''' blur + bicubic downsampling
+    Args:
+        x: HxWxC image, [0, 1]
+        k: hxw, double
+        sf: down-scale factor
+    Return:
+        downsampled LR image
+    Reference:
+        @inproceedings{zhang2018learning,
+          title={Learning a single convolutional super-resolution network for multiple degradations},
+          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
+          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
+          pages={3262--3271},
+          year={2018}
+        }
+    '''
+    x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
+    x = bicubic_degradation(x, sf=sf)
+    return x
+
+
+def dpsr_degradation(x, k, sf=3):
+    ''' bicubic downsampling + blur
+    Args:
+        x: HxWxC image, [0, 1]
+        k: hxw, double
+        sf: down-scale factor
+    Return:
+        downsampled LR image
+    Reference:
+        @inproceedings{zhang2019deep,
+          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
+          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
+          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
+          pages={1671--1681},
+          year={2019}
+        }
+    '''
+    x = bicubic_degradation(x, sf=sf)
+    x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
+    return x
+
+
+def classical_degradation(x, k, sf=3):
+    ''' blur + downsampling
+    Args:
+        x: HxWxC image, [0, 1]/[0, 255]
+        k: hxw, double
+        sf: down-scale factor
+    Return:
+        downsampled LR image
+    '''
+    x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
+    # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
+    st = 0
+    return x[st::sf, st::sf, ...]
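+
+
+# Sketch (not part of the upstream code): the three classical pipelines above
+# differ only in the order of blur and downsampling. Assumes `x` is a float
+# HxWxC image in [0, 1].
+#
+#     k = fspecial('gaussian', 15, 1.6)
+#     y1 = srmd_degradation(x, k, sf=3)       # blur, then bicubic downsample
+#     y2 = dpsr_degradation(x, k, sf=3)       # bicubic downsample, then blur
+#     y3 = classical_degradation(x, k, sf=3)  # blur, then sf-fold decimation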
+
+
+def add_sharpening(img, weight=0.5, radius=50, threshold=10):
+    """USM sharpening, borrowed from Real-ESRGAN
+    Input image: I; Blurry image: B.
+    1. K = I + weight * (I - B)
+    2. Mask = 1 if abs(I - B) > threshold, else: 0
+    3. Blur the mask
+    4. Out = Mask * K + (1 - Mask) * I
+    Args:
+        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
+        weight (float): Sharp weight. Default: 0.5.
+        radius (float): Kernel size of Gaussian blur. Default: 50.
+        threshold (int): Residual threshold for the sharpening mask. Default: 10.
+    """
+    if radius % 2 == 0:
+        radius += 1
+    blur = cv2.GaussianBlur(img, (radius, radius), 0)
+    residual = img - blur
+    mask = np.abs(residual) * 255 > threshold
+    mask = mask.astype('float32')
+    soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
+
+    K = img + weight * residual
+    K = np.clip(K, 0, 1)
+    return soft_mask * K + (1 - soft_mask) * img
+
+
+def add_blur(img, sf=4):
+    wd2 = 4.0 + sf
+    wd = 2.0 + 0.2 * sf
+
+    wd2 = wd2 / 4
+    wd = wd / 4
+
+    if random.random() < 0.5:
+        l1 = wd2 * random.random()
+        l2 = wd2 * random.random()
+        k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
+    else:
+        k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
+    img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
+
+    return img
+
+
+def add_resize(img, sf=4):
+    rnum = np.random.rand()
+    if rnum > 0.8:  # up
+        sf1 = random.uniform(1, 2)
+    elif rnum < 0.7:  # down
+        sf1 = random.uniform(0.5 / sf, 1)
+    else:
+        sf1 = 1.0
+    # interpolation 1/2/3 = cv2.INTER_LINEAR / INTER_CUBIC / INTER_AREA
+    img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
+    img = np.clip(img, 0.0, 1.0)
+
+    return img
+
+
+def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
+    # out-of-place `img = img + noise` keeps the caller's array unmodified
+    noise_level = random.randint(noise_level1, noise_level2)
+    rnum = np.random.rand()
+    if rnum > 0.6:  # add color Gaussian noise
+        img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
+    elif rnum < 0.4:  # add grayscale Gaussian noise
+        img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
+    else:  # add channel-correlated noise
+        L = noise_level2 / 255.
+        D = np.diag(np.random.rand(3))
+        U = orth(np.random.rand(3, 3))
+        conv = np.dot(np.dot(np.transpose(U), D), U)
+        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
+    img = np.clip(img, 0.0, 1.0)
+    return img
+
+
+def add_speckle_noise(img, noise_level1=2, noise_level2=25):
+    noise_level = random.randint(noise_level1, noise_level2)
+    img = np.clip(img, 0.0, 1.0)
+    rnum = random.random()
+    if rnum > 0.6:
+        img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
+    elif rnum < 0.4:
+        img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
+    else:
+        L = noise_level2 / 255.
+        D = np.diag(np.random.rand(3))
+        U = orth(np.random.rand(3, 3))
+        conv = np.dot(np.dot(np.transpose(U), D), U)
+        img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
+    img = np.clip(img, 0.0, 1.0)
+    return img
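+
+
+# Sketch (not part of the upstream code): the noise ops take and return
+# float32 RGB images in [0, 1]; each call samples its own noise level within
+# the given bounds. `img` is an assumed input image.
+#
+#     noisy = add_Gaussian_noise(img.copy(), noise_level1=2, noise_level2=25)
+#     noisy = add_speckle_noise(noisy)  # signal-dependent, applied on top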
+
+
+def add_Poisson_noise(img):
+    img = np.clip((img * 255.0).round(), 0, 255) / 255.
+    vals = 10 ** (2 * random.random() + 2.0)  # exponent uniform in [2, 4]
+    if random.random() < 0.5:
+        img = np.random.poisson(img * vals).astype(np.float32) / vals
+    else:
+        img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
+        img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
+        noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
+        img += noise_gray[:, :, np.newaxis]
+    img = np.clip(img, 0.0, 1.0)
+    return img
+
+
+def add_JPEG_noise(img):
+    quality_factor = random.randint(80, 95)
+    img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
+    result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
+    img = cv2.imdecode(encimg, 1)
+    img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
+    return img
+
+
+def random_crop(lq, hq, sf=4, lq_patchsize=64):
+    h, w = lq.shape[:2]
+    rnd_h = random.randint(0, h - lq_patchsize)
+    rnd_w = random.randint(0, w - lq_patchsize)
+    lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
+
+    rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
+    hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
+    return lq, hq
+
+
+def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
+    """
+    This is the degradation model of BSRGAN from the paper
+    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
+    ----------
+    img: HxWxC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
+    sf: scale factor
+    isp_model: camera ISP model
+    Returns
+    -------
+    img: low-quality patch, size: lq_patchsize x lq_patchsize x C, range: [0, 1]
+    hq: corresponding high-quality patch, size: (lq_patchsize x sf) x (lq_patchsize x sf) x C, range: [0, 1]
+    """
+    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
+    sf_ori = sf
+
+    h1, w1 = img.shape[:2]
+    img = img.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop: trim height/width to multiples of sf
+    h, w = img.shape[:2]
+
+    if h < lq_patchsize * sf or w < lq_patchsize * sf:
+        raise ValueError(f'img size ({h1}X{w1}) is too small!')
+
+    hq = img.copy()
+
+    if sf == 4 and random.random() < scale2_prob:  # downsample1
+        if np.random.rand() < 0.5:
+            img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
+                             interpolation=random.choice([1, 2, 3]))
+        else:
+            img = util.imresize_np(img, 1 / 2, True)
+        img = np.clip(img, 0.0, 1.0)
+        sf = 2
+
+    shuffle_order = random.sample(range(7), 7)
+    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
+    if idx1 > idx2:  # keep downsample3 last
+        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
+
+    for i in shuffle_order:
+
+        if i == 0:
+            img = add_blur(img, sf=sf)
+
+        elif i == 1:
+            img = add_blur(img, sf=sf)
+
+        elif i == 2:
+            a, b = img.shape[1], img.shape[0]
+            # downsample2
+            if random.random() < 0.75:
+                sf1 = random.uniform(1, 2 * sf)
+                img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
+                                 interpolation=random.choice([1, 2, 3]))
+            else:
+                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
+                k_shifted = shift_pixel(k, sf)
+                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
+                img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
+                img = img[0::sf, 0::sf, ...]  # nearest downsampling
+            img = np.clip(img, 0.0, 1.0)
+
+        elif i == 3:
+            # downsample3
+            img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
+            img = np.clip(img, 0.0, 1.0)
+
+        elif i == 4:
+            # add Gaussian noise
+            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
+
+        elif i == 5:
+            # add JPEG noise
+            if random.random() < jpeg_prob:
+                img = add_JPEG_noise(img)
+
+        elif i == 6:
+            # add processed camera sensor noise
+            if random.random() < isp_prob and isp_model is not None:
+                with torch.no_grad():
+                    img, hq = isp_model.forward(img.copy(), hq)
+
+    # add final JPEG compression noise
+    img = add_JPEG_noise(img)
+
+    # random crop
+    img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
+
+    return img, hq
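+
+
+# Sketch (not part of the upstream code): one draw from the full BSRGAN
+# degradation model. Assumes `hq_img` is float32 RGB in [0, 1] with both
+# sides at least 72 * 4.
+#
+#     lq, hq = degradation_bsrgan(hq_img, sf=4, lq_patchsize=72)
+#     # lq: 72x72x3 in [0, 1]; hq: 288x288x3 in [0, 1], spatially aligned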
+
+
+# TODO: no isp_model support here?
+def degradation_bsrgan_variant(image, sf=4, isp_model=None):
+    """
+    This is the degradation model of BSRGAN from the paper
+    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
+    ----------
+    image: HxWxC uint8 image
+    sf: scale factor
+    isp_model: camera ISP model
+    Returns
+    -------
+    example: dict with key "image" holding the degraded low-quality image, uint8, HxWxC
+    """
+    image = util.uint2single(image)
+    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
+    sf_ori = sf
+
+    h1, w1 = image.shape[:2]
+    image = image.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop: trim height/width to multiples of sf
+    h, w = image.shape[:2]
+
+    hq = image.copy()
+
+    if sf == 4 and random.random() < scale2_prob:  # downsample1
+        if np.random.rand() < 0.5:
+            image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
+                               interpolation=random.choice([1, 2, 3]))
+        else:
+            image = util.imresize_np(image, 1 / 2, True)
+        image = np.clip(image, 0.0, 1.0)
+        sf = 2
+
+    shuffle_order = random.sample(range(7), 7)
+    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
+    if idx1 > idx2:  # keep downsample3 last
+        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
+
+    for i in shuffle_order:
+
+        if i == 0:
+            image = add_blur(image, sf=sf)
+
+        elif i == 1:
+            # the second blur pass is disabled in this light variant
+            pass
+
+        elif i == 2:
+            a, b = image.shape[1], image.shape[0]
+            # downsample2
+            if random.random() < 0.8:
+                sf1 = random.uniform(1, 2 * sf)
+                image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
+                                   interpolation=random.choice([1, 2, 3]))
+            else:
+                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
+                k_shifted = shift_pixel(k, sf)
+                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
+                image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
+                image = image[0::sf, 0::sf, ...]  # nearest downsampling
+
+            image = np.clip(image, 0.0, 1.0)
+
+        elif i == 3:
+            # downsample3
+            image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
+            image = np.clip(image, 0.0, 1.0)
+
+        elif i == 4:
+            # add Gaussian noise
+            image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
+
+        elif i == 5:
+            # add JPEG noise
+            if random.random() < jpeg_prob:
+                image = add_JPEG_noise(image)
+
+        # elif i == 6:
+        #     # add processed camera sensor noise
+        #     if random.random() < isp_prob and isp_model is not None:
+        #         with torch.no_grad():
+        #             image, hq = isp_model.forward(image.copy(), hq)
+
+    # add final JPEG compression noise
+    image = add_JPEG_noise(image)
+    image = util.single2uint(image)
+    example = {"image": image}
+    return example
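+
+
+# Sketch (not part of the upstream code): unlike degradation_bsrgan, this
+# variant takes a uint8 image, applies no final crop, and returns a dict,
+# presumably the form consumed by the surrounding ldm data pipeline.
+#
+#     example = degradation_bsrgan_variant(util.imread_uint('utils/test.png', 3), sf=4)
+#     lq = example["image"]  # degraded low-resolution image, uint8 HxWxC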
+
+
+if __name__ == '__main__':
+    print("hey")
+    img = util.imread_uint('utils/test.png', 3)
+    img = img[:448, :448]
+    h = img.shape[0] // 4
+    print("resizing to", h)
+    sf = 4
+    deg_fn = partial(degradation_bsrgan_variant, sf=sf)
+    for i in range(20):
+        print(i)
+        img_hq = img
+        img_lq = deg_fn(img)["image"]
+        img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
+        print(img_lq)
+        img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
+        print(img_lq.shape)
+        print("bicubic", img_lq_bicubic.shape)
+        print(img_hq.shape)
+        lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+                                interpolation=0)
+        lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
+                                        (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+                                        interpolation=0)
+        img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
+        util.imsave(img_concat, str(i) + '.png')
diff --git a/ldm/modules/image_degradation/utils/test.png b/ldm/modules/image_degradation/utils/test.png
new file mode 100644
index 0000000000000000000000000000000000000000..4249b43de0f22707758d13c240268a401642f6e6
GIT binary patch
literal 441072
[441072 bytes of base85-encoded binary patch data omitted]
z*ZnvY^V~w4>D}DdyS}}97kav#NdXHVpsqR3UZQjSEt-YK?jD&vCe>9&{8p6x9*J7(c~*~a(WjoY)O)Aq&auM+<6 zB>0$KT)I_y3w4|9A2JAMN{pzs)b!dw%+4)bl;Bf1Qh3&8e{Uj8~JY^J%-SoKY=V zdM+h)a~;ZqwrW-NxhS@5d;M!$_AWNYT8`N3ib;OiQae6;t?K=JMu$P|c@9s|^|f2Q zL<9BTc-u;eNnYKg5S8R4#^9U2{;SFVn!Yus)-QdvfkSAmmTZjRiSt~SOZ~UX%-s}G z|DY@Sl2=yzns@OXc^fu}Gzi3doYFrketp&Q)FM?QJ(~*E6(?SGp4a}O@nL7x>)_~_ z%G%p@$SvKGl9(wO6SqUbz~kAhpgxvVuQRP2ol7=wcr&Ir2&AN`2m~=%N;`>ZDmhHh z_CNe$-n_&ALigW4$~50@-~YS$b$|bSIkJe;iFM+t4|-eH$L+8C`>gc#mUBPl>wmob z`EutqSGUz+Ix!I@EP)3sZWmCTbcr}UcTv#vvJXKdU>e{(nF1?}ONI=rOZ-Dl;rS;h07 zo17PBl6Kb7(YS0AsuwFifhDW3;h4aUnLnc{!=@>}`0|ulDmz=VhhtZ)dRO7LYZdQJ z-u-=6;9)q=Z~5!5tCUh-%Sf&^IVzESu2RN5by2pKm!|6T73pFfM`n0wUi-c7^_#s#i^!x-}%x3>PJA1pY@UqNbKVDw` zTJ?5U-tN0{{_3$ma<@k(_pEWuIyU2N+2!xCnydo*|NXkVI{drk^9w9%pI(1mI&XPU z!cu87pSdt>ot7wZEc1+I`?A7oPv?9oIk{8jdHOol z^PApmemigbxylC6iAc*lA8t6Cws=hylaHq5``z14 zTgJ5;F1=xTcG>$m+>Z@xYN{lbPt=|sr9b`j)1r;X^33*b)nlo*xYKeWr(D|UlBJooyR-py&dZ-0+{?woP^n)UMOC3>N=+oG~HO@B;?*>>hJm5@w4fL!fPkbYpb^2*%&`<)7DKFt>hlQ;_{DbEvY~Ds=;B0 z*6NNIPvbLXD`qaW+uyk3%ehaMZvVRDH$CfE>z}h!Vc(U&nC(eBI%4AISv)+wIzs!l z{NG2V4wonU`@6^Jy0S2caX$`yrxxV0uGs4Q%X7OUEPRU#%qo)3Ih;r^V&;-manK7` z^N}TF$E(-RVvZQFIT@w&r2PB&zW(3o{l8v+xBqJq;}_Gd=;)Nj);8(0ji2T7yJ^d- zrFw6>H7x4S62SL{Mzp` zQzvRozgzY@z5biJH2dSvpYPTc*H>Szc+L~Gf=x?z^F-0eyJ}lKxECx;&12%-TV?r0 zTg>;v7O9JinhQ$8}w*bHkcOTB=F8^d_rwWSyTGoN;%fkRQVTz}fkslh5iS=)|D zyk>}46rwq`_{_;Em%i@1zpG5SNw9)tU(~85rv$HKl@h^y$5^a+nzwFyU;WjRr6K6j z=g*hFTJ=0`iD@sasi^pHPoA%RStiQ^s~%0&tb$R*vU)N6m zTx7K`h->1;wib_>I_J-yS8UPB>0vdwY+++>|8!2PcJaq!lFmw(N@K;FO7r&LFHFtO z?fLldq2jrZ8}3Ud>tFZo>fD)i^}Ah0s?#Eg$B!Q;Yc73ne(JwW+rfz2*RDxistA5> zqjb#V^{LZR$+M0{tXgVkXH)m%$H%Ij$0S1SlIEJaoZWZx-L`AD{^rTgDzJ<1JAT>5 zf9t2acNrP7&UPrA*;#b|>-zZpewIoOw@a5luI$_R-YU&;fyMd9r55q$Tvkk%WDxlL zvr6`PsEX9Ye{XKc&yN+WzHuH9)?3P+i$=9`*&?#!P-KGN!~l>=o`AeZ{A!c(f0V`ku%4n zuIh&gzK_u~otngvD{PYc-q=0aQbEk?nE2}A$gYkx><5iD?5X&-CO%TmFYcAVzLe+} z8pqb`?N)4^?l2?O_0^Qq+nQSb9FmW$H>thZxjuT=(|{{o-#2~z_1LJb{^>~gb+?C6F|1SP|%>NIE-`D+pz5n;m_w^FX4+@DgPRu$v(eTWK z;9n2<>wmm@U7maUYShUI+qZ?W31s(fIH}3GLh`tg&-EZ9*TmC7mApa`CuTJ0M9k!j z-F;IA)Zf{w=%lhz=(*)Fjx*cM+Z9qTZ_WL1?Se_q^v64&=p=={?_da;dN`(0kXPep zx?xLfEN9`u*cYsaGcqHi_i?CsaIAHxow)V&53U8$kue)zO;2kN*LwLpJH>HNVc^Zz z!UgA=8>A9nlt-ucGNzUJWh$TgtYEuw_da&9S>lm)Gy)b`xb@t&j={4*nFbZt6{mSLS=m_le9~mN_f^{hHEk znNkLAb6)S3W8m`6kX8!Lww*uU{?DI(^*_Jb|F!@B{r~j;k7n-=DEj~R?_RmG*z0f2 zqO;%1|NC(Gy#4$J3txfC@Pek|GOfqgQbG;y4bn~Ez@8h}G zzw1u_^SIys%;hUz@6Nuy{`bE9bIxD0a#=NX_O{x;zpie-Ul;9VH?ME&6pQq|Ya=4E zEa&T=5$$yPV6~(A{odl1kgRi-TXT2c{1SUz_~FwcO;y2*XaA`!4JlG=KezMqWy{?) 
z#XAHiX37M6X}Y~$Q?9qn#P709y3;4cDU)XICW5l=XP40KY#hM#W~Go zGn5#<#P)l9ws|d+v-W9J@8zePS5D-Ul$Hb-zw3!cDGQQ4T)vE=x(U+3cuj!m!M@SuN@V1@>d6Gwu+ zX>E_q=^jDlN% z8>byhf4N?a>7`Zk+GR=H({}j1Y+0hBBbX@Yq_b0DqG{iSh;&ZR{d^2^+;4s)b{;v-^IP%wf1c3Ih*2hpV6j~MpxiG9+ zsucQs&o5@qgtbWxbBp8m*0wY4|NZvlmEm%RRc`!M6Q+Qq(>mf5Z;eZ_H)d**)6z1$)&=W~ZZ@|@?9tHqU`#9Ybs zK0U)}<@MJE`(jv9<~{oS(x>g#vdZsQciaEpY`=H6ZA7~FIYVEUv{}m%C5|P2uzR5y z?{%r)`KSFWk8Tdl=a~|%mD_K>{Fb*la+>)$%ccE&k5@cXR&w$vp3}ME{ubZMX1>hY zw^v+?Dk{Ba{e15C7LQ|3rf`X8uZ?Qtx!%~i{<^l{z592~a;<%X^`5?a=jXESaD?{j zUuP@L`yPM3{8=X0@8pw3r;VS_y~nAzBZ!NM<$Y&QL(Z+W-?wYA1>QaSu%NoWK7RF- zpti&r&n{Nn-g5Tr*~I;~OJms^c8VB1E2w@DReb5CxpDFKmw9=43=eAdnfVqM7e8+P ze9!7NsHgDp@mDLK%WIa!pZgiqr4=r4(R#Vm+pZO}JzV=IDzr2-6s(>9x_6Di-9^(c z&E#~D)!SmDQz@Ap82QG1`W3w&YZT%X93y8XEN$eNvDIX)&2QDz9bql1qH+Gqr!V~Q z)V)?u`a|X6W$_PL=S9_l=X`^WaY z33>AL>%pkyGg~Yd{+)1YxuuYoz|QSg#0|=>Duy?bps1?u|&8d3&1PtJI*&|2H2~bV@Dj z=va`pFYV)Qf!9~_cCMN{GjOR>VqBF>!ZWQ<|K&leltQzD&&34$TsFBH)wA*T-E}YD zq&<&aQEZbsk5$`w?NslzhK1*1x;cV9n=}s13|N!qpYeNv!tJ!n+hfyvPOkP|KQHz9 z=g6H5R)H(u_i0sJ*}I$5_j=h%|eEDPPlgzF| zOJDEWmaD(~`EQXrzU%Q7_7rp$e!FMxTTxoiu5kVI*PlOcu6=k~kwx(3@3@&pmra&WnYHX<>DXBk|^P~Tv^b}_wZ6CuiM*{Hr>qSxn$70 zHHB$c+;T6l5G zu63_xt7S|mIc51&q$c3{kwgiBz=KPt6ltc;bLYJfJyk-GVfMC@R8^Q*!St}iEjHP>IT zNX$Osjg3QRwf2{7fB5Z;8NzRIyx5X9rMyvFKKbRgk546E@bScX$VaV<^V|4!y&Zql zqLB5wHgJ{xW_{hW&nN4Z>h37he}-yX#V+grs=4rKKM(H_HsuKhijHf}Y`v9zM($oo z-gDUiu@)|-jGbX>v5i5gr(#w&y#9Um-Ld=ylN|qi{Q1yhS>eH+#|B43c4!!~O9O;vr4c_>r>~)HPTCYfLCFKB z&M>6RVP+|1b5NNo^im{cjcZHgEYpRDBJ*`hXK%3I;uGxSJdtThd6W3MAl^;Nf(|+{ zf?SGIwN%f2E86*D<_;Fa>gRJ5nVz57losQ9YFB`#rsVS}hATc#xL5s3B*aVdShn9p zkw4q6Ma+5pf9sf$f58wDCphxg_*M|=M{TpR>rSc#$Vt1u>5*^{hhD79$pYs|KZ@$ z*0Z@xX8GgK#kaSukK5~IdEBxrHdKtGsrh+18BSiR(=E<#Ud6Gdv?D^B|L~Nc zFZWiPo;2}2HskQa4R_yNX*POVvoA*Af`FY(O-;=a^Yw9WzgPSIDY~|5z3jwaJe!Mq zx1Ng%_HuPelWX62WzjSLRd>1!g#<4swS`|hR<`?Uz`EJ9XREQiePRB0*=wo0Z+(|C zcw9D%shqV;DIojlso48gub-J@?_U$*^Zbs9Z_AakwV&%`(giv`*X)eT*3=2&nrs+p zv8=*Iwl(|u_t|y3Z@&4pkKgxp%YxFUMSFP;PvzNo!lv)~>7_>Yc6O4<+i%C|$|uhW zv2{DPWX75-_na-q!k%SzEfq-%v=3OPqjN4g|83gt#AgzlwWMb5)Uz|c^L6fKG1Hli zE8hQJ_j*t9sbay2g>!lp#dgkFzf>fdw_{I5P%?+=^7{O$i04zc#_YY3ud_5`ZtK1U z-~V2`sIY0#Q~mnYwfCZ01g>0;^}g_b|MK5lzxRAO@nfb9M|NQJ4hTKtKE!iMl`+QqYmk0xWSDY%E$8X5d{ZT+i;2RKOAB7R`R--A{&ZHu zuXWNw2a+-zs^YUWTRB@-u(}vIEaftGNWD1WVu-47wbFf)ec5Kd`*-i%y?5{b`Tvgp z|MBI+n;TNuf1a#+?5g#AmgJIaI!$dae|-6J{iQwc^w05s9`CRC{Os)fzc1}M?oFL@ zfB*j$hPE||lZ%^JcRo#J4%JqWQc()3$lQJV?%jKx2ip1H&zF-=?peA?LCn!@R?E?y zS-!K}7-klCtvnLr$}MQLsFHEfRnKO}wSCjS$kw(8u4 zkLzdBwiiA<{dH3DLeqk&CkmcEe(7VzTom`&GScCO zp|a7e)|nw|QL_8q&K~ZVfWp2xzUOHV)Rb&#PP{y&sMwA^SMBFy|bok z;?mxn|DO)Ouisc5ntjr;xxti8AgL#9;w<&FdpX-0B?Y}!J~oNdYd+;NqjLA#2?i|< z*2d>Qo18T~wk)%3ci!&1Pv)#OJ*E79mtS|`?X6t5E8ctSeYUwCb8A`U?`LPjkm+h-klU$%RD{{44l zyB~inv60)lmmxKx&~oni&o;^j9&UKQtIzdWVbnFP1D40X+yBqVbvSf(S-8xnhaX>7 z?R;jU!@V$ud-~UTlY^FCdsMUecHWw;JD()Vbo|NNp6wK!E!)O8v1N6V*zxDe&bhbE zo=z#syA`IN=fv%BS!Cu-%gwjnt|>J@oI{JRk1{r1z38Y>*1GV8*< zyRvPGD|9T=84a$zI5f3m!TYUhI;sy-C7Py}9y(orz4&qedA11i_)o5lm4$8bVJu9C zdXBB(?o3T$jtM=y;n(M;nrSl*a&H!}7vIu)ijhHM2a7~qof6AwCKm?IrJtnsZJZY$ z-_zEhUsD_LFG{RIBy3B;&562CoFn|wDt;cea#+Xp{OR+IAInetTIZ;?i-Duqp-%b1f>GAb{-)_HOQ@k_AZ~12n-+Mo&ue_xrZ(baEU(<+7 z+T3^h?YG*Wf6uSq7qkAeg_ep|6WhrNlP4KXmr4|3TNJaiOF+=TYi+`sTLByesRDtE zc0Y^?oI5*!*W&uq7hRXLR2@}Tu9-JA!nI{0k3eH-z@#TZyb?uK@0qhF|Npd*V@06x zpYwGe#V;r})dldJYn_tBs-~oTdNQ+1g5j}KLKBunYLyu%`Iht;?T-6W8h7Yh7UUDypaSNv!7A z3LCD?o3F5~Ke=kxTaJU$$q#<~d%wP){X_0;Nu{mUn~r_?o3{P-_S|SMJ=OBq>iYWp zxxarrJp8hPh2{O9vuFQZU2W}PA?N?RDljEqCfjS}>s_~RM?c@Q^;wbSe82Q;-|M9c 
z4Q$D?+CNW?T(hx#_TRet%{OyyZJW6~u}_&}zW?&mPpy25dy2g_=I$1-QIc#vHY26y z)F%`62L(4Oo@b}#Dlj+*@C5b*e%&u`UsqFAWi?+v>$2z`^R`X1s;{m(dGYyY*P{^% zS1q2;DLsXKm5bI{^#pL3D$-Vv)Q>TukJ`HRaQ@82<7_u`Mf>j37N}x-~LUz{C#%ayyT}$ z3~dvp=VZ5DH7SWsOuqH)_qx|JUq7{2F)7skx>i!b#*GdFN*bDz71xPQ@KD~|!nyHU zkalxnhuZ`eiG~JkrKVqE&3(QcJsByX$m?N9)tz!4^KCV{92e2fm8RG;p8@wC@B zaI&Xk!_Fxmt8#a3%Z?73ttH}eA;Ga#IM}N}g(<5dYi**Kv&I?U+ZO4MOu9WZQ+LG8 zpEoa$ul?^owc|6z)-wid@BYd+EojM;oN)UeSD7X)FHF$8T)}%#`t@$nbtcEM;`iUl zxHiGm_j18giRG^iTm;hd*Mv@7yLzdM;QPb%-|o);TmS#V_45+{55C=hr$$@%!tNdES2iy)3il!?78spRPK3J1TiCsJCdaL?k~y-+SY# z^1Rz+`|sy(w>J9mJ#Re9&~G&>yd|5vJ>Z+GW9r| zvoxAgB(-k(9B+<&+2*s~e!Kd2_3XoM%XaIw_8-4|ykEZl$3uR8{(eI@feRN*->>-` zC#=wWapU^yUqgEgpYFK-HZT8u&qDwCa`rVp8aQ^}-S@sanS)XLx&x!by2Dc^FihyV znOp8xFsEJQ^S`v^f2RA-mz%TsUVC}`{@rowk3SYPdK%JZqoDA4((*fIYYmrhR$SXP z>63+D)t;!=S-MljwbYieFr150y7Dq>Ub>*&ymLx2v$jp^@=%?A`stfF%E^L;$EF0U z8Qt5x|6QKv6z7c>{+E0cWmpj^a8N)%ckRY++c&sGXI)Jai&DRyYQ9AHs+az;=|?Yd zeaj3Ce6(ZRQT@riHGx-I7$OcR7`#^G)Iail`n%f5v$}6yippJ4_heYj9?B{Z5xniH zTGmH>HkMp})7j1u{~t!J3&~$|OlIJqJ1@B)TI0k0hH zPIXG0u65iblueU$W<-Z}@KT#)@^!!4Us~<|{p|Jb@b4NIzTS?n`QOX<^Ys58)BXLK zi&Yn`do8)JZ}Pl?P%XpIhD4dV@4xr&?z=pDdwzT38O_N~iw`wm%R^#i-IFEffSGjKGlo`c|+s@?7&7PGe zb9{c?FHZ)i%OQgAWAB%`xIEKQx+bCYc+N+=lHBRVs>u#Z-j{b2?41{MuJ`-2oDcu< zwx>51N=)#%U3TqO)YA7=j#A4fvTz)=+WfxyflB|~7m+agRyozz_n+;n$xY){K{avVJ!8Kp6$&&3KODsDR&Sma?`TqVsvttrQ*KX!r-gsiomzjsAt~T}7;$<+{{5`Ja z_+yD=p59MtJB;pp*?o4|`OhvK_iKN@-G0BW*+cVbp^Uw)O;7Tk7`^GIHJ|Uf`DI$c zyyr8Nn61;J)AxR#rDS+uY&e;L;WD|M&9d;86gRlUFBcEmD#RoN`=mldY@RhG?}s`fgsVlM857Sd zUs|OQed^*Xm#r7RrAW@1;=W;q+AF8|orYJD6r2 z;F#mOE~ZZI_!Mt#p3a>MLJl73stt266zNzdvHtq&!^iutO?tP)nd5%l=dTt#hRZL1 zJacH3$yT%HAzyE#>8@td`T6nT!=FEAc(2^LN+aBXm(#WU{3b7^Yhs(7kGwWGb2%bg zFjR@TL95bL-8(RpF;nlERxR@q)t#=}rsqz{lI4*JshVdFO;|bM z>=JbeuEIw%7ER!2-^QJLc@^g|ndeOzZ=G_~98N17lS<2TnvuS4>T?PCd2+QJvA3B{ zm&!c1=Qc?%tN` zbYc4T8QcO^*Dd&Gd2hP^er~bv>)@VqmC3xi+d040_NSQd&b_{E`su3?!ncmGu6|#+ zb|z2Nd$mQ&pFe+C6ZK7r?%X)Dj&-u8Y?@nxIN22-=;CB@aB|CZmEUSx9o^wS!v zd3O8$M`STB`TSIg>HGTl`*-iwZ{M!{z7KSPORKtyLd-)FvJe^Z~Hh9k5 z@AnpOwQG5>N7=csZ|RiGo@+Y}$G+aXKD6)r@~D{tYc|iSdA9d*lezEZc+>6k@A}8@ zy40%nV-tJTO@^0&{jW7P-uicF*32FA_~KcB(6T?lk@Q zr{Y&TOL9N6#+}-8AdEpfZL0;3aH4DZBdg;(SwAoxX_?x@tHIQg)5%aHyzRI1_40-f zA3iGU7p{)u(+;5Kfuc;S&sG-#2Hnk(22% z=7}8>A`?`jl$$=S>-0!u$j(_+xr0wtJkdZ}yN8We;OUeIhXAh4t)hJ&e4AS`IaQsX z&)F!ZRsDX|l_)KxXLI^NYqXgb{5@;;qhKfK@Xj?$Llu^XFr1WV-m!nDVs_v3&lWtN zEtHwA&q@k0ipY>!Ih8kbUB{9KB0`bb+Qla$^V>7$Ff4fc>j5*c@8%v6@NT`zWn%d_q1>OF1`@W&RJVJ>sipL`EvXIzJ0ko-u~~8+W%TVE|@g_ zl4I~&e)(mM=F*niw_~~(?%cn-bWTvunIA^=F?!SI1i!L+Ui0A|%l)shnyz*=R`btI z|18NnH7Mf9q>uMDop}6sGDmjX%B*ckKIe~LHt8`uH+A(dM!7t`-0P9qUYfbLmxy?$ zI;%}DvYI>hyYzE`g->QZvgvX5x;Sa?{{8zc?^L``@>#sSL&C|qrDccB?T}sjZomI_ zrl6+IZ>7cMClbdKbAHe3xtU{Ts3tJu`HZ4@%RQZI8CKr^R%PCLVNdPvs}G;4O+B-< z?{SHhZ1K7B^62+-#nMviU(B#OZ!CEBa|+M3Lmto0&Nh$V`}^13>+kv6qZK?f&p*CV z8oO5YXvW;jb9Ua%OYf0v5B$0Dp1IP4Ia~dB)z8|TKjXSId9{{!QrA*3XEztW=f%1= zE!i86MZI40{halB&$Ok}_Pu0Gk#S4YSb8NYiN(l`p+`}E)`RIyUBV1?^R{e}Te!

yVsp(P2N}*=H*yW@N8To%cMd>)B`z3X6l>xK3+KU*+#K*8_%d+pOx%$*<|XXx}vZdYYZkA z>$M#9Sj2hl)&;Xw*9}v8Hrus){%I3){Pa_!zT~x6C1xet$oZ=t&z#pHxk^GQ@tTZS z*vZdnv$GiwHN+Y zwUI%3ma_Bv+uk^}@`uH5JM^#idcOH>k>H+;T)$kuE2<@f*pdilM6hI~%h z{VC0DF3;yY&wMaT)b8c)cLf#|>FO?Lf6RU^IkD+`hY6qZ=dG`Q-Oa16|NrXj?CZ;) zU%vnE?R~la+kQ`w|n<# z&HevB?7JpalhNzCWQw1e3wt0`3*XFTZZrZ-)*N=PfzulS~)K`&SoRSfr#WDPQ{31 zzvrJnfBu|y`T3;hmG6Gfo3Lux|D};jgo3V@UbZ>=nTO%BiQ=N-b<e^k*1vIGN+mNUp}I=Ff%Wozy0vs zO+JaD*1EQG4~;xJby)V4GDRKSYyarp+S$63PSvx#dujTMRpRII{o$ShYx@+QBPXfBrs_6dBSnyveB($%Xbj3-SLuSCuH%rGHn zPFv-yG$rN^Q+2&0Q?5%^{SDW4uFL+lk3FhNRm|`ln-b`(gyWApJHGw4HGVQBxhGe6 z4}VP7$(2`*D5;9erY)~DJ13>$@@ALbQj_zW+fHShFZtfD*v#OuLVDBXmmyhO&uL0E zyR`M}IxmqO8R5`ls$_IFdF`Aer6a$0|GvQTURH2g%#JI^+SN=BfAxIm_}<1sUFntG zpD$ln%4UAA?>PVCvcK1|{8?q2?`mvJo%N#K{!gQKp<>60k3WAF?Ugb*CNa^;<;)tr z?(46=K71H_{oDPz-*4@Is{h{+W+&G#Z&xGp{G|2cIqFV#>v|tw*&<@dc;N5dzxw+nOIp>vuf#^vgp@y zR!r^Os@Re!bK=6^zqQv(uSHF?@^x}Kc44;p`+IwT|Ek*i=i&GIiXUHIR@ilRl)tm$ zE1XxXsbd;yc1?zF`>~sU|D|oeEi^G}QMTq|i@E;l@=doK8&BN5d-w0(y_?tF&sp{` zYNwg6b4tty-`hNg4SY9mGi-RY_U^lF_iKK?4UezAT3*}rvc}5LL-T2gRqtU_P32<} zrn~mUtv@CqyUg{7Wp>X>(U~_Tm?l)tdsg{SSo1RH#IoJF9K1?W$pVGN`va}hUgvyZ z6wF?G{WUA&@=Ce3)u!8WXZh|goh7jBW9gx5Q4BqrXFu2MjjOxAM@%yQRO6(6QIV+Q zf=nzA&o4X?b;?BAHLU-F_#>7zhcACxFFNb;$?UlsSgxhse7+|2QtRc@GZ*vPD%Avv z?B8;r-tgz&MM(>i9{fq-J#%J$_o0G$y<6#_UMH$ zG(6p`kgFRuKfPW5QG>m`EraQdipFd~0oKlii#QxiGY%?rO}*7pu>JntT{rKuoVT1l ziBZdY?(>%~Uv4Z&eO9)4=gr*uuczPZFTZ^MY_U#Y_8!MeQA=}|axfUah~E6BF1)qfx$5%2?_#fDPj|jI9#)M?Q}c+fmP}Qu9PXc`hS%2 z<7ex~d|>|jqcbD2pVR8On0maE5>v^-wIt7GGzE4x`?npn1y* zWxHlw+rsu%h?m2KW2@TNgMty)ayV8TaW!pqS6X*K?rwd$<9DevuqJ>Lz_a_FeGA8R%!R%7N`ymX25GETw0-#^RDpFe-z{P}yo7vz8a zrO9spx8lpa^4Qe}ob*y9<_j))UzNH(@^#tm+_h_TB9scQJqs#2_ZYO4y#Fx=)BT#y zUteEezxVU|d;4;4709mV=b!Jl{I!MO^3R=-5eF>Ql@wVRH{O%W%Rh19@ZrNhYtC7& zUYy6FsH*t5JoovUr?bva|NQv!<)1}AU#(srx4+WZ_wmc0U;g~DIsf_l{rdIAx`J1u z;%u7CXYadKVkOVLJnyz-{;XwhfBUiUJgVD%|K0bd9XE5f@1DEyNln7U%z07r-+s^S zDDi#1$DQT54AYcTpKWC4_dm9{E62C<{jGI3^U4nwh^~yipKE?w;;})G;klhDE;rtn z?S3o967k9)?)>z{msV!$U5nt^Fu^WsZ;am7YrglTlds(F;g}M*>Hd9%OP4QCHhlK> z_Vs)1i8E$yE?)PFL;sRV;;d(LKFg#hWZdR+2s(FW&&@Aq*6_B3{oMxYbza?i_vKFx z1309m`i}dG>Sw7+Gme4o8Vly%hW?tL>?~-WT?dPoN!VD|scSncD zv!z7Hu3~U#Da<@M{qWPLXBM&+#kkE}nxs4Bs6NB-)F|hMuODKYmV}!cay7HHXLWV#9 z7o)I+lGldZL%aWeyM6fGx?HJdCj$>hrDPVyOE&iQ_FN};Kc{WK{dQaZzx4W9Nq>Id z|F3v-i%Nms=Y@WAUfuoL^hQ_y{iW3sLBfY(mL6MUEp3!>giEK?qe1+CVL;q+K^D^k zLMfpSCSEk^?^qJvu{p$bt(>R5Sj_tQrDjn9PELXe32k0JX(u~{H$Q0CSdk}v`l(uk z5yzRw8osy9c+Kv{C_k5MIA9@oEysKA+tQi4c6qGTJ)`u<&0)j2PLATM{+$=5ObI%9 zM(50BlWB8yZYpMLX@4DR@}+O*lu5xso2-webgbEUF3KTf_G}SlAyY+$pq1W!d#xls zBqjSYnI2n_>2b)XBlfmZP*>qBH6|9O?B11UjzuN=aPTIW9+OITSn|v#r6k1Y*qu9f zGL(wFepc9oKAUk#Wbx;N65cJLEyb>#P2L_Y9V?y|EZ*mP?9I2|htKwM&x_Q%-n}#C zO6x+AKEp}B3L~PqvQExj9_iAv^|M9M)5>|vE9H)JGU@~h*XS@PSjOzpBs2zD!FHC-u~<> zIRXsF5_M%a4Vx-`aBa_V)GLZ{K}=ef|Ew-=YN{p3+?Y_@m|WH+%DMvoT(X z71h_fryMJ+sjBMJ+r4>0 zJ8&xfCzJ6NDQ%|L|5ol(ST|q$X3+i;K37JjlmyvG7A?+iV)t`?t^a=8e0JUaisl&Y z&=ssIOPSb`yb7h{y)}1o6yN@~{@we!pWW}DXZ|_dFW(}}y*b3$t@36^#OkonXQDGi z9gJ2*mNj&g35Fj?>S{cuu-eXNu1m`PN~e`@Soz&J55NnL8usxfG1+UAdQYpy>pg$`(t?MXf#v<)^4ROSX0f_kpP2L|d##*e z)0ccoa^kObTknAuKz_DqT~>Ha>+dP6>sxgd&V63P!|>de!z6d{l*c>8uHE#Wk*%tf zKKJ`Rqw~h$X$o7|)|T5{e>6ATsC#Rky%LkX zc5BB6le3yfjRQ}%X1gA_{8A=4XV$cxMw<)v#Z zEc$w{{ej@du+`;Xx80UVepboUur6C|jTGZ`6S;}Ovlt3@zkL60*4eb`y>b4_H?u8L;di-YXAH}c%G;)I^Y(KuWOQ&| zAt=B5Zdu>wQi;Cf%PVJnH#g6}U$^yU&M)8NXO>@n`S79Q^EY$2?=hb+@!gt|tRq;s zHtY1F<+s09&0XgCv>_|u+v9$FJ6qeLr*HF~&#|6=e)?&K1G%?_4?ayYn|=1|*@F_s zy}>;<1)fe>R;iYJr6Q-IMoaa3>~im}Il{$fH5sqZ3BLb*VeT`FYEb8N>$S(93;(^+ 
ze|6Yh)?fy!m_4_ci%8YMnb%Z>R`79!9q{2#eW*2`v#WkjG1F4-HLMK*2WuD;*S$7a z^oBET^>y*qpmyD$BsO8fnv4viwaJExoXrzBBAmA_lIRsadeoftN$bQPl1Uys@;mAvMA?)9?R`9-sqTQ243P!+#qP_kNPLQIyc%iBp)7p|D| zyHFfE;$-aq`Sa&s z&7)DveKjMyJFeZjy?ghv-1di6c5{1`nPwcfJZ35G>|)nqasHKQ6T`aPvfFEmtW17u zEj`k7u==^o^(e2vlW)Hku?nWx&GWlwX|(LwqSAfSjCzy3E=E;b&Yw50&r&`2_GOvg zt+V$%KXvWG-`VE(Yrp6AfVKh1v}NBf^~gApV}ASi@#Fldgu+!0aKxcvCK_xJAY|8~#b&Q_-X_`0`ki2>?bE_?Rx zKdma)FRt`vSK8*c#pjMcfBdwh&(fG-;@vN?8BL{r&nmkDpX|7qH+S=88{WeO)z!b> z>hG)ne0BE)mh4zv)nhDYI%aq?Cg(IPdmq$u*&>>Aso1<%27L4FY8lc*G?t!GJ! zWbM+!4=-$uT`3yz$Y#IJM%OIOr+?4e$G0t**Xg`voqYLl zY-5R-V&qMwGe3;>=}cw%z-m(?SvPI@CNbBw%k@8>jY_!AnAvLh_0F?WpBRUWSF5uu zH;9{LALI-_5+W~gJ!a+8{*Db6a<(fT?a29U61#M?ol;tUc2`Qr zIr&6HlsrjNRB=n~UZdm`b>rb`7w(w?3?~GHIt?~7FlzRw1n@eDrWkO3B*9#5P`SMgM!uON_>&FVKX;GXX4 zZM|065S87jS*TfizdUz(P+wu6Dz~xVtOu<(qZU7@laKK#))Qrzvd@Cyh{XGM@9yoX zZSL?tf4*wPk~vNszS6gr{e3q(fBvsJndRAbiVTb#7x({uxc-0m{y*xq_kAzd*v;=a z^Y`!HyLb0)ER#BZ`e~5%<`X78-3JYw?tjmZ+yCp^@BDZ9?}KK|J|u$31C3yzut7+j*0dQdV|9ocewG>ryHA7`H39J;&h{sSd>#UqzruDRaw^_u1O+i&Cc*WKNgd%N`ZTedaxpH;}RWphb7 znH*zJ76{6^UAOt>HecfjCwD5H`26|v&l<_~;_H4E6jy(L6}E%#@YP!%y{?N)Hoay! z`BIH+n?&--NQO0r_o~zSGUq)`xlmGT-go$T|NLczbAq>|tSSBuT08%{P~v$e3rn)k zZIdlh4Vw&<1O>Yb)_R%W%`sEG9^-ZY`|i7Ld(RfFJmn(McF?lAynJ@izYhy_w(1NCa=hrcF$y(Hh5%+^mi;!5rVFudFgWkJufRUWYx)bXl)tw_BbIz|is%sXc8u_l=|0%M zXO7RNBigTi?pxzo!pYar^51FM6d|RE?6p@}IrDzya?Fbixh598XD8^`h^1E&cdVLz z-p0RbkJ2K>#Y>f1p5_1lwp~*#-S@KDf+?!^i7?=hu0=9E(tSbZVMR9vDeA#qgC}#Si*Xn;Sc}i@uTGo9_ zNs#k}S+L8JFfjp@q=+v}EgUTwYhNrq(seyBc45*X)jbm#R`+e5%Gk)_YP8{>FndD7 znl8ZrkBAd1zHbOysAZWdlDzRFZ$Wf1lSYS|%gR1Mv8!f_R_Jr-dFu!&{ks3&`Q*=! zA1|Bu9sWA2Xv;B$1jQLkm(6H${fA zW4_EN)@;>2UdhHF_Hb2*^2dcYbIN@$D=>s#yTC8F;oM|Hrlhr-@7Mi5z3#o=^Wbxw zE!n9JQC=I5r97*gw|48bYTJ^Clv&SXx=gRNU-&-z_U+w&&nC`#*3ZxXS?2riy`^O` z?%8`k-;-Qk{;ohaXr)rdiJafP+xMl-d+j5}X~>e^Q-03q`mFAb82#)1^7g#fvoD(- zo29m;I$Q6#_sXLd>C^VgPY-hDu>0})>8~$eO#CL#W2jY|3^AbD&UX#{{N@{|FZub-Y+k&cQ{lq zI{B^z4}X7uSK_f53<+^`^Uq}?fmlV4x3(=NuHCe zV)XWy@GP^IWw`<;elmP=Wcjsh^%+ey#+$k22dm|sE}N{o;d>=nSFC<;OlG z_wEdHQ+Y4#`+rxQep^RJmS$(RW~7TrPL|2_#~N}=R7t!eW4Wy>Z&oTUdN(iZ^wO<5dan&9+SmW{Jo&C8BJzp|FJqFIrK+HTOYZeN z^V@qIdYBlFNvH@-d;QgG%j@5E&wV=nrd{5;*6`ddRo2C?e;q&W@6{0WYT2gx<^1xk zM=R3>91lcXZc%$WCFxR%x6`E7#Rt5C-|Ra2|BU78H|Y&W8(m$$OBF5IJaK#7$-2v$ zyA8s>Ei`O7Gqp$cNRU^LaItC2u>j2&uflU?xze*3l$h*Oem#rrqD1nsjS5ZM&8A0Xs~(B6JeF}HhsW}H=LxIZTXNl{ex%GTH+)t&<5{It z`^4;vqO*$m7#S=2I2q;?r|4eyI%bjlYngaUp&V=0_PZu)!@0ziyjD&>{qyp2|M&I3 zCIsDjvu){>LJ8iPfB)^vyuIx8l+^H`e_uWb_%DAL#qi+Sv$V~3_x=C*{PovcrLh@O z>r9tkX=(Vy(xbU_gMm`t;}zWMVjIqx^y=^YlVAT+{Qs}*{{;ig3qREV=l}mL{BG=8 zo9BOiymWLa`+YY&e}C32&^Zn3EuWuF`~B_h?4VONy@fI@2PHPNHSx13wEX^?xOxB1 z7`^k$i#0R*E~o8gKCpbbdz#rXDJS2JCk<`wZI4=-`6|a`Yc>?@i`i4Hu;^jc%;Ynw zckka_E$e-I+xGJE+Uwh6Z-1@5p5OD;Zp*c529`xSOviQz9H>ZtTX#FUStrJOR`Q(Z zKfl+1|2-#JVbd|+&lbGSYp+J7?^Sk6+!wP>)7@ydNbt(1S6_!SU1~WtVdnG5-`D0` zvN?NMRVW{{H^sj|=zTew&|{)MKfZy;kjwRQ8(9Tf-tN z52pQXZLw>a^x2^2UUVR{xCzsx@3s>w?c`?uOe;4&Khbr~#%qzuKBp>Ysb5_i{o2Ll z>(r%IwF0TWArI$tZ|&3ZoM@pg@IWi)N`B~Ew|J9ftL5KSO+WldPecERX=*y-fkUb<^Rp~Ki+MB|G2EYK=I0{V!czHCpEK!_RKO> z5~w@oHT|fVUL=?IjjmI*OSBfU3r^se$Rl`F{E`*}gG4s(0;A;|21^fVxP-0OVtQiQ zF1630zcBId3)AB#ly)XDW}Linz%5$sZkJ({iqYxq(O zl_$J=^JA%EbXxrck#xb%LM_!$y{#dtmyb``lCwQmn`x<7_S(;OEGZKu7H@Ss@MKER zxvgQzYxl_ni65qEWm?C&v5XrkYoWOHXJ{5BjyO_}HS2r|0!_WXtJ2pR!m{U|Hq7 zsl1t6`M;hxb4}v*Hci#9LT9&b$-TYh?B~nDjQdL26BAcf6<&{G{P66XLxxj9N?`8G zs}IeOb#UB^6h5@<{qjPqxqgPOq1VlPi>>C$$;*HL{kLp;@CTD)E8b7<5Li^MXIWld zU0q(DzxL^=(^?&dG{qeZ} znqJ%CDlv)IyY6+IFzHRWP+e~~&s(>=ynOq1_SbhF7QEQ~uEa8Y+WLJ}pRdkdzw7L^ 
zsP`oc+`LxyB(tz&_muN-oSb3zW8M3xd~d%(8U5?0kN3;V$=S_4pPqeUV(#s4-@bkO z_wSxWu=a5i<^^SL=DYuHyT7OY|G&S#zu$iQ?cKX;+2787omG@HZO-I*M~^?=ymi|( zrxvLrlZ!WAGxHVRbKYgENb;Gq-MO>U=Bf3%uG{)-(iizE`ES4fvKC0F+g@x-JfQlP zr?|`a@!H~3zXEr!kE)!vY)+S2;(HdlgEw1-&ZUy>stg)(43a zk8EC_;9z*M#?;N_%0nIXud^7l*M_Ms-MrZ91ed|}%2lBj({+@tMLZ47I{E$R^`MZG zOHR!8)3OS@dMYbNEII48nn~}%HM|;3LV>rp`S zFK0Yi$uC#>Mj=RA#9_tOs`pMAJ&H?t_2pS67H@l%)G|{${8{C!=au*JwjZn(Jf|t0 z-O^q5{b1$aik_`4de6@&NxfV?_09JEzkM}b-FH28P+3&Ej!Ut_pkGJnrhsdZ-SW7W zu5ZOxb@;xVYveHgf3y6yf2Otv539ER-{YsfD7GJ|-BBmQz1ibV* zGI|A-oQlrr@~+sb5U?Ywd1=Gieb&E3ZGTREy>8ximzIvHHw73u(yxUrc(Uf%)P@sV z<>t?SR(a#ywrjhBggK(0&)t4^S?%9vUuUn6i%ibwQ|?}&+;fdH<%DVW>m^(ME?=%5 z$+p$-SV!A8^Y&>`EPj>eEOX6jHJ2{w$euRKaM^1s!IfL9o;z_o|18Py_36_oUTdcq zp7%8MdDV6Q%iZg<_vQYIdTry+(xn_;sy^%4#pwLsy^ln9-{oWYqG|bjO=~Q?C=F>k{Z?FHi+W!Cf|MK;p-!nM$ zve%o}%vt`@@0P$U0jJ9*k1cBc{0MySmF#oQ^CT-zP|~TLJcljL7T-48eRtjKU3c?} ztE$d#WHT4W0&7keojFuZC>(}D8J>2d*aqjzkK=f z@_u>yV!iRh8=6%h2`t?5^wm<$T;S`W~zf{6Y z@7lvlKc*`wX>DYhkhr7cO5J8oMT_ToeC!Mtl>gOO#p{a;K0K8nz}eBWsOMZ!=(LL_ ztpX)s+pd-6UN$)x>3&Q>$-w&cmXoVOgHB!REcIjQ==nD9jL>t7^MzlFwkoy=>@f}d zo;&NTE>leE93}5mF;|CGE7xCckDc}YmC4!QwcZ=oY?a7v=07vXd)Bj?k=ISuwq3D! z?!=*Z;QRgh|F5pT{`~XFoU<#}#;o1=x?lLLiAP|erKL*sHZOs*`NIcHU3hhbT*yUVQ3SxHXLU7k;ao>j6j z%G<2Xl)4tpaN+yfCyPGC{?$BZ$r5NdK{45U_SK_}9IPD9w~0J({g%@ z_UjL8O(fsP9*gnTR879EH8*z0y2a8RI@MDy{|sF26ZLY|(kGcI>1+Ak-LuTHwyocK zsaa=E>Dw5`X;=MLv2stD<9xa0!XDG<^3RzS6s6A2cXL)bYI7fR$@C>59^(lDu&b!{X|tI{OzIcd!)xwp5~K8@01jtB_*`zZF?qmLW^ z%#lc*vph<>V+H3FE6Izy8qB>|R4(Pud&bE)dzqpNTbP%2_moLj?`Aq4D$?6p6sjH? zoE>;3at&+mlayysYfTv@Ju^I;+!ZX^XK3mxo?~z?^0`w+#|f?vCcbMj)RMd`m$IBu z6_~9PTnyzhA3VvBpUJ zO37y5{f2(>a~xHs$9NeY(>QwA+;`sY%stQMICB^td;Qfac6s*Rm~-h$T1(A*E55#( z>ZW4-{MNSE6-sBePMHyOF4M0wdvDBo&0_+G%C^5YpFPRD>*Ad^b4x#)2ufzF9E+UW z%fP@HsbaEl%BPUW_ILK*%rQG>Ey===wU*0iQs5+${@=RH+n`OWe=Yp9KC(@m6MU|6^UaoHiWl$B{=NSG-rf7_ON%#)H_quc z3|q|S{nTXnoFcu*BNM(Jd$#X&4#&ZtZ__s4%@s_Tb8OCMnafXW@@5&t`5#}d-tcPf z?wh%c=C|*jv(8`RdMHz>v#0plg!G4&=dR7TecQR~<;Tm(mrdg9|GxY9F|*H@K_-v? z+^)wjOYG)9UURKrP3^R;Va!|ie)qjxQoBBJ#<@rand2WzY_0`eO zEfbyKn!5kUgOe|7!sJ|To89}Z_4Tcrn&H+k6%NHEEL+1ucVwp&zm}TzO!ZOZ@%2%w zH@|yU@cG@{-IaaEpC_l5-HT3pdxPPi)a$+5k2N{;T#R_USNgc;dB?Zuj1KOSxg1;4 z`4ubVL&TJ`x2!2XCt>WnSnj5#YBI-0!D&HSOfP)beN#FnwQh~qDU0K$pKiJTR)4y& zu-30d8;hgXW$DJ;v|O%MVR+V3T!8bQ@-M)DU9foGR@hN2!aX(A$P632W*?#MDB_WMTVX z0}nseo=04gVzpb=c(DsKh$stkDX-@;S2A*uN{!2MWOy#iu;RtGX{y!%UvG8wYEQXk z(yLazBHUrFnuE%sn4*(MBZ{tBrOy`F60EURVBu2LhQB{-j?K6b^}MNc8>jE(GGF02 zuB$)Sos1NaFg!LRDdfzNsMVR%{Qdo3O|a)_uqdXep{E>#4)Gucu>1ilfu%XbE?Ziw5`SWsY5k86|9(CUzQ6U}%x&DD%T#ArUJnX(J+SZf+TzC+SAur03upcQ zvZSw&ulM@z;LGbm*A^CU=0C`P`0LA;oGy9!`MriqMM^(iJ#2o?SfOo0OXAciOEwv{ z?3i<9=@UyQtNG7YJPG=Au=@S1XMQeow%&caP_lXJwQBo5Wd^BahK)rhclJCsiE9s9 z6`S&dkzq;LE35Z^%66CDYZv(CJHPm(Wt^Tk?;~ZUM8jhnL{{9{cJ}PUXKDN04<$;> z^Y^df-R^sRTd$#u%lf#zv!4I>^5w`TopoF4R=hlU%k1j5w+tVOET>1!TBd8MeLSf3 zgVt8Pr6RpE*8ccYGv|4vcgJS4WiyH|MlYMfrEo4%X>;GU50g9>rmu{gc3GmvBYWk% z;4_*N4ciYd%#_MrGjD^AkIU98g);4lCsbcMzMS*%-qR`BYgZ+-b}gNDN|e+2gG^FPN3amf6Jb`*_dnXDgrdbR1-_Wie{&DB0to z$=Si#*3(%$=lPYWu#e_on}vv`3j~&E&^6=Nn$^`MyuF*g`H+q3aAoS54N}s2mFy zmFGFUhT(CwUEY4&A8RAS^;5Iv9h=kLFeRwyLtKaeWsr?Uj^X={HUjJHm^L_Qf?yvWz2Q_4` z{aUp1%x9HQ_lis0@2d9B^HXPd@jEX3i-yeemsNZDrfXlxTFW|Z&gaL6kBcmJyj(i_ zY~E!v-Az*_2?g%I8bbbJ^~Ex zIXe5owuMt#Yekg(AD=msdz~r6xbKW8$XeLvCx1)-3j5#Wo?_jYtjeFQs}?FVHEhew=QIDcYHPV! 
zoB3?#+ZDY(<@B2UT8v%(3ZC1&>dmGpZHXr=l^aa2Nv*!%x|WrJ!?Spo>x~H??=c>* zIp1-?L|0Np#ic0kX3<5hg|C0hO)T;&?Ax$T>ngLjU@5cHmM4E!Y76Tn#&R1it1r3M zY1pwP$oXUa=jYr1uisyHCYC9lQLZ6CA^qEtc4%Ftt^X$GCZe>9Q+b3~ZU2DqayWRlKF|#ZxAlczG^0 zU{kxkVdDYOB@G68ahK;L?TTxw+q`kz$Az<>b3Jg;nyIDG{r&2yC##p92^JA>Q@FFpt~dt1_QU1(>n^|jwJmqoHQ{NOzh0a+DXE9;WR&#dGeL!aJ&MmgHi-0ch}w#J=*?k|5|`_7%a_rJzw_iSzZ_wW1u z-#;GOay5I*$-Z6jUYgv`!@V*`y>Jxqkh*n-(c31$m{O3u0y1Q)+tM@~oAF z;wqiv-($U7&i%gp@za+NCVt7MBFjHrKYcao_f_-!{P%gwGk^Vfn7nrCG=>*Oe8H)F z_nuF(tN9V&weqy<>wNz0T-RP|ovyxZ;<)6`hr|5SKOeP9o?HF*nJ>q~qMdQ)uFvt- zd~D;Nds|SlA*W~MQoq!o9k(r4>(2jf_q@hx?y}5nWoIn6=O!P}a9ZKEbjJbZkZe_> z*w?*VEmgnPxh%SUJDOovoc!GHwfE0|o)W|~an7WTVb`NNR#aa0y<8zbM~HE)=28iz zr7HxI&S@T7l5H&LS@~2lDtm3{o)wu)o9CQQUwZFKQ0)grmZc?}C%zV)+-Z_)aLhze zBzxzzeDX}R>Kl(5t6&pZmM>0e7ruW4;rJbh;0q=p+U3r=i4cWtRyK9itg;8|7y?~`X_ zl7$5BcD^c&(AJ)@=VqSh+Q0YzF8=-hnSGsrqllqXso)RA743?hjEAR+76f}#SosI7r;&?(JhhTc4HkNpriZF=tb=MUe%{rovoyF6Fg$-w2Dgu;c7b2=7$ z`g_+_(D(9>4<90m+!zw)1-~+%{WeWFS!#LVthBky_I@{SpS^pxb#knxs_NI9CYkfv z3oo=5pWJDb{W-5zp(%uG%AVVKzt3J1+URk+Wy;KQ*|HmGADU{Re;?0lDvH!2?|9@!^BU(E1an72T zyZ3!eoOS$F%=X(-+0QL%|9pwy_BUK)m~!UfyJz3t{VUjU_uckuJEO8g!+j2lBu5r7 zZ{EN2?i;@RdqxTs&wIAc_I+QQyjFjyhH$>j^C{kIHASz)t~I?-uBe)E`&@MMA=Tsj z_O-w6=KoVWw&i9??&UY-*7>z-gSG1Py-tCrW-`-ub{{Q926z^BN)^YZJj&qUwm^m*k zx!1tn#>VHeOUt>P``*tjkGa|qyJl_ZTWA-rh3% zt=jWHf4;2AQ;5AC_4-(0(mKU2#{$;6J`}jyr911RgzGdPHl?7HtCseCho4qmt#w{& zdbGtrYSFUXz2%xuPlfhYimV9Obol!FeQD25|6Js4mpY|Yprc?%T(9Zvv)7`;pI6Q~ zCKBwkF?ZJ8Jaa$gg*WrIE1DGdobh3~_xo(x>pkAZXDkbKe_dpM&p46CRB_3iUQ^pZ zFG+?S*DU>Hx7xKtd7onyDC*QIIGuUz)h3CZjIVxanfRWwT)HBBfuV|ymRI39SJ9)z zYp0%GRJVSrcx=GRrD0|+StnoYz5dmb`~Uj?5C7KxpU%&r60q>a)_!hNxlRG?MW^iJ zu7s^_$d5d>?x;w~g*8F(Mgan*!BRX66pWbGKbyAd@TE+@nL3LMwa>70ut|t1XZ}%A zVh|Mc@Ce%|#T23=xJ;`|@7UtfMOnrQnxZmY7fkv^m%RP8?>_SAMv(`Dg!_IPeul#HykaYcfn}=+#x9&Zw)_Ypl zCw1$c7g@eFdp+}_%-lmds_B}=6RTv;X~wqh()n2R&%)oIL7ba`WkT__Z3;gYd{K{m zRT?YqG~>DD+01!ru}}2AoXRXZcQtB>(0G2t&>qwAVSMX;9~vv+5bQO|Dpfy^iyR`)ukcDo`rn(zUSWF z^036}?BNqjTejVNyX}7M@4J6rU*COK&tz@Y&nGb_CgeA9WnY+~*D}NSc~eH&?A+V= zrAK9-SN5qc)imL8m}(rTn!J5??%K*(ZIx25|M#!5@84N^_EYnQt=EkA9=`Wm`dq>L zou#W+sh97*n!-Ida5%_TMG z@B`PBnd#5(eEsTszjWIQy`=(+l!~&?+&QDoc)+^7qfPEcu((su)m?&CLN}Hc?=);> zb=+I5_aiDg?(aQYSSWw`>b*04_LKNRU37!I7_WS&op$zg zWGJVI?p5}yI+hNOtFN)$Ik0(^)0-(wOblII-pmT~uQR>)W%u>3-_*~)bT*ybf1ruM zA$o0B#vyB^bbr(QF#j7_XDd_9QnyW7JmuFq|2sD_Rhr7B_V3E? zu|Jpn@U7e0U3O{r*BMu$K1X@2T@~cT`gt>3pvLE41#tmiEY81oIJefM{>J;yv(8_x zj9ng76xsdujBdN#_HFB4+nkrTtBJT?+FUNMRz$LhufuTZzW2RaX8NbE$Jbe&zr5Y< z_nGz+y=Q$?+cI{uFq`ZtR&*=zc=9XvZtS%^0v9h|e#~G0~#@@4bvXb>*{CABM$IF+_s`s! 
zG5u#w*t-QyeBS2{?YbTNnRnHs{%!wyUhlG-@2_}7V3B6rS9;cfu|| zMj0jHl^%j@OOCyfIV!NHcx#U3z!6t=*m3>*(s+a5XAf>|vcm zdAqHW)%&vDcMW*%2{z61TV8s${qMn_59Vy%Fk3{pc=ztz(brpN24A1KS>1cJZ*ouC zJe39KtY3=>76$*?a3ySR@oTN;mn~aV4!w?=At?}|a%4u5)Uku?`Q9%jg4ZtHc1xo^`8t>P46R6Gmg!j>>Q=B*DE8}tzVy)m*@E0biEmK z?&YbL!ds6WVq11KXy?ak0ao$ZtO2JdnXkBPv)ec2ZFBiKzF5J;MIN)yXQe;7sHOWn zN&QOoahVg>?4~c#7WUHj?hI^O7~WbM`DWtkrLp&8BBz@ER68W*lfiI4Z0F401@bi< zb~EPgd>fg)@*caq4U>fumvnr+@`RVuz9eSP4my>${r=lBw%ZoZBW~24k^kB1|L@WI zy{tQL{GYvBuf~M)wnX(V%}u)N!Wgd33f!=YZ8qbK&YdqFNp72YMp~-l_K(&Vdjtgw zKQBDcFPNORZ%G#O_NU8oJ#*x<)ff(>`L=R(XsmV0z96-wOVN?zm90h=bHRcos_71r z?IueaPe-XRZ&=mvX_`XIi#yZqW@rj`gm@inQ!8SwY}3y-;~p_j%CD|KPxIY>ebZO%O{%%8;PD<+R(AhtL6FQ z;IliYep(^1XaDZq$$9JJ_HGK12o;@Js>`s&{!QGT8bywB{_pUmy2NVBfD-t21Y|?fd`bvblczzKpH4?|9VCx z5Bjue7PI2d55ga>=1>3rgri_L`|J&+0soK1|Jd&MG5`0@`@g^ZX=w<*e%;)BI}byb z;a6w-Uk{|rU8)y3y_)o0kRf8nG%H{K*OQiZI?tH)_2Iw6CUeVw|25#Lv^={!Jv3v} z$=K_AjJE3?E8L#^?D9{aj@@@NVq>o_kBZGb+ufA4x;5K0UEDp+G5ayw9-Y&bv$ArR zy=7>qli8&=HKxzsot0Nnpi$;T`R_#m?6BJXP!T4xWI7><%$BVt`w;OIfTAmW>TU+9LZO+Nvg);f^dt#;?^)FSbAMaXcs21Q6+hwT5%*0ak)17=4uNC` zhG%KoTMj==*gMO%#cjRzQjX#t!|PEyC5>HOyiP8E{PODQ@bz(fYiepj1Vc}EC2+rf zcr2mKt>a1Txz#H=T;B=h#`qpddp_srjpwY|iqm4wXGwkLXjpsFtaWu^+l6f(r+rpG z)FLNPA^fu`hjmAx@qgLUI$_`Xe{b`zMqRh*?r17XzQ?uM;<>|Bu@4o6?>2mhSf+%KzCZzW-15`-Vn6 zqm86H78jF?1ow_bgv59`Iki(L!B-Ih*v&zZE!*9pg%R9I3N?fVL;b50{O;_n& zwYUtE&vIiSiy}j}i~SY5A2QDFm@(6Jf`>rt2S|y);{qy_I z-98hTt=ROpPTnbFt>A=%f|{y=x?8VBohf9{o1<;5)$u|gI{W;2|6_)xjkEV2Z#{ka zv2(WQkx9WRFWRqskL8)#8m+za+O?dz?=F?G*(;YnzNq*#=-qPuGe*9_*`<-wrUb1` zotN&C(;L69>g#0veZPNQ4z~||#b5v9aQ?o3$4>Qkr7ZgO^}5Ua{Xf5)HTaT$f1m8H zf8V}+tFhW!{W~xA`rW*QXS;t}i^V?coFFGBw=DBj(5vHy&+lY0w}?Ep;M;$A|IfI( z^b+e(gG?@^s1?Wlf4~0!mi>?8|Ky(k0WA#Un;YDCYe!*ffQq;?kKxyq=a>3iezljG z<#X%^x$p$veOqQ&@!`eX*@aP; zl7fVqr}~KQJM;NtiIP)5z17@x>(;f`Nwgib`*rO`rF&L#g6b#L>0!~YcD++NksV_y z{yetpqV@XedXqYmyzc${Tp2Yzb(VNw>A9I_-Pc}yZ^O{_;_~OqA1g#A?pb%*=Jqtp zmM=fPZ27xm%A1$&E~;GjBF)paE`~aW^s-NU6J++;>iHVB*$emO))i8V&8vBdx1%&0R_ z%?zD5WDoB2JW z?3h8B;jz-#>!!*~n@#d=i7@OHXEFF5tsN+Sa@!Bxli!kVDj({dZFcL1@YZ9uqPDJZ zR=V~6SmrDSla>qRbD3Dg8wUA%0qeJkX&$r|iD{Ei0urR1j@8#0&**Z()^Pc^Rf5VvbIaBxpIAK1;bqOZ!(B=bY{eqtGGzV zZ=3keXxhuCtqc-7n@yP;tmIj3txop~229^9kR-pTKQ=l{)KFSL#6pJe8K)7?-lN6l zo7;-calLi#*ARTV>{#Ot0m01ACX5B=CM&<6lc%}zK<`0EP1XR;PMgSelmB|1s}}Yu z%)XcD5qtf;*XmX4L~U%R>hIt6|JCXC&&HXr_Y`h_V=7tmW={52M`mRuEBD!g6>G2U z*>cUnyu9-L-tUgC)8=Gru8Q)KJht)uuIuaLmfI;gWXE{d*461%8moFcpTEjcz4rYi z(_7reN>BI9=~ri+`y6~-Z`$t3VYeR^co@!2jJkf*^8V6-8~w3eu|4;H{wWTb@$6{J zmSsCaF5L1uDJj}_{P7;0&o-?Id4`jQy+`5)(IPLBZ>BuGWm+I_) z&3-*!uI|(MKd!r&tdmfZyn&WqTli2KU91m=KRj0px{rYd+^q}yppTAzO zmw)&E`kY{s$o~1uRvx>-d$(@8@WX{Y3@eoG+^`f+vY2}=JFa!7cXOjLo9yO}?{)j_ z&wP%F)}6{5{@tN>_KK(H3s+y=p}6W+sqELiZ_hM;?`37+4Vt~Ia@MrWnaN$Ns;p$+ zzk9cqz)j56Y(6UC>md`(BlHcq;XS3*yrOo_h zm1h-O?poAMS=e#n>(#5>;`)BeFMs|xBYSI_Ovvvh|x*N7OI=JER!wI`uIG&XsJyaQc zJv7@>^Vn&#SX+zdvLZQa-ySRM-=TN&n&^MmwPSYKizLc>X(a|zJ@ANCJsK(YU*XmxD-}|?8|Gy1KC&e9F$-7RNS@0-F!=e?7yp)t)WlUzdUFGBW_MtiF zwwbAulppio4mn|PPX>onS%jl|D^5lXgL#_^kyg37xf&;^} z4NP37%_fDBwJkFas5UI-aN^F-~Fpk*KAU%YTb5Up2M+aEvNB?ugc!6`f}oH zbGPi_J!MuU(ZIj)^`x77a!>C*R^u7cX?W|$i(rA!%jcAnIwsE9e164Nw*|5C$zOUS zrD{Glf6unOd*62YoJF@8XNtwHQv9*@;nF%l|pP|CjvL(wlGFOy!dbo^Z%MORcS`v6!p-+2p85^7Npd!v;Ku7jT{4w4uYs z=1tk|!w(mnvzMPQcYTk}>c258vBi#8qfT$q+j{N!^5yNne`n77y>ENEprHS9VZpn< zqE)uvj_-ZE@0ywKbBWrnmD|LUG%8sc-mjR#b!zRk$1httu3gYMU1TGhz4rHCyQLvF zR4OZDi)TDv+jZrTPV99{C23VbAHOFd8>jsF@MX)BtH*AhE#fVRy;h!o|J>y*I-f6J z4$fZt+SwSEB zx;}rq^UAGv?<+4V_X|n>^80Y2`MZcGFF#hyTUIEuD=&CKyF^mQveb~P>t5%s-25zt 
zVNbDQ%dJNyB5eUj3}$Q`E@l@@!I4GLO3+<7N0<{rQWUd1jHb zS7<$%bANC3`bF#G_y7Cz`TX4C{QLV%;}%JZ-ZE2q?&BghRprd_=Zja)?$nqb6Qx@8 zL`Gqz)U7ivJpuoW?SF5+|Nmb8)R?Tb&nj;$efR3sDJ}LzEdn|ETc>(HcR9koUSVfS z^qnnX-jR$8yjQ;89aMDg<|or_DZHXCGUdB7=e>XbuK)Pr&+o3j4i|f~{r1}=4z7SK z#v^zCUHJaj#Q*f3h%I{6IO~!`g~N@D_1*XX=SpxQssBU>pme2Jacb{U0F5T*+6LKh1(qBDL+=# z@@p@<9g37@WfymJ2x?@lKIT#4ac=PmHBkqZ=N>IR zQA&q8dODU~iR#XsV%8JJIkD!0rkj(9;Ofwnm(Co|TGf<%SIT`mn}kUjO-;-%8WBz21Fz$Nqi$-jzSzwMO2q zCSz@>;=Y=%T1#A778KcSzx?vcySLV<+xSXmO*378Z2QL8j1P?$Br~V(dEX|;@VxS; z{{M^n-{1e?T|bS(R#14u)9%~n%Dd|?{lEMF>;Dz2{y(XID(}BH{oyLUxzc6L?m?9w zPp%GE*rfM4|KG9uKcwsbhR>Im-}Ak)F+FYCtG)8a<6Mp{^P^9SGQZa@-^))u|m zIQy(bbjECH_b1LAu~NV4w%=d>`qz&iH)b&$*rd1WLy67V%X6+vPMlM0Hf`pr%-GX+ z@7}$9dGq%pIcL>6y2|A)K04%kd-b!t)Qg9=kNQMHEVgI#NO|RE%=<@&sn{8-ucTPOH@>pj-OdFD@aL2P_H%jS7Mq{ z&at(<7oWI`mxu_OzwVk6#B*G`i9Owy|Rhok%o`bE-%z!>e#wTdGVCGE&o5XEa8}bL6@7s`J(*R{n3pq zr7jKYw(1I;aAu88@v6a_#P%*88#XP}C-)?A#ev zZw^%IuiC0%$dG;7X+@9H9vyE@SJ9kko&^kBx3R6NwXeC8w@P)M5^H{D?uj!F zv+tcYaeA_Gno~;O5ib|9eCgn%(EJ6jR6VZ-2Kb*g3N601Zl@Wi-2^p<(kIRw|E|8i zzV_Po{QI{UgGC)mJovwV@9*!weeXw-R_=VSb2ldy*nNz;jZ1aD2C-dh*-IqFy42RnreFHPn?pE+ms+Pa@d#d|CF-@a>W zv*W?ejpbWzzduv*fgLy@TLbHBx}U!Pz5I$E(M{`c$pzYiYeWy$+3wJlCrd)Ug&REy^1fcW`){7x;iyK4+y0z8S*}jc zHQG8{)aquPPoLfEWBGi}>}8%V$L?}4ES*w%HtF3gLnSA@n=-$mSEnt1(lMv_&Mgz) z(>C|_RS52Rzw4YxX_nm{@%3DjzX%51|9aTq#;WuAm*#A9eQ`A5Ie%Uho9x&J=7hwYxkixlH`Cg&?-gS~3+D=*@h9+aJ}$Pu^YVeIwXX1)Wh zA01aI99rej(IDX*!cx4cT+i5hsaMOQC@Xkzk&WySG2T zzwiD%lN$|(xswl>#C)rAX=FLcxM1ZcUI`bM2f6#V-W5$N%*f#mb@6@_8c;fqL(3twzV$}&7-(zK8?`b>u6mGv?`F{TV`TfTq|Ew_! z*x`Nm?c1{z#jh97UzQm+jsLx!#;W3dPtIIoSpABJOUJ11@$>oh|8Az|&rxS7(Gz=g zeEIUInX~WM+s#{Ud9>wPslv5mPhz?Rg=byc)Vbvpm%+QTm9KY;#P9vLMEaM+rL%9( ze*L=j^EtOT`KErx33HTPkFW7y5zh{~y(rK2{nCz?tn&Zk|Ih!oebp!b|K|f97t=PKlNsQOqUH7c>C&q3K6MeCJ-|OnkH429>RKFMfQeq|B@4i^HMIfoe?m}I? 
zL45u5nsco?V~dz#S6?+^Xy&*7DQv&<_FKC<%`x%u#|3s@k^E3IAZQ|p$PP+6z-^WRf;t$W>GCv!aYu9Tp&axQaz=Cfljm|dNI9Ik)T z|F<>%-{-P~_C$u(&2w`9=7g;{$ew(5>5Qz^Tmjj0msLtA9Lu=y$l%M?YeBDG*4*k% zzFvBUHzfF#fVqARYwp^(SlJz+?nl<1y`1uF={-v&@g$SGO*hr%oL{b2`eaU_c<=6q zbBv#Kb(HO%8*?Y@WzvMi^ovzG8_n4rLw&zKogIH|=hLK%-7YMv%;K0=RMlIBKD<`{ zZRXDczvrLRfB$$|$Y8MB_Z438YS!?3Y1dyPxVyF5+RM9aDUr+00;HNl|5#U(8v%%$4j!L=nYP{ft> z@A12vRz)37eOXXp=xF$?fA6DpuLUl;C^8uE9NuwT(ndq+m_c6ed^!2&m6>;%_zhhd zBCcOub~JUKyu4iB@y9mjYp=)0#;-i_oB#d3U%ysgyY;ceNL^0&Y-D#t_UnjU>x`Xy zKb!o$mS4Mn+pVP%P2r+xsS``je%=$&bn)%mx6AYGq}%`fdUotZ^wz2Je;%~+_wz5` z{(h&aL)FJmS7)2g@7?{bU`Ll`+XS738;gq1EnWBd9Ooh}qt#6u3(S7X|KI*^<^Qw$ zf1IDR;3U)bN2yDm$^TA1d))r}`Tv{$ude@9|MB+y|7Z2=7Ow{`+s@+1JrKyZE|!}(tyJ=t-BP_H3XhyLPw!DUXu#uf zCCYlfpUwF`9|qx&Qq%OS`|iIj>+kPB?|(k+N3PiR_P-0|+poQP{I@wZQNI8Ds+!{B z&!3)ZzdyX`%xM#Qd-?07rozfa=PL7=le5oOf4cYeut4&PRzVIg!K(s`co{GMwDEl| zt{=ar=4aOn^J$rh&-6;$SFgUCw_NGz zWxnq&+;4ruN|)vR9__6a&o?TsGIch))5)=y|FVhi<(ms;RL+vx9+Z`>do<;7$Xfm< zG1pz6?>Wi&X8rYB%Q81DeCV_+LAdm7+3p!;vcEQ+Id-VLW$Cs#;_sb%F2)Ev&zv{i z^2davO~>xOyL0$qKz8s{k;ylANzcBc7wMA1(%tvv;Oz89{&{+vBHh<*+xD!crYj7e{PNB# zObmw0&K}`cSif^y)|-X<-e>Dt3Jc!MQ(mgqqWXN#neGj5%63aMcda`em8GdF{OI!M z=A(6O0=4@EABuo{#BK+(YhzTOmxk>Qgr7>%Q4+A+lq}kq!}Y_ z{J8lnJ=J_!M@-C&xv7&()1(vUman~Ox>if7MqpxlJA3HX9g^$bZvOb?X(h|yFD$t> zn_8zZ-aR*Q@6u^&*ChCA*IrAM;cLHZz_TH5;ScU{$QM8!_u zv&Q=*t6-D&qL@>6W~^N!E$DKzj{U;17borQ?4rJ&&OW&_MDNaw2@F$~>b~Bj)6G;8 zyIof6*@M4K36G}Eex1D|pye%hNJ`}OIpM4ZM_0(!CECsRW7`oQ`}X_pjjw;-&HHk$ z{^!@}=Z`-x)m?sB^ZBEW8CMHt@AE!=r@dMzFxSW7$zG?Y^Us_DmrT{`Ny;@nE8n+&uB@u7s`>p*TIp2~ ze}m$Yb^Y6#Rb1ZhExg>@@~g&X(mTUHuNbH^~bgIC9p1dyS2qt^LbGRkh~K^UE>Orkm~F z*Ywv ztFKy`^X&b2_v}Qor6u#u$=|!r_$u#C<@@4u=g%L%_Ns3G_j_B9o$21GA65GO-t4Z^ z*BV{6ocX-z%FazPh<|nzXm;mzOUq=c(@&%GO^tvh$`UY|7gyP@js8GBydo__gWLDSg}cm3>Fa9(VD$N2y0`iJw4KXAF|I6W#H8*R>;atbeV`-Z&F)Bs zM~Im}izrLV#C5NOyx!m2uUs^LU#0Ok>xtW4x;pOto^<;1<;k)i3MGygKHsx?=`7{l zKT< zG9b>J4{X1?!dH*8IZT=rW3e*X@MB$gwku`_3Hxm3Ehvh_x{ z$tj2Hv*mr=jtVR~`;I|?d5W(XkD&{<{$Ih4=X(mjm2?XVyl>WRj?P`0l4@u;d1mpt z*IGhdMAIDI6FzfCmaOv5&waRL4_S%Cy&zrYz4O=_=bbm+HdjUp) z)|+c;=Q?}O-|nlsS?6?6wD82A6(WKT22!>PmisHuNe1w|a`h3t8X*yK(sH(m>y7R$ zLDNzLxqZv}HXM3<`SQyOn|XdmKLsC9@%Qj>w&(D+=h-TF~Ls=CA^u6^1x z&1m*t$K24Cxr^6%$KG!hNbXv6YllhRl8~aEyLSCMIsXS|=-&_joX;6+@W$rX7WQbC z-W89p`FV7Dyl_Cr-OHCh?|L#_Ki}^Ex9#_9zNxirv5%2mm*>RuASTXxt;vcRIDpAjNUa2ZBPG0qJpLZLJ)Z=F*R&`&mPM5z| zKVw?vi#C7zpDJIMUfZTu87ZbD#B`cHavCdx*SVb{-5n->i}Y@~TzeKO7@Yp>Mc)ag ztk9#8r#A7|Uf+J}-v0g0U*^~Un|pr$-?z(`tG{pV6`%ad#($f}-d($9JIZNPt+53(Uo6g*s^*vGIw_eGiGj}*VMRs^TKJ9z_&zB!lrhl!l z>OEYrW0#lVq~gn!^PbpE{hPkCOIsd%-yQ2CyM5Ybou#uX zyhC@b`+iUH@g|*K%cUk2=?99=UA-s#{EviE!?T!WnVbI1Si8kqJvwg1E%EC4bBYaB zL~HfVuif~Ot(HS<-x(zrk(woIr7}6@UD$rjZu#^3cKu~*SW}Ge-r)LlZI#f@Zr6pT zYKs`oSvNBu3`{=u_KG&sg{9Y;ScUVavq}9^o6hen7&0aFY}1i>lFUYd7r&eo`*6nf zfS^@=cA;=)oavf# zw6zrK9<@^pIq-1*A|obU77|F^DxxBLFzciq#s|NeV} z*P3A!m&d#sfdc~V6B-0|%#c~}#-+V*i`+KD>!#cu(|1j=sy-O1t;p!)vr_(;0JHhL zWiNmJ+11B{huPdO z9Yt=5?fCKW^fr60|Ig|_ysy)#<8tcIV<|cpbl~A9rhoVDe=k2K0owjmz+y{n(+aiW~~yQp%9^Sf(=)f<{`r7I^INU|Im_umCg!(;PW`>>tfwvbw(PgK ze%>vGwuaURCwDGfm0=#20?> z&m5PM*zks|M{EDq-7lTnG%GN8?d;<%r3{A0!fwATiJEp)j&t|&EM3cVhLU@X44W<5 z&%Ay4l2afrKQG@oVdC!g-?yU9u@&x91k9PEM0s~d;6<7 z?o!D;T(9QjJzX1m`*5p3(yQw^idk8=wen1-XW4d@Hh$X@-;~SA;I+~+-N5#|g_Kdg zuIa?XzQVa~VkvW`PqBK>)FyzaFK{9Sv2SiH*I>(?85-S9Y#zDcoiwu5e^o*0rEzKLZ1!E2XAo@Hhu& zD++Zo}sx+ z49^1m?@2~v8St-|WVL_Yu^9mjbEiix`~1>E^7Zv2svQ|0Kh$1Ve6(rZTGK#ZL#yXv z#ShGag|AL4UTu1$!a`=u6n1{eSdh)vc7cd{$2Zi{k;FR`1wXEfO 
zy}JuvRQq2kvYdZDzV7GK$A=FupI;}Hykmd8#aus+6HEWd?tjkzr+WV2zu+?tL|8q$5b>F7$XSMzBxs_+_c2v%L_x`>Ab7Q4rFL?6b1f}2Q(srmrUGx?TpejpKaHQBA->Vg{<4N=JmR1mtTJQ^5x6JzwM>5 z(^Bgyc1RyjtiSP;|MjMG&hyK5Km7Xo`fpQtzV>zf{NMTW@9&%Y{HC4l?!|%^D&NnM zKUOz0eD=4X>2nqzPQ3s9@5>Ua`F?ECk-KwOs|B20S`vHxYi-X*n~8TXZj&wC)9bkP zYURv1#oNuzOYiFx#DqkzU6fQ6_x#QU{(HYuBj?Ybe}8{{evSCt=p4~yKngBo^f`WvutPK?YHH2KMdd2@T}cBiCd|xmVc*0(*8#Kvjy9_D`rM# zZxyRPX7_1E=vV$fjrJc-*FT)@t+(v}-?^n%B#$5bRLN{9wsPy-<(mteeD1R`Xdaaa z&J}%jbd_xPmawFr*PB+bJ-1buHOY$Q>(V7N_LM)U{g8O}^vjY>5e=EMrl~47JzRRM zWs6|>XH9d>^~&BcCcJK%rqkCj7_zW19T2b)oH{2s{n{3;=q+lupFf(jc|~FM!EeWu z0zb??ztz3vf$K{q`)fg6#b3JfKiK~{b&5MD@AGtH?=4#wg~fK95EK5Dyp>abne?~& zbB)ii&2%X5`3-LZGV$zY0e4d$8Q&_J0yszu+Ls4 z6cTFKP<(*t%&7pcNIRW_?C=BcHwsM^oGT&=J_qp zj5=0$US&zN&G~F|erp)BSKs;bmu;?Jn^SyxQ{-s2^5SaQ;2%#W`}0f=y%2M)?oF`0)L+I6om)0>tuV<{3i8g2+4K3Y!3--d zM_tYjuL?Ns>7UNI6RC2Zd-@SYH@ZrNhOWy3d@_psDO*-ih+nOe`{=3D) z+Oc4Tw0_5&{~opbCPeD;Ufbc!we9y-_fr}BHc2l1e`B(*xLd~|)@R%9{ju-eUVlVe zP|%mZY5LJ~>+3$h?pWfVFFK*&&W(CGYo+K#Vq#_IEagK=Bd1r+J2UZpjaB{6r_)bw zig11ZJ8}R0nI@%?)4KJmWsWDlnA~CH{WQ#bgH-KBGu7jr27XlRLj^&(05YLxL8&F48P=F-B>97?mV9qRJk{bS`O4kecN=8wZz z9L}ar^|7M)Tg!;urxlu}FWA)I^1F9(W3-b=qrr+UA%&f% zC-F}RWt?KZbA}VpAp$q%9P4mRxLvq{sqwbf0vVb!@ixhdL&B)5_n)P<_ z4UXIY@qRUfenjY2A68|ii9xBOrboHt(E+N+Uq6@BO zeORMhbTC}_rNqBorIqrQ*v!_ zoMO}KZ2!%0`k^yRr|ntCAXWQfmrG09?(MhV-rrLx>}2ro#wMN9o9^7dzqkJX!Md%l zUuzeY*w{Uf38{z-&zl-)LIb+MagHm>9{udAtvdG`71*{^5czP*~) zKX>-Bmp^R$mm4$v5QxfNd)3T$?bRuljMk@~boo;8zoS8uO-bgLVrxWn?$)VcJGZ9B z_8v-@(-gIOsmrZT39-|!Ma=NMUT7uzyztuAITV`@jknYY}S^DC4q71K6^t9C2F!z1$tDo1pq@)RMxU=Aeve3NcE2qo}x?Vb$ zSMN|#U+%e!&ny@Wc=i^va47zWI<@Icd3jOoSq=mJ)3>+hduwjrzI~bCwx)w;jz2bh zzIDnOo!AbykDu;IGaveN&3sz&hn27Alpk8OdRkQL%@dcGPAQG7;qcfZU4DAYna?%# z_4j#`KiED0^W}%es%`D;p%3MM29(5ppV+H|KOZa0=f+{A9Ghr* zVEL4!ZVmaG`=y63tk1p|a$boc;e*fGr%$`DPkSL-Q^5SRGQ)Tfpvx7M7S`yY&c~3dF^q4?f;TGu)Q|{4f+hb2y&e~9*9UuzR~k(e%s8pn`*w!5i68^dMdq@Oj8`w?f3z@uwEva0c-d_> z4;KOR^E?yX9vrx&lc-&EP9b2*={XBJ99-_W^t!OItn$j3eB;`eRYFawU79o3TUiy= z{5<|GtJTqU!W^bkZXd;0ELd=-gR${|yZ)w)@3!xD3$<;sU^-!`oMc||*y5Og-LD_7 zk97+wecCdyGelHi=FSzj4m{p_UxmTtT9Nc28K;c3LO~qM!Vh~hx;x!jm*?P;v$ZH$ z>C=-M4#kR_iwd4>%5>3A$hvy|Z`~B|f>g%!B z6%SlZ{C`q?{=TZOT1#gHv9wIOuJx$q#qU6$gv6k*sR?VYMHTBkDZc)_k7?5qhqRq7 zt5l!gTBXXd{&dy6bKhp~n!bUfZ;+x=U$metwKkxP6X$O@=9#VZ=4(b?2&|pU7~! 
zHaB;^z#%qmhDFL#dDq`#vk8ueQv7VKM zJ?$2IZDsv;rU=(asGQ|l8ggXniE8;IpS2rX*KLjI>-T@YT$y#d&gsX8lXX*N+?hXm zX?`wYY7mIIUe2&AYf|dWU;CUJu4R1)@rs+abc*SZ7Xr*bcdY$xw)#hEoYeJ&^4Y5| zZ;^<;{(Hjtn{SrK-??{Z-;SNTcCFkx@4WEDD*2*yT3+YQp8oXkU0&q$?)J;q4n-Ht z)tw{kt}Ji)JS%&r$YU?f6_q=!K5e_WQTj~4j;QRO%oo)~=N<_b#hqP{zy0>xvc(zk z-m%k`7FJhP)l}>-e|h!m*ROT^*&F6;-pO)frpHN^Uo`1vKH*WKYQ|llERaL4MD3{&Dwi8ZP&HZZTcLB z3Qmrz)=s@Ot(39VH2wD0lGyycl_!seynSX=)uS?$llaR5GCRXw&pfYl)=oM2sG<;qa?<_pYBuKmH7*Hk+HV84ue%fFpSH0^ zQabF+0)B?jD=U7U$#KlM<|UMv?bEXAgfGvWkbty+wG2sjJB!Z+2MDfAI9qT#Vohu@^NIlTiN$kMdaMg__ z#-DvQ@ATTH{fKEtZj|8d%%``7Xnf8`HiivPEC+?Aj7GdqtP>WR??V zH?CgOv)tIQLTTxgpjChUgKKa2e{mC@wd|x;`njE3r|#@r_5EJ^jx(EIYM*seJhJ1N z&Dqbp*0p%_Wv$&gWl9i7UEH>qo+-b~=hszL7hnBeds0+n&fzJ)_Pv*%FZb@}nS4F}-k!>zpPs%BU%z`_x}NIu)Op343*N+ju~Xb7kmR#3B#rglQZu%kd+g@R z1d>KO~YE&m;L`{fUx1@rFM_-3Eoy0tXU!DN}nRy#Wzn-z9`a)o?B$P`PhK8ke(P5G9=+q#yFAhKjHL;ywAG>0ENRJ% zIdP_oo+a`pu=8-9?MVGRn{hA0cImnm;^q8IEbeTJ>r<*8sD&Nrea6V((Izcm6_ya@ zq~gk|w7BJ0gPzqF4;MQN@lW0H9QrShUcD;5|IhLctI*uF)512caoMu2TXxsJ_kmM6 zzJ-TteR_D9UH*?n&-}Vip4qxB7Cp%y7H%uDknuWcIsNaBb(Y;3Ua=e(eM()IY-{7X zbIah%?n0eVr=G(WTv{uxT@(2D@}pqLzOOfzpPsV+;r6SS-bDoqE@|+%yy;B5Uwq9+ z*3I+IKTnj1+i$n!*z2f8Po;HEfByLM>gw>_prf=)BhNqo{IkZaY0`JQ(vL^G#cQuC zU)rSe*=D(fU}4;})S0eF7P0(ZX#IxY?#IiTQ)3Qj;EK!A`I1-<96X-CnJ- zbj$6x%Q&x_*8eEje*5kBc4h{b_l$)SY=W`ZkIuVM8oSXc?(@#M?TH-suHWi8Q$5l6 z^``W^G!P9AJ`Ok%B+@9(9`k z!?yMQe)haF?)p`xMLEH_zRz~8yI1Y7cI%AL&lkQQJ{x+{wPm;7Jpc1Lo20+HY|&fY zu|%SL_ua&L2N&bthFA8!UQ|9~lD*8Xb+41YfBJlWe`#&)*O$xZ-@9|?h={k|&L`XR z_uI|)XIp6TJn4;-m(scrHTG3}Ax-@)eXcbehHjJh=vGEPFO^|`7Us3CGF0$=&ADv7 zND-Nd+zguj4UayE4S0Gh>#|6ByzkiP}UmjGvcBg#RG_5ts^S@oc4r<9(R#(dgSJhPg`_#RD z-_K`%Gp%NycIG+QUQsH+|JzopuXg@k{Z{7>I&-`$D{3Ab`&?plcLrzkvr56h+i|lm z*j=x&lFeSLSz0?isH^?3T@F8g*NWmknWGaJE_O&*KHsy>OsMt2nVa8Uy%K0rZ&~+# z?&tn~|H~#vCC(~5TP)(aM7J_@;(7c3e~z9OS9Iw%=T?;A3qNeI?V6k%Xn&3EldOY1 z=a#M$ewaC{_eJ`Z_}LTYES_Q&Tx0!wPq8L1*RL->u0*X~8yV)BeRkdPTTvVf-?xAM zsWbI@YUK3PnQ5El^j=35_uVVb_I-W#u&v_FI9b7&Gqb%lg)YwjAzc4m|Np1`^2r>E zN)E5A+*90+-H}pURIC(Lth;S7(^lUj@Bg*l&)st{EL*tD!)?m;IZ_N92f07roIhV~ zzRQx=yB_SFD;8L~iX&l}x?)S=9ZUCV(TQ`MtygJjsr1b$&WoL%Yx`^ur{a~gFs0{a z*DH+=Z4W&7Vq)0~fxFw@2Qn}`-@{@3Dfa!fQdS4y?Kd0q&fH$k%0{i3Xy-diFS7Yj<7MLV5dePr$8zc<>NB-qc#NE6TK zJF>)|ZmV)}*&aIi?6(um+c|>Y&oN%_W9(}XZT5TOTGOI{olo~{Sn>JYvu9Ea(<zgI3>Gxx62(wd|!$gnX% z!LaB->zUex)eW4kS0a{_C9a%$ra$)7Q7)<2FM0}0md_0)JboatDwUrMElzX z1m%tw#)dcic(&Q!x#RSt>8WQc-~RqrU{Mll>^W)Oe%rd%mP7Pu> zxMQM=L~z&A8;31@Cm+mC+1tlBxwm-r=UozAsZ}3JV{Lc7eyy#voNM>pdoo&Y_;k1G zolg3BYwcEnWW(<)3@#?7xAp&Ck~iGYcc6n|Ymu>Uzp_!cpNmMQ&K^DO?QHgJ^*58w zEVwe~^RBe#G1tGc{k(N^%L=~Tx86+KUar%Upi_By`wTUIy@%(vecl{nlKn7y^|okM z79}T#)Wd;uzRx!H%NEu5U2^f#+B0FM55J|Y6LT_%*lqZqx&HV0|Mvg){y(zhxYnu? 
z%cbIC?Xr9NIlAIbXr9}d%eG7407C+|;<)@(30nCandh z8k`K9lts;=Cg;kFxa@4?(q>>#o>jkS)e*%g(XaRNIo|H#_{a<0TWZ}#uz zOvYRVHV0>~-l>o07>noc-j~D9!*TKW6d}t^v8IAE-|k(zbB?m;w(VT{;rFJM_`k}} z%hM@7@8Nq}MNjEWnx<^ho}IOcFJ5obSthb`=e~Ep>*k6p)fd;kt*xl3dU8#maMrSy zHi{2-t!cUSy-?=z*R%h=z7Aj6t-18lEB?9U5 z7IW_S&ktelNbG#L+A;*RxSv<3mtu$W4=p*XC)&+=(*RdVSQpy86g=LrG3o7Lm#P zW%~K?dR@z2-t_1>XDRP=Cye!+^?awM2YWU>OVwR_bjdwyh7z{^Mc?(5EI%I$a{HQb zML{$nN#vL9|DE-R&OP`Y|I=Kj)+@AZcdYi;e}`k|?~*vwzMKF3{rfXnv{=hax7L6C z6c)1Y=dahFKYupTzyIspbN%>zdv@;ocdmT@@7MK@?Kl2Tjh*(g#%BI<{rz>n&t8B3 ztWxr~;u4X`>d!-FaJbHLtUtV?`sb&odD~}CfBN$!r=Tv!#$~gT+s>`&J|7_&-AnUMBD23 z?AUwv{rCM}qxYw3C^1}#%8|};J!HhdTWT}c|FcOn!xmlsH{lzq17=lxGE{q>+rL3F zU}k84XUdy`h05>eD&NnV>l$L(R$wz}Raxx!&Gx^o>z;SV5clSk+ zQ}=%BsTFNmcUu0};rf=_Vp*@6pZZLjb6s`)yg4t9@6j{uyqza=ea`l<^Ro;Bp3mu; zP;7YYSk1g;L95I6y|;c|b8hF>oN7&0(M{PC&Q?Cy^ySvI5T>B$trdC9D!i)}aoZg| zv#oJcI-5k#mvxIAZY|xi!eDdPtex?DUfd25S+o6|r9rzyl22Kk>l683yX(*I|M$GE z)cS<@p@aQpu1eC5&fA&NwjXo5!|>F%ywPJv-~++!8l})>IN((<9yQ7gb{pM5m z0DcY)E_3buZf{EZTta7>FI!^5(i9ab8sqTEj8QqDV_{+JbkpZY-&C2;n%JEdl&_*} z?XJl3T;utkb>5HNI##9GKJ9h#nEi-1D|fBogEaoUC}+{!wRtfC(piGpxo-P|47#@O zElhZ~IL>9>ym@E-ob=n_Yj3wj;n9?Re}6uIoL6o)cUq~b+>wU`7H__v`(2)YUv|F# z@4x%Dmf6^)KYRRf#x9Xy8@c(-@2k_>AMg16 za{kYix5c-gt9EK{S^lzzL38OkdDkUdSKY3?G9!8|153`br7kW8-)fV?1e9h>fBEs@ z$A1s&%=_F<-FVNhpBJxF`S;Vi_WpkUx^A9Z=Qg{pep{Z*bMeyg<(YL=6+8Za`1hw~ zeWj~I*qz?PuV=4b_x@Sw+iNR46pubQoLB$V{}1#3v-L0i|7$NeqP2uedgg^idWqSM z0)hdB)sM~piq*=T6A1_~|1Um)?E!Duf!^=Ty{@h&tX9(Hk&a9xVX!S2sT_OJ*l~XqoVYnDnsstB}N<-Can`{KK=gZ ztqU%!6c+4%ady{P*|n%P)JDTrQcj{A*{=0tM-d|5H`+xs^czS&8*AE{y%<1c$t^R(leA~YF)yyAxrfkmZk(qlweRtT7 z!iPmxvS&3`TsgFMWfT8BA#qvDPa?sa7N+mNUmly~%Q3g(+OBzh$9F%;&i(%VyXMk; z@&d8vBt>dI+^c;2O!stDc5PJ^Q^SM9SFbuAnHm(u{Q16MgL`FiE7Wr-5)bJbt9^Ks5G zzxP=|Y3r5CKUI{(+%}sY{r0$1>a$h)`@POF(_BO6d_MQ^?%lm>j|iByT=0C}u`WL^ zFF$WtX4Ius3eP{EZ2!Q$|M&M3Pn!()ao%!qjW|^p`RueTue^2Vl~Ruo@65B;o}4ps zJyU(%ntiL5_sOM6hMnac2cx0_T^YDo7-FY06hw+MY*yR&S(T0TX6w7!7wvH~y!Fmi zD+}JZ6Xs=lSwD}b;heB`cIflF7x+(aia33{JT^BsGR;`QVEMY#NS;c^Lt!`f6bQc9 z|8f3*<^Qkd|JA;Ih>KCgPLGx0z#Hzl%Auys^^Y5FzWDMnTV~Q??ogkrC)C1rhg=oZ zT9{XwDIAcmmHOp&joS45(|gw5I#JWIMrnmoQ?Z#ZV}nt%0$WvGU05i~r-RW+J0cT7 z%Q=F0T7p|$fCw>JAFFutd zd2S8YncF|Iv%KA(#4v=|pD)%+-G940_IhcoUg|ux>mH5S+P@zbSj4moOv}8>n<&Hn zxP*82-NSzm&+>l!>)*Rt?X6q6PT95X^Io;*e(kRNZ{zmtsQdV6n#|N)&r;*gy{)jh zXKyq2{O1dA>p!X2J-fQv{K*7Y(S@d>fhPV6Y0uK?GQO=7tPHiBucqW>xT&`9(9)31 zpMPq0{r>Rq<;#wSnF`ZQ`jR^y+RT-gpYQiv*z&m2nqAKxb+nXP$@|#}r%6mr3l|=>PJU0b9H7`V(tEpR4<2@y@Aa)t>j0R&9PrGo z<%!IKmIob9UY!~Y4_J&hKQUEKd!GEk;|T*}pl`yWJu`P$K5vy~vMO=oX5;P*U|>J} z^}YVmnjODSFP(D#&)NMyH{1W-toUR0>%-6Q|GM_tKkj|`ZVBeiYrGlVFDwc9`}O{h zwVywR|1++?E?@uk)3$U?5-{=NuK-*|;Vp_frpe4f3H!RxPQ zbMN<2zF`Z&_tr$k%7jp1t~&usH7i*X5VD=$t>Z=l8xG<`2SCg|J7apP(RvM zxkzi)FB6{HS4x7L3eNp3vHP#P|NZypF-{x|8NdI`+WejG)NfX)lB!c zxYu}I+K?9D($YFX%lzu26PE9txf%?7i#2aPIu*6L`E}==O+E}azQ%SOlnVMBwtlPF zDy~z9RMx$(a^bOKd7j7&K7H-~i~HZ(|AE%iCtvYj7DNfgZ zL6K>*==(X93pW-dD;^Rsf1M?mt8;7zBdeIPn1lSHt2Qo7rzDk^3R9=I-d)boOUPbvAlIE<(#eprJ>eIde9rgtAJYnZ_O#~I^Tit`d>2{teW5aw|I#4g zN<$UKv}L!l9&ZcNmAAEW^eduyXxJIj}(Q;Z*!>op%sF*0-*X?1BVl2AQS{oHv| zX-8*CM*zc}quGKX7Z)ym5W}D#_Hp($eKz@%szr)FmdJKKuzG%L)_UG529~0fI7SAh zCjy=hvpe|DG1Q7~-P`fu~ke66vu-}vhNuh#kVm#uuy9vT5Uu4($7 zIm=B%VIhb>kq72n>n`gK;@qiq}tMeT>>7?)*Q z`9DA2?*2T+r|`w^8M{(z>+R+ruRGwMc$3$qLu0#n`0l^^rf?m4krA7nd~xfPn7zfZ z_6iql^e>w{pOmY+an0$^pC<~$*ME9-v|IdMu%_iPlajl;^bWn5;~g8GJ#*R3UA(Ww zSwp5?udLd!pWCoSATNI%W9B^ZG)9568UhOb_zBluV&tn}e!?xF}vjsghk4_2t zaL!sKL_kSoYS5|;e-6F>=PJm1(enNFWovFu-Eq-E(BPQDqiLD*o}auYaCw)h7~_p2 
zxk+|c<6NVYOSzJEFj=|x&hBzxcHz1=TX|*PZZ@aWNoqWHEdpnZJZH4tTxPpzPRo=z zi=(uwy8mWy1bErW^~ZW~S^0my^?jxH<;qh_E8eePzrKd!&aF*nj`?h~QqkfN*kdTo ztyf_vmT~T9(CW|cMG$8FrklcWI2!JI%B4jFclo zv?tr<)%UDEm#z?27OZGIc&gZ{p?$f%{p(h)u(Rs9?laFVri|MS^V%pa_-xYA6I(s zEoPjMbX_qq`Ca?J0tCRTbDl|#O$Dk9A}S&+Z#Hy=2Lz^bJerlg za#hy0>np?^fA@!NTh#HUV}>LHv(@yZ=~nVfu3XC!R53i%pSv$n*&~-tb(Pk#e+B1U z8#tsKlvnJ0SaLKoTeMRri&56%ed|Kb;{q`s((W{{9IR9^Te(If?vKyK9QTP6nz$v( zA3wZ4PptLz`CVoIe^l7by*8(H;gna+)qh{EeqZ-o{o2xd_wB9T%gf0XY3ARnDXaav zH|k2eMuPXsw+zZjx=%~2&VTlC@!0od(&t&;D^tA>a~bv=76=ZRIwi>di?hASy&~D- zb6d@n9jDHD`sn^+=N-GOdLDoHQ?teTTg|*_zkf$%T`zr{C(%|nYq`ju^nAPOkDr!$ zdi9+@|Nee`MOD?jbHbM2|NQy$=;+%Eyq9gBPvYF@ZEI=e|2%b`W9jz$@v`gq+M79~ z%O97_Tb9|U`L1mDLeSX=y@z#`I5u!jD?Yn$%e5q)mKnu{UKef7+U+p;qS%r;({cNi z$B7rW85xT8wqD!+wg2D7OH#$k%feo| zD=2y=J09oAYU^Ssu$mjPElq-lCf`vR5y^{ISH!OPV2JW_sc_-Fz{^7ojB+li2?VbUxg^0ZgDOuY>Vhfr zyjSa7I+rfUsmS)8@dLa4kB5K%?zLr{wtwFIc?=3mpUhpqmf^+OM9?j@*Vo70yAxM# zX!-uE$>WFr9kR2=f++SyS+(AbLr~Cp(jN<^YYfM&tEt5_8y(JS0yf=7S~_DZau?; zmoHatUA5}ToTFRbuV1$-(c#Penx9Xv+y6QE`0nrJyUV}J+fCV1^?cc9%{GIQ*phhy zL0&6gx9xkp=b1#(eb)DUicAGzd=WMc%n3J45}ou)`zDHbHZll^J}-^+5>DIvI%HzZ zj;+@YG`#JdiSy7~W)K7aoBcK*IyyZ2kqKbL>6=Gu4L>#D~tv~+9=-uFqty>bY21q*su<_{m&9^}IX$ ze@{=>Z%;H~-ROF7p5OA>vuBHM-_F*|pkNuz*ZELFKYp)FdtThL)vsrp=ijqf6H=(qW7juQ5U4FS`RY|OP(v}U_UMBj372 zUu#=6p?JD^^=6^C>r-d`UT1D}`gqfn&sM(AAAjEb^wy6T6HW9hV{<|uKKyiaYuM+% zNe|O5*|rEcWzXi?P`J5QN=d4LrD1_c(4Y4I^X~s&{_jrxmX8PZkG7|7b~Q_yJ4<-S zqCmer>I|!Td#7!%^w#XyB+>oaN&C1r?`E-r)nzUfCo9z2#jROP1!tU1IwgI!+5Xt3 z36WMp3OAVo(2eHoPhA=!(QgRlMXzXFfK(>AP&{kB>0 zJU;(_UHp$f*7qGte&7A~b+!Nc{QL}$BDIXzy01P z|BG?Q$6e2YUfJlz|9eoKzyHsppAQfJ{F3rOC%pJgIE%&;js#8BsdJ`RzT9PZwC?Ti zeeW-1^+?{*(_wTQ-_|f^kW_pL;<(&q~2U$be^GfF}U-x?NXNkfW zzwYM6M{%FN^Y!oQaDDsQ&uoJC_ttKIc4tTL;Rm*D5)50W7@hyT>)936=W|>w+a<2e z$yJj+J~#HSq1sJ8sS?e|xQiFcpX^btudlbCd)_0$+j73&svA+Ff~+sv?b;IBvYvlF z68eVeSH>jiCUdo4yL4Xf`WCzW%;`Nk({AOsNo;F!YLQ41)>}Fy=$GEn2_YM=nE75d z(cQT%^m>%G(6*?x%3F)|lv2+wO?e)3T|MB&w)itHJ9fUm@zQw7>*JdaWos%P+xPx> zVS4FBOVNq^^6!#5K)WCMJ=|h*)?N)t`eN8J|L@NEe|zKqmooHTWPdN9xMbm#AZ?R8 z9j7OqF$L#-q};OWY%+jlKihVlw7{8od?ib;8 zuEF5^^DC1!8El`En%uZqqwD)Tfn*Lt7KZtZ*Y^ksY&J+|@HJd${CBdbP4ka!_I-Ek zKdJwJ@c-}qf7u-x^5tw9M<+_$=s07P5@%k0EoEul6y9Rj(M{ax-$yDPMa*417+S3Au3U4f3r=DJ?6cw$vbj1oL zgW{f(kL+AyIyz^ZIr*%{>V26q)0*tMy1IGKHJ4ub{`l&{4bNh{N}v4Ncb?5b_jBF$ z;MUYg9noJ8A12OPcKdd8aUM&9+uOJ1@&3<$e*F0G?sWSd_20i5&F?vo7QozXGIeXn za~8(A9;xg0Jw0YQJxZE;`GaWfRLz*k%?iex%9#uyEYE9I`o%s<*Mv$qnW(NfcxC67 zCq9LS{JrFq zU)O~;!yWJcu`rax+Sh)(9bfbD=yto`*V5#7BdFfZXJ2CP1@7$YZ`@n!tvF-EElJfHDyY5-*YdwFdqqO^O zMQyEYa!9MdA&Z{a>%tdzr7_gj)va4wn5=Z??YDCmcj?65=dX?TYK;P|*ZThc-Ytco z>|Ra5N+tU*ic7!z`LkzTcDCq*ZK0pb*4?SwZ(efl(CusGHyc_+f;$#oRXe1b9{TZ| z@#jr?)2=Vps-6C3){3obvp1eSRcZCyRGH~>nYN@j-+?pdPVU_BF;uYk_zu0-Th82( z`dZtwF|I=XUD91{hK<*zOk!NJH6*yz{Mn?v{cqbfBzMf*3O+|{HAZJ^!C(P28E@$g^%{eUt7BM(Vd8BV|jL^ zS70Vb}66!p4a%081x0>vkI8*n+>S;-FHOowIm6>v#;hk~PGA;9rLJrUQ zb+3;X?%7}+tDD=n^L1POhxh+#>tFrP{cT_@d#`Vj>VivZI|D-<6qP8X&I228{Uv(z|IRac*TpsE+pRIm>((?JEZgG8=hyUK?MNZ3{w=6U2x{<#3<8Pn7 zeVgoW|MT4T{ldv7EQJ%jJg4m3=-l72HPo5uWMfm;jHi1Aeg@52#kSw~(LL*;C$^n0 zZ1j7xH6yQzDW?2eB;T0k9$|djT~F)uo+qWIH8yP(H{SDyn_G$(>HSFimu6Nh#d>j1 zfdBH#A1hprgs+d=U-wn3>>{JBt^L0G-{1bdJN)rw#s7zI58u6AIxE!tzUA+ii?(^P zi77aRWj1}=a4q49L_~A^bk*mrX7TIW4)qDRFgcXmV(qwCYPe(u*9$$h%(X?qp=q|e z?^Hf|>l2h1>#cp}!~f;4&d*ahIq9pP&-BeRX6|-($=h0Xf9d zbKU=W$iC~3@{dE654nFf2tMTKRAg;FXvDxKp;Ie<=iRGSr%s*e_;B{jxkamL0zSWR z>MAKO3rh)Gvis-7#jEar|IPE~o{!J*=iVm@{YMMt*C3y(R+VPt(@PrV-bV8W?%BV4 z{<4#nSJs9Kp0yNgpK|GGQSMr$OA|gXjh(i1=@e&<*HLXYQ-b#WeP?ZY*0T5Tk8f{p 
zPbuQ5*%|Y!a%E`Q?zyu=y(4E-*57!)takO^kCz|6EK)jj_~D0VZ(pZI?l50;W!LjM zM=!(NH@Tl&Y_r~l-|gA8H?(|#lY&U;k;cWkHb2iUny)SadW%FZ` ze+T9NJzd^!-*9UC{rWxocT2ae$!fW>MfstHucFHhD_4gjb3X4nQW|M1b)`J;^QINw zrfgczkfp2GGQ~)-ZHjf=-eXB`v3pz18;i?+^e`# z)-3XYZNss(4!6GbHJ>&XIB~cnF}m!hLb~$$JO-=xPXl%-2sM6>krNTy_H+N~7q!eA z|EgWj;}EztjiGt1sneUii<=*Ye&2n!&RlSF7kjxPV@Rf!RFXuI(vj(X|E<>V{Vozb z{a*Inj3W&VZ7n`BK|cG^m;@%MZ592vb9$av-h#8fF8fZFZL?pMlu&-3eYu_I`=wJV zBjxAM>pTAZ+O5l%FaNAzY?=S}-1|RQ_x}pt|K}6;vH~01=}`=8u3Ub#t*?E^z2&&A)$t+bidN|9v<-zIN;F`zs@N#l4q*S2M%-{PowRW%(B@{MN_W zGF%RNwd;(1&G)z8@Bb;Xn-AJ6oYlKdWJGR7dW~8?N*T%2ak?74@!(p zi*K90Iu>%<8ZC~@d?8IXm{qyISpSS=2X!ZJif1c{sEew6NsZTTc*sdv;e?GB|pS&+- zz04;A-}b|abJRk=o|tz{ddAgDUYcn;&YU*!jj#QBb+&mvfBW8&&RIRr1J7Fzl47KnMtaM|LHEZ_3iPY<{AckH+?zbhj_&TZE7)YxhFjvkOnJkz-R z?z*ctzeMc zruNTwW%)XOeEDIq{wRaq^lsy;pA1;|?9PA4cAS2%Dm;*ZTjDCuS(mwuOLzbMx8eT& z6-OBqT((6{uUrAKfd%4eG*#d{hp`75Q= z=T8;7yu1AUCxcy5!RFc$Pv*RT_x{S0s=eF7a#!y*ds4Xl{@1wvh8z!0-oIb4V!u2NCSM%xP z-$}{oB6qXVKZOg>z~24;q6|YFGqjy zFtj|F5?9|{%k9RImA1(3#ReV~m4x88YlSDjzj`5daojQI4&5N~G@LxK^bHpO& z;rxG9Mwbni6iRhO?!FRM`$0Q)&axeA7tOGlvf%r^GuHI_B+=ce49>_>RM85Of`7hO7I&b_DY;U_XZ3|HOM-~aRJ*Q!GMdY^~q|Npc% z_ft09TlY6@yL6~)NKIKwhvssL=CY5k&-BV_uDvGU#Idk*#;RF68Z+b8>FG)zH)P>U zlwJPv-@m-brkUGroq9EEog#~w>&3Ho1d>^fq{f}eky4-Yp=$3uzjS%uB~yY=v9+DK zoX6r~%(%IL`}Vi}fBybGoM#^U{O6y4zXcvw?X3BI+P?b#>UgjB&o388hVMW8O0CV% z#hf|+Z)d*30iFXQ7o*A-W@W~@X5C)1#wTHs+5rwGwF?uaA9OV|Iz5{)v!lVeqs4bg zko*KMaR&yajRk$%QbHG~i5)VK{U+CDy0 zB*fmA7Ih%^Lu}PW0Tf!`K7Y6J=U3S-`@!;2@y>a?mO0hTLJFoxz{Xeh5 z4^Mbr`R~(d&=u;SgA&^3{C^`}|Lw<@61&)>p2HS>k0%7%$nY61d;IZd)!u7ajfcKk zoxgtc{=L*#mtQqI|9oIp=lfpXZk`|Cc_Zr7%bJy2-!1$2@ZW-H9=-VW+yZgyuV4Q8 zWcCR~{_^(2dE1#cq(=H~U0I={q6E4QdFh(8SJN8G^W%QL4{E;rbk7>^wYn_7e!ZUG z^Ee=T_VmcBGj7Okl(W0ERCtos5s40o1;4{;%zgQ;8E%eR-RR19>G#%ccOP!J|328u zuBlt|=$zeOuB}eyGh?+#>iJl-_n5?IgNi;WrDaq8Kd=93|8M91FYIR?{+@I0`RDe2 zc?O4FGJNHlscB)R<}+5k`dYOv_2u5#FD4iZugfm<{q}A{{pVk$)BQv`Ues9SZCq!! 
[GIT binary patch data omitted: base85-encoded contents of ldm/modules/image_degradation/utils/test.png (new file, 441072 bytes); not human-readable]
zNo38R@T+bwFYtd0+j8xAY3%jXm0Q=Hj^gk=_h63r#VOu?E3e-A-kkT~(zjS=!=)(oApm8Ye8uOC@!l zHe1i-|EqZKSfPwMERxo`k$xc_x=C(n_u4U*MqF=CNAH* zY4(9?eCLD zw8(bn?df;EFVA&2H2eF{*ayA3TWrJX%o(ww$uh%#*HR?I&?J-4X*jYxSFf8+MAG69^jR&Fi{>-Adx z-6?ghw`t3hSe`lSx~lkzTnjmQx%N3h9fE<9nNr6$Jv;pJN^73)%frk2?f?8Z{P?i5 z?$_=2_eFRHW&V8l@Zsa-{PX9{o43Di?(~!Q_S@=erON$!`0(Y!8msTI{w*u7O!;ilo1AUv_WtgkHJvwG z7*22Un*F9^+o7j7T$T33u3s7wsCuZqJz4&D{lDe)ewwvEe@QFWeZO12^{T}0sMq)Z zecOKe>9?ob?+aY43@cT-6;*oVuR+I&*H5>;u2P%zZ0D0rOP5(4@9S}LxES_Y-~Ruh zbBaklA9ugEQ~t33-|PKpkt)kVinV-1-wUO&Uk*;XwX87a@*`XR!van&YnG;HKAKa0?)KrltugD( z_b=P`o^iufFU1)bZ!h|2TP$37?`qI2QPwSKof}(Lxa1sV)mR|pobIuBt;@4}yJOe4 zwZ-a9*JFEcv!%*LcCPZu%E)I?-W>NV-_OX-z1H8=@oLLTrYU>7riEm$a}6>7Y8klr z){=9Vdv@DT5y)O^sUFIBK#gfrufT4H;sot)H+E>NTz=D`WnFeKd2$rjgr=;OqKqeg z3MegjDZRM(UDaNl*IsXCnOO5iPgZN1$Q1dfA^Et6Ur6rW=w&NIXU8mJIJaB3DQL^J zE+t=;=T~M;St~v3zz>_I3&rJMURgce<+C)6Q8iOa!mjrJ!*==or!Du#i0%Gj{L$jx zv&?flZE7o&!qSdE`F6KK;^OVF6L0neXD;0mwYft;Si67w*RM~R79F%q>*w%&`g{I9 ze!U+8d$?CQcq9ag+z1i8Epw4~&5_#18!Z3Ntb6~v&Z0v5S9AI1r)M4isGYrSzV`RO z67^eGxaNwc6ni|})}WnhYOp{0z?=JKqU=m7@(yZg-HH-zes53_b}aVI#>UR1gawRF zf5MivCWy6JpJ9LhO_y2heYg0V;AwT{`P;qB?_JZoC6ikb?#r~t^W()wVG|>AGOGBG zaB??bZY+y7tf|LcFYVSMzp$WZkD;m66@Ldh%_rgV9jdY}AQ@#Ed^_idLyems2n z^y}AOpMHA!y0~Xy_ubd0Ri9rfuU+Xn>q^-A>)J~HKKj@H`SI}akP`@X=( zkB8gaFTcFNzw=4SPQBAfVd|T0g&sYA{J5Q8p5ejv`*pLuzA?C%eEIvZB;exfuURK6 zHopF~&RULtzFplZ%l~iR*E=teulo_lFfH}%Eeb7VQ%kMBZTr$9 zaWmI)l~PoZUkk^py~$|_I!gsEAO4vrvpj}Nkniw;GnUbe?xqjVJT!>lp0!YgtHGr- zHuk>dmuZ4mqc*PzRm+L~$r>&kHYKUIrB=MNVAk^>E@Q@owM(bnT4MQp_qyz@>#_w6 zADn(Vr%2EFe!E_Td`ZvQFB95i&U}6|N4l+fV_q85@2At_?dtw)S$EpRZ+qb#vGtQ? zc5uv$RdHe{yJvD?EvsPU^(S-H&9+CfH3W5+U+4Fi|5azOO;w_5LL zvU<+3c+0fx*k`S4F1c=t-!paI>W^{X;&LvXN_yKjTjJCf{#B1^H#i8Vn9b>uHGa5B z=a$(&o2N0ng{*IrGnQ?-d8AZ9>znchVW-f*tt&G^SLe>Xd@IUX$z8_DVcDDo*EX#) z7q+tc<}u?|RMt)X*G(PMM3@(xIcMGOl4g5OQ@NobYPHPndmDpK-m@0CJe`w0KhsF{ z`K48oy1(iSl#A9Lt$hF1ufTtSYYO+iq>?A4^-Q`u&N)q4^GoQ8!0Ss|Tf&Mp|HrTU zw-(>EDmqjpa5LRFd)hlJ z^xBk}&*#`4eef>ZH;OUFbrQDsg=X?I# zuzaun?T*{duDh3=tB_@yKdbz~UgIsC7d|e}ub8{MOEV)=KaTzV*Xv>PUaR*WD=mo& zJm(vSU-wQ^pu z*SVhp%}x_jUfkVX-hMbyqCGXX=-jo4yuH`HO_|q!{PN2hyZC8yHh-5h6im$5waA~F z`tWb!`iU}%$1-J3UbuWYSobRX^F8h^O2&M-@iOZc9A=S+sKNF^TD^ zKfk=Z{IcYh{5NxRb3?W?yZQdpPv6P9{kF{TU4+G2Ps!@uh0aF=_I``yG>($){b(p~ zF>kwgnZNGBP-#dM}_}tY>c_#yxxjV02o4qXT*LI)%_bOw>1r1#8RXZ;$`F5~X zKtw+I;orP^3z?|w=bvX$<(4 zvo!5`>gF!yisv#7mzRCli(cK;#VOizRX$=}-nF2t^m)rNC5~J0yjt{nt@ZMi@+RlA zPewY-So%GWGgv7*VBwx?yQI{&9nAE3zjvFK!aa$rX~L2M7eA)0z50y*y)BnVy2%33 zO;R%b9S7Op?iF!M-Y#UsV5lOczn0zmwXgoEHTNE#^F0;nx<-3So@ChV_^H!{1mE8^ z;5jqNYulL=--2TjSy4M$6V|#4CF=-Hjf!Luxcbh)!{LXpT$>WZov8fYl*gBAdidYl zzbQL>`eoG{fMB`DOjXQyyo&{ggRvHad!Md@1^s$Q= zP020%@9*!g{{OGYGWJ~Y%F=o1&-a{Cy;l16(Auk?e~L7HSA0>Qy*77m_4{^-U_UOE z=dYd->~(!$ivC^ice*0o$={-q$Ak-c*eF}&(|oqIbYfOb*<^r`&at4 z+oH5BvnRBlw5v(M{WyP&vna$tsd_^W&!0FNn&b*$C8lJV^qc%PA zRwTkFnP~IwTqwaB$_qXYuwoR+4S}Y zEdgfVqpmHcr90Yguk5YdZLXYme`1HV@k&PLo~=xK(`P zXSK-oFwTQk`WLG6@BDuK`tS2w^xYNoMNZ&aDffj<*QE19=C!=)_{Y7VpTVX1 zv1fM0gr1FdM>22u?{0S5xNCXb^V89*E{O(&SpJ*MS8({rj_ud7PQIRXDJZ&ja*aTw zJdm=!Lv3)(tmi_Y4tWm&qG*KhS~J(l#map&yqwjZ=kR49M@uA(tAu@hmCnw(Kd)Bv%kQhIwQaMA$c~STi_$dTZho)Y_Q#V~ z>=Mg8XT-jmsK1Bh>;6ApF26i6<=N+-JucSCR^@Bg20s@1T2^0PUtBEg zTv1z_Tk1X8xgkhfiS2pOIeq`ruRne*+WBX8{pb9DFXjJdMozn5|NXbTeeJJ*zvZj) zUHoRVF$htsYLKK1>&!~9swgJhX!sXrf_b1Q}ufO{+h_^ zrIFVXCH8*TTzYrQ=5rGb_0Q|S-lV|PedqpN>-T!oBa81O_gv09KKE6bj#=2Pg+W^ZM_!t`kK?~w^0|f_ZF`xJ-=4zp#jf1%j}+e5&F}2 zZWVLB^Y;6(3)5maR)xnDi7^-)wRpa!_|~+^LEEl3q*(%UJHDuV0Q7QJ6v zs?|DkHrq@q_J+d-D_*>F+HCM!;EK_ovVYgYrk7s*!^d()Ra;}%*Z8XI)u*!~uZMVT 
zy>@`(T=lzKPwxphPCu&q(J@)2B|G_Ds!V(4tV_GDL}h+H>GXVFkkS%?y+#wIj81ot%s#jHsxnJ26kC|(xu+$jHWoki{YT?eI;@3W+PcsQ*3 z_hPGNcboLCEq7NvKVLYel!}9g^ ztv!AA{)?pv~@X!UhRhb+y@=l5?E zi{w9|t5NWHNA>!xYfkUc+iYg?Ew1>~R@3LsGEHBO?G=1D?ZK8vh9|u$=`XYH23%ay zw%0h`_ms=2i;6j&Kc%lO{XNC-mf%Xq=-GKsd+#-APj^$fo%!~llw-{xU9XOX2`|K+^x^6Cy>3}wExXU>YsKesc$d1a~ZY6JbxfBw$@FXMFO z)q|;^`Fh_AMdTNs=8L8a_a+cx~$~pqjz40S)DKTI$6!% zVYzh9{%*%XSMaDvon@SQpN=BGkBk^F}t8 zps4EC^7eIqj_TJLo`2nM|1Tihanbeb$_*kOAzR%G>8XO`{0 z_sZtH(xW-D$RWoP%WbrO}&(B`lt^e8eq3Qd>27C$vS+DO1uKcchSZ>|+ zA{T>cJel)~6qT~)8VWVJFR6_4Tb``P%IIpae9~II^feyh-79$RNe|xx zihPB!uCC8#Z&~s^$-5)2cT3wEjtcpA@8nquW~OJ)ebp7}`jqo{A&=n-_3hi#*7Qop z%wW72(=VQ!dAYW;!(nacUg5o^Y!kO%32S)G(0q-%-0?_|dLBwp!|R7gpS zT`v&maBg~cLxlG=YxZr=qKdAz%(`~NVjIKS(xqLd6IfGl2fJ=EJTmBet@{6C|Ni;spYQ+oy5800R_uZ|-G;|3xm~P2 zM7>v>wR?B_{QqyBYd&AT{JF$&0j7?IgmYD%NlF}wM%fP%D))q#c_g#f@;`o8uw(8wc6sK!OH<}gu)TGpdbZ32Mu*d~f**G| zZ(iq}8K*Zrb>EdqGjD%=m8hfLoEyE`Z%-xn$^hR58w|FuQhfQ`y#CL12MVYX5 zMrW_R_0ClNX_r!5qKjzes-lpT%?tBaOBnrEnadu!m{CAs#bb%UyUDwXa`#Kj*q?A| zTi|j#X2+M;Hgl)wE>6sz;cPl{2w{pssWnZdntkU1Tdnb`1 z(H8&r>-ziqtIgk*{r?pI&$~zQNJ7uW@9VcLljz#|?cu+?KTm(W{5*a6@#CrUg3n!> z_5J-l-@_73_uqd%Y_KHCbYj?g$=8#vm1gd(|M%ynWpQc^V59keRtNe#7n!<7y^FHk~m@Ia%G#yUDv4WxOjZ-FFQ_%@~tUIBjqx$+} zzs;YU`O{1cU8WYTZn~3GW#F;t+qsSdOeq;3nioY!RT>{zQ8K02Z-)Q#&rfSEdZq{~P%2sG%W-CnD&eK8y~}T(sS+3P$yl$oeEZq{30W)mmUaaSu8orBsCw?iu~f|2 zZGY4CF4H?*H?Qbt_cAL!ySH=k!Ml0OC!Kq+cWYVfw0m8fzTdud_|DQ-skieM^VQF~ z-=J_OTQG2!;kBrX5@)#{+>6fL8Mec3&AI3Ud(;#=IHaDOo4VS7;{@AhgM5?Tdq3-M z{X8I*yv9*UjLo?rV~!|edhGLMvr`}I?LC_4;Ox@YD7ew+dlZk<>)6+8=9jpxZP>bE z?w8pwz3*19zjfy@Q^VFmnG?r9y!+d}|M%|w|6jdcU;REi&SXbO;{Uf#!nd`y3amA> zulx6<%Ie*6+szT#yBZWtrg_v0ul{Jm!mu=bwP_CnBLjn{i(|;N(?;8Gtto8d5R4A5 zTD_^n<;y|-!v;JXw?12AoYL9T=4Q~b_1n95W&7{>jSbz0lBdFFSkd)ubM|uKI&} z7#m7l^{ev__0&sNvQ{m15DfDB{5H4l_JM5$P7J#0Eo;{}zK`i>$tntIiaPPlV8g~F zp2;s=nkrOp{=5Fa`un@YS!F3cYrmG*OpB^1DBo^g|Gm7v+Tu<2+U@1vugw5$xBPti z^y`Od={=cir$#+bjk|8+_qcBR?*03#zyI4?zOOtjFEjT0C07L&`NjKp?!EiIT=B@> z4=-yf|9pzbKD$--G#AgYi|_vFvDFvvxcRnx6^Fttu9+@Jq^_^{9+tiKy#M+2#}|7g zGd}oTnYq`mMXAP4wpjDnf@wj|Om*8rw%PF<-ZZD!Z|kZ{=cTh59@f}BT|TM!;kjQA z`jR)Ci%MQoI{kF&JaGZGWCoR`Ykn`4lQG;O(Vl%{&yp>Be5Kldr$WQVWmE2CU45dP&AT!3`cvQYX*{y=aeB)y|EytjID27NjNY*uck|k> zKk;*JIrsHmX7}xh*KY06iGA8}F7wL5hA-Eym-Z^Q3Ct;8yY=(aMYYWy83AiImBxxI z%{dmxdzv94JMm#umfp;nb4&B@^v`pAaM!}e_Jc{^0v&T6LCAMC@Cpj@H|8wd- zuSG8YN||#vP7j(8ARRj6NW|$H+eceZTe~IaczJ$Txgm6L<&{ard3%*N8z%K;x>#P% zRC-?Xp4VKHvGDr&!r-3QUNcM5zRgE4Vwx4X?)nr>mSoNdlO09dUw6z1&sqBI-du}pGcm8b;tXtCct6}S@(9t-@^^av%pWXw@cHkF#Ul-VJ7ujlJ)6BS*q1iiV|IX7cQU(&6r zb!KmWx&$wgOy2kF+wAu8_wx7l{{DHs{@>MbdB%fLZN0l+aj1@CYtR}dsC>UI0d2e!O_4cxw+PA(OC)-!7w`BMd6}7g*L1X8Yd$SdjZb^L) zsc~rINRV3oyZyIB+vV4;*-lfAZmD|zpzoCTC9y@XoF!^x4jc45uG_x?n{)l=oJ+U7E-^ZIFtjyZQC+2ZT1)K8L*+wz zuZDU1c}Go8wR>9nZtaS?ec$!&tlv0u@3cj-wMSw@Y&4s0T@Us;yK>qZ-l7E)zD)bO zB6|G+HKqV%#u(3y^QW}5oY=&heKjM`$g{!sy;DodjbE>nv)?pvnQuFH`gQO7U*h}i zYIf^odKT+;{V!Ab`EB+3|G)qLyI=q3$G_8$r(gcr>Nmmr{MLJSWb*RzBMzJDO`lSH zs#E8#%r~iryUuuPTC(iF?Q6)U8}t2Muc3lwzOUudaJKaanB`*uSX zUb8i>SM6#e#s1o~t-fN>_Mq0{4Zmi3wom%axBObWPOCOIJX3iYYq9=iiIwqbi@sm1 z$vu}-w!XG(*`f3K@#V`;i##=%6Edf^3aVd9pT1&RRJJ1H^F154mGdqV7JPWFqolgJ z+2f4v(yz~+efW3xx9L(}&x_G<#VbP_!)_E_~j-nC94spsMWZn1T1xw_*d zl4YEDsyYM~h4Ban^*HOLMoycvd0k0|2ix@0na{Ug)l$fEFTG~@e$Cdk+k(0~Bo-M| zFkT4Sa!tD1MSjlbjw{m3rnPT))1ZBmlF*RV|>Tl4dt0^JhPSYgEl)w`7h zB$EX%bR^kJ>2@Ua1im==GhuJAfXB8=%lBOrcDW{*o!en}Y~i(SanDZwT%&Bv{QPIU|7kIRsk99C4Fkjhn?SSDNzvuTN*6-t6cdE$SA!>d?@7Aqb zD);dmHj#L|C;SdmE8~-|U!Rs#`I>4@N?iN(o@BD)x^FQoMpJ@!&3V2@toON^5=+2G 
zD-jKio)xN_on?+0@F?;n&PbA}?U52)b=*9E7Z(G|C5O^G9qWR!FV71TNIoHSEo$27 z)SYvRqq5^ntyvgSs(D!_dNdzjyZ6@tmx!pfrqAb0-*fJDjpyf`S9SJs8hcMaw?eS# z%-*Z_vfuaaY@X1u^APi)z0)TZ#}#|+4XX%_JlOVEGWp9T-RMo+C$~NnUjO9PH?F2Y z23f`(+>>@Tt#j~jXsgxH+fdl^qb-mjU+DO1m7?gQeC>x%pZ@&QMEdod;@^i$%FDl( z*MBd!|9#V3fBpITUzg?M=FN9~@bBN@y6u;>Q*@vHsJfDLASycQ^6A$fFF)7UTpHzd zYW4Sj5C7>MkyuoIO<)0sa8mlZwMOQQ=QNv7*N#1S1g(xb$ZsMR~z`v+rzqUNiaCPKXE3=%(lZ->HXcme~6Pg!!Pi+DbFYn{ZR z4xQ7BmZownnDw}k-Q`NzxbDIjXOG4p(+u&*oxb5{yco zxM926p{no)_garCNG`hk^2gKD;zdgq9+qnAVcR-egh61#luy5YO$yq(yV{&bI4@Qs zV=e#Q(i~aNIsJzX`gY&7@Kx;7b9vwV*-_v0_OFlI47oFIcbxhhmAf=0_iy66h8Jsc z6*Skremd#e(y3?eI6AKqnAsY@aZ%#axps>iYcn%9znH#fS?bBmldt#aJ)b0=8N2_` zw35~(EYa(IYk6X%FNZZVGAQlc8dT)qAIKamFKWQ$Lg4wp87jhe3zh?-ae`of|H*e1?JFG6Xe^<0h@yOw~4<8@z2QA+D z_3ZZgP_5eP;@^M%9Gd%iR#xuxpEFgouWElZB7`{(ryT=`A;X`+fKA+qX+2>;8OEUHau+to%N?$corYM$4ma z|J-zhfA{b9c8MCV?DR~Xl`1YWNimmS?Jb=aPA8fRWln)ChM?aZndccNea z+L*s~{qpX#b>~li`tzlwW&8H+c3-aT;Os3dxYzr5%kB43D^G7);IQ)T&!?pl?x8Xk z&+o_G`usB_`*O~%&wKtn+rEGG(le)HiX1QLB}~(LJ;&>{7n9&ai{69Q8Cj)FPD~G1 zn_Z7eu70h5UY|ptJu&Y3u_hPYbiocmfrmBwGWefd(`)INn&S>*cs=3X+P{3F{k{n1)p<5UPpkpu%J`P zJ>iL$V&__$1pM5mnmo5z!>}bND=Iv7Kd+Sho(_putP#v-I#zs*)8`h9N-sUp`e1Hx zuQpTF^Lt^uN-2#YS^<}g@BB_!6Z&BLlParu*>BI?JpJ^Nb#}_yL*87cCo3kbSo-bU z<1NlD*Q&$w1-T7`85v6$S&}(!tXjQYxj=L0*0kPoGX{Z-;y1I`#cpL_IJEcMDmm#x z&uqWOg>w5U{FU15-X?Km!o@WXjEQG?UO&Q@6If*s0e>rxGzRwqv2K7_P&T2(h=PrH(D7}S0{bk zwzgBkx}l-sbt}W#mupwn%!}^-{qg<(m-7Gr$^SceJTW`hO(275&a|)m^*_#**Z=T6 z)ya4=g5}NB$lZ;SJRFOgB2w0}rXMf-Vpn!gX2RZ5p*t$?uSp~edJ06XKEUIk*z(}* z&p%OKhQ}(h=YGFdnXD@r656rqrLN-lIEfmW{P*{HFCWyB>&V#KA88a&dW`pSUj2@l zWxp->?zz9-t7fpbbqBM@`#V{W&vj(15fu^0+k17B=Mz)+n^MZx48CtUlNlSOdC2$5 zo|>uk8c&`)7C3ImbMfbfty>JX2VF8+e|qVXtxI-3nHqi7ElA+nD_IuCz8dA<&{ENcD4P}sfn$?I(`3``}W-}7_N>`Fa(TPFBTz*V=|+)@+P zzD-P-OB9QA_0m1PNhZFZ|Gj)kteTO-CF||$ zf?nmVARwxQ*zEf-+XAf=Jd>2 z-`Wp{EK8kvetGc9y;sWY=1xnMY*XY5+#xNzrr5Sz@Z8Qn9}a)d^qs5P_F(V)=a$ox zbe_&}>e&7F$XvmNPxjn0J+@*-E{7vS)avJ#BQ;kfUimC*njfyu!uqNv@{3pM+OOxj z3s%R@X08@b+}qh*x^3B?sMTA|S|pqlTBZb@ds$&4m0gvsnacR@&rYU<=jZljcewW? 
z#$L}|e*ekrSPnR{O{r%_f?EYLQb1Y`ltZ!f0nQu6yEzk5>dF2|* zg7vFErIeR{SK6b+toe>nd4$k4k5^9G$T7Ske7Mnsb6#yCSFMZob$yJ#6(i zR^f^Kb^C8;E|{*y&{6R?r^AsUdiC?uMo4ut@O|I3TiKhX-Cd8}2rJTET0DQ5 z=8uH6XD!!fUghuZxUx%C_Vo`3l}|1$T9eYmoR_p(bQK$Ldx;;3$}&6IzQJ?i$rW|( zE^_knp~vNuzuXg+%smtMB%tU8sJ~kblu#VYYataQq4(9;@xws@yMW4(Yj!ma_h@ zH$^@6%3f+xdWmUc!R`Y z`olW?EAQQ)$pHzUFAH%pEmV!3ZSg*-N3r5Xk|5{7Hj8V=T+SY^*s=5W{X2b^m%L5% zXmOC%Uvek^{n~4jyx-qlG5znY(Q)K?g^ocD;5YW_;Qc8^jgA)Nvl7n=n6?W z2?VTl*L%6Ej#1KCVNsuJ=%PMPO8#aG zJXQKOi1~5pq}*bIHi;ygH{Y&RJ6OH>er#4{Btz=HGyDI(%|Cy<*z4ufr=gZSch~Iy z`EB-kySlHxe*OCSS$wN}qN>A_bBaGQC3?4REqk}dbi>wAA(bT(hi%S(uC$9icWeUV zr+vI1zuo;^{y6V`RQU3%k8Jn$;;2@H{X8j!p9P;d42b0MR8e{@6s z!0u1aeomTn?e_M=4-3kxci(<5&^R+V%3Dv7?fD&>_`cA zG}rGfU1a){Mdx0%?6+HvtQ-vsoSk*8r5{|bzA3cH^)mO~&mB{WuSNN}6kS{8n=ax0 zKIX9+k81k6y%Nd?R>@s*Qxzz@xAopu`Ic7w>D-$u-fw+1ufwbH+N`bb?(S~qm$$of z=T6q{scE4-j}3S(TkvW1Ig~IgFx+%zO`_i{aaM+FmPR7U8C!L;&uLx{3tgSG_T#<6 z>mS5|LKV3utIBU#+2W-qttiq|rlQqy`i#}y6B2JdLUR4|c5Yvn+|!YjdCtl>YORRo z(kVe!<<7?}bJk9c67aNcx?ZZgG$Om#u%xz?F>(9ttx=*EL!H`swocnJt%u{PURZW8 zn@7s+B7yDCq?XPqIUqc#G)!}ac~9V1WvyQUi%T@OZe6xEdBgqdx(uGeY6qgYkA1rq zLRpRB-P0@6rn`(U&{JTi*sm4=4g=klThA@}`B+EEcgfag zd*#c775;tfuisT)@qhilU-8!BQ?73}6Zj<1R(MA-f??Y+({+0{D&%yAbZly!9V{-U z#-O+K$~DFd5wF)eo3QLSd*zu6daq3wyqHv(`&Smv?)w1_fzpe!a)xsI4Kf zAb;gQ={3t|%jaB8{UH8{SAel8w{cQ-C6_{0fZC$WYugP9ln=sU+qC{hEAnV z-|zn`-#R7gb>b{LKY9JnpQ>tVYwG@7p8r3i=i=nBQ)W2u#U^Un8`DfR&ZiYb<64r*M z=bdxi!MLFKTS4)RWtz*M%(?ykJ3~eX&&0Or(b;p#Eydp+_N@#J@M~F~>BNvc*GnDN{q7 zmDJh>rK#t?ghk74pI4zettwrNSNqesNrs9ocTK)#FI`?fZ|3w=U(N0JLXQZxGfp&{ z*|zc4nuW{u9)9Rn%)PZCbIm7OO)Sn>>*!L-vbu?r@qm@@ae+fI+L6NT6197F?ymju<6h+Mb1Sc1iV~j? zqt!g;-1B2|9eWIqSqU-*tryz&|3_%Y;pyk+d!Ia4naoq3d8hYV^+d(B9c~-WNuQr9 z%%#xam?R*Uw{_vd-I+f=T)iG&`}gbd{`lzEX4(Qm48`f`e~*8U|NG(P@8^$=1tK3a znA8Ve)ZL+#8WGNTLnPbge7B+5uZ+2hDh?0sNr$8CuePz>IEt8 z>tPbfb5s}K-rTjXI{$Lso%i3^8miM8^zHxsod5s%|CE!g$$PmEKYUkLzN7PS!ko$4 zCCl24xHfyNHeFsi^LZ54L`MN9%|n*YOJ^2G6?tt{>&=|lm$mWPzQ#A#Gpe0TQr3LD z*DmqsMeT-jtDgC9mCl@d{pXy^m$IkbYmm+nogFT|XwSUCqO9Z|DC|ewlSACqX=Avr>+r zob<;PXU{E|XE8#QUQnPhrfnr!~7W?!0>adiB2{2s&lfB()Of2^(a;`?5!Mghke)q0_ijUA2% zEZlnP`qVH_dmW{wN!PZl$<}oJ+cS${iTk!~c`Kh5`)0;YON|Wif4Htua8=R`-^Ug% z3<^@-*E%?yIo3+<2{Yi%p1CXe-Odyjo_p5s*Jtk3<@xvUVV#5$!?#Y2AqM(mb8C*1099NKcZn?DyQQe2muL_kEn` zk|GfKy!^H7$||0!cg&fe#d7<(Ud6Sn4P|(-H!_>CDduaP;;+OWEeBPjJ6oS}%`v`q zG;`LP;+7k`*A#1-GH;0D^k2*Re&PiQsbdFIPFZp{7{1uM{ha2n@M!&|3=ER)Z4Nhn zGYEuiT6Xdw*XBIAU>BC0y~2{y+C0w8ZRAwx`~CfXU3GQ&^ID0dp366OaC1*6-OAL# zpc(k()2EoAz|wXFhDEBU&v-`_YwkW=H{;mnpIeq~-MUCZ<`qJ>PH5(`nLaI&`j0f}Qb9_N51+&a>+G z&JGuF%3iCuTa#z|nkd^UOBZ{d#_HJX-L5{W^}inW@BjUFf5hWG9}_zc|9fV}xAlw{bw3Q3m(H9X)b(A_ zZST{A20UA*z2EoyTy=Tz4eQrIUP?t4GG*rzx(dFsPCUX1r-SjWUndZ z6D&GuDgOS>)0&@u%kRhP@f#jH{q$0l`Lb{4Ue7A&P|C|@XV5vl$ulCdb^d5uFQHwb^=LCOs%r$LESbOtXd-1F*?kOQM{T%0ZD!Szg zp0M zV(+d@=_z3qsf9uT1#i=emJm5aYL1I&YhDNuHUcu`{4tF$5Im|=Sz{*8da-R z(_3cGz8fB2`}uYJ{C}70|HXUhhTk`lU|@N%mvgnm@rZ24BbMiN7<^fRPF>#N+`Z(P zfn(ygbN-i8*1lcq{r=FR@7EJ0cI6~6WEHeHMR4%&oLQo{R^fBerej}%b}W;4ng8$E z`ZZq@_wVmKEYNnaYW*ea*06P=lka5tp58OZ|Mb&IMNAA%nH+naPFgNq63zF)Ets)$ z!#bTB-@Og{@9bT}e~nkMPm%AK#aYp5yc!;sYxjl=usvHn$DhTlMcmu1kJRoB?c^{>A+1GL>__wC(@5_x%fNgW&~_GWI9 zVd6cUu*Q^)zuo!$-=9BsZe4aaeC^lmqKd~PYGy3EyZ_(y|5fI1y>$PduKzXv|7rXG zpP$xn-n$w%xA^gv;&c6%pER*Ih8257`bUUy>M1BS%k-;HEzCZ5)Wr9kcWIGPn4##oMxEFVE9*<<>FRpZ=^YJ=gbmiNe(HAGKzuDP5BFc9@pBS4;Mn1>g7Y zq6z5J_IZ8#^r`1Xt!s{2g4T~6=O3jg)-vAfc$t$L>y@fs4=`8;D zsS_9R2A#S0zt5F`@9B@8cz&9Ga>xXYbg#ZC7e^%u(fI60SO? 
z50XEByB6gY7|Y4DU|~k{=X*DO^P7(4IIx^B(&wnKka6PJbto@G{dDNp<59Is*kiJ} z{ybmpb+q#RSAl@b+~s*|rzLenEU~gUT^Y06UEL@U1W=3av#}=Qv6YZBdFFpIs z-@9`S)s}uOv08cbQ2PoWAtfQl?5U3HnO-DayHUNVpM{@IZ`zUrFw@4Ob~U0GQfd%g75``Us#uG!^N zOs&p8|NOFI&l@?pyt;3n=iBbJ`Tyf(@Y8)8tCW;xXEkv2Y@Bs%J&%b~-qvlms@~^K zZpr>W);X=MW@@~Cp?cTC;iGBUAB1^l8UkrD= zG>CYnXNxpvjBQ-OU$;-<+*iKF$FfiKdN;9WW8RE} z8CuD&Ynk0VcdhbX$5rj?c5MC|Qx%5GCWfbN;&&8BU4H(r!fI~Osnffr#Jqd=?*9J& zAD*6meED+m^LM#3j(vRiaNqZTKYoN{hyH$U!1Md>y$J0)$sF(R?d>*vS7!a!^xs|3 z*1y}c@BjI>zxLP1V4jbU!{z6Go~t_j(VF6Ok+bHMp1XN3>s)E1xWkqwZHWww4pPSq z+c+4epFZJ0^9vuEZ^lS$N6U@PwCVax*i18xnfe{)MvM zIj3$nukWzIi{O+KTW)12DSbM(`a_(q)SZN>LD`YhO_zT8`1ttUyyItH?}|IO+0EhU zobugszx#*TRBYqy17yI1X%DZ#eZF5>d6AfdF^cT9X=ZMdeZQgTfrQTMLcMANlzOPr1f zIA_PUu-;f0obzGrS4Z3Ga~0Wh6tjIqZyK!@Z4bCz@qX{tI4{{Y$LGG9ma5y%{gg1h zx^L5I?ys!H3l9IAc&ogtZXhGL0uuKY{9K6 z;XNBOTAuDwC>LwtTf^wb6lDGV+PyEAs_t-i2wwfj%wcG9wV(I0i^uZm;d2w`?lyc@ zwDXVu|Bv>|w){JL|Ih5*yQ`l!8(y=_)-gQ2X--{zOj&`0Tls(Iv3(%e{?eoAzGSjfrS!l{_Mq?Zk7gO;L#{WQSI) z>+(tKd>!gJ|1Y#_+F%;D?`@jPwciqVOFUA7wPL5desz0S+1!SX71bIsk=aVWZhjGE zxOexi;LNZhz0e|0?_Zan{+|B$_20K|6ZgxvDLywj&n?8~oboPSPQIDnuuUR)?b2zh z$|SRU1Tr`M*mGvSyuEz?@t-vtQ+gJDe=nE!?f2b(ZZ2_geX7qd|BT80dhq|(^>ts@ z|GOIhd`|y*eeYAtFKbSZ^3vS8HhW&#HA{2-(?5%*U2BX^f5&w0cc#p7>(bEgpPzpE z@gt(>o;=^NSq}~y^c;P9E9;czu?@H1e);q3(^J>KYZY4#KTPO}nrgT-h4c072gQsxt-B6h$^UZocz^xZ)$vzX znO%*_dV0@$?J8B{V;Prk81z|A-;(B%w|DBMRPmJu_+3s&BzkJTlTF+t>g6^t8sxe!q=&)An2;<;DX~i&k#6ygT7k+K;OKjmSF{>GBz^|NT^^-|GB#bW}GE&ROt z1rIIDG2WT7enX*|*QZba{PN79_P_rME*y$`xi!x3`RSii zyn_}!sImNhcFO8GVq31IK31C$y?X7HD4w7%jf_1TH{JC<8L@$lL7?M{Oy*v}MI1)) z-+!G;IT5w~deU>91s5~61bIDvx2In*BHNLB$JZ|o4of7~Dr#-zxU%lvtG7=2u`bU= zC%LWvoc8hJg)oK0mj_a=9OJTjQ~tYI_4Ah>Q-bda=p1=|{Q3Xy|3Cl#di(#c`|sY# zy?g)ee*M>v;`@I*{rh*VV#mR%^uFbvm!DVdE9^LY_%Ji~{ylrd)?Je*N{d#XFsJ<7 z&U;bf3WpLUIDOA8ohG#As!r4os;p*Pz6%ipVdzbz+ zl8^7-`<-8Y->%*J`wvRL(0Kdw<;NdY+qfo9-gx^L>+PTWHtt)yd`sN9G�QplAEV-w$NJPpkj6Yxb$4&0I0I@gB3g zrccz{JascxecfroV9;Q-LQ|+_L_j%Kq?c2Bi{#z$;C^hna?T-krx|*6_ zpMHLRTEsTdqUZFcGt-YMd`2D@yvb%Bbn-mQ6yS2V7%iMJA$rOeMY1_A7{_*8Y zME1|8`t^cNKmNS@yyuL$xw-tk`kKnh$m?6LM4g)+GAVBU+|BRhvb@KYsmj z=Y+D$@4l8;&GUc$IW_irp~TmFZ)+vWtpyCPS!H{#J-um#Pt4kV=VqRM`e*n3zix}I z_Jm)uzCMLh^Vfu;KEr>%*8h9;?c2L{dAq+a`|DNS%UErdsdyeDy@*NR^&Q!Jt5%2i zX1uqrF}Sv5ikDu?l`4L_FV{r5HXnOe-sd{4(r%v|tIJNA_g7gSGPo#jT2tEM7AYSi zykNzeW}eKd+H;lj^sns*Dq5@i{LS9od-w9%Et#VCoLh#mj zv|2n>k7?2IrPmx@zk9d${o3Vo>i&E@{`hra-V^5UONfrl%8U_0PC7nTf=1c-TK^=vSYI$ z8z-yF$56q{wXqkyO)`2e^JH(lCg7TE5)?ZlRf<_^acX1LUNt4Ly4p%hS$}^1pU=Lz zwG?Z9Hd@{2zki=$+6=brSk0|*!aX9Vb+;C2b39lRY~-UDmVH;$?y}RNTkD_qkjK;+@4?e z?d|%1uf#9cZDHfhd$#D-s;%$-y}LJ|va;4!ld=0$CBp?NcIjs<*CgI2pH*G^`e_a4 zxy(XFrHwgHmw0q6$`+8Zw-u%w zoFir+Fz0jO2D6C8Q3;E-zjaP;SCgJL>H5=}opY|Gid{S9nYwJ7j0DTeHQ%1RzO?nN z>Fp=C{xzMj%CEi9qV;@nX=8{2!-@ojh84BG`+u1}i2KiYsPs;b_QXF=`L(^&qtbq5 zu3;2nl`ps@Ipx6`MIFt~XNK`p|8`mD=W0gRT@h=s*m*LJU*Jd*+ zI`_6pdWh)#$t+uG#F28k>(A4F*~z)9zkdCCwYs{v_{Q5~QM)8;-4*oaWv1@t*$YNkLqOQ@mGheKsk%V{Orwi49$+Ij%@qKkq+&`D4k&^Ofgr z9yd7RI75H>&l)@7yI=GUEn3z1U~loe8oTqSlh&SI`sJG7)5E{p^Y{OI_UxTr%a7;p z_y2!*T%MDc>DngKguR`wr=_mkdggRc_TRJlb*oLT3beP+w?CqI#56O zKPT0H+?@YT?wy>x{PdqXcm7H&zkAMFo8k9eJMp!v^IvZ|B9X^uzIW@n-kH3QEO@R- zzLRCiUahnDTGhTYzwee57e`EcTC;0SHrKtQ5|S#*uSQw-T|WEvZU6jzIqm2~@7(WwTwx=t|9t0hj?Al*7fjPq+BxO_Fa7_Y+TYimKlFN*UCkc@-^GR^&LR`b zOtm&{DE%hDxAtpktP{uXyZ@@hlc$=#s!LwGcI!IJY(1q@(~d4Z&8ukneh0Ub&t(gq z#}+;vTh!LN6e;^1Hem2bf4TOjz_CJ`J2FWLcZe>rwd%GYq^QpxPL#5a5A zpHolkbGDJmTb6ldsg1qu^GV4)zYiabiuOLKy8T|rc{PQo(8Fi!Dtg0$95@AwPOaTO zXOk6|LuK8~UkTYO#b;jMV)cCQ_H$Kx)43U%B^(&mtX-QKTHbVPX7%%1tBS?UUa#Oh 
zm~}>tspG@zml93qEz954JjtBZ^u7IWUhK5%*RRj{v`gpM1)KAGx8IT${3-CrpyK`B z?{}mb^o-)4&2;VAdaa7_O^e5hm8}X3rYSLOIQH${mup%bOe!Z*dUlmozh7#`=ID?f zE7K;hM`2r3>7IV*EeBqhvNs6q-E8QReYbe! z=ck*b)fp~qa$sN(dwxfNo#l=H$c5?rn9@7TZj zw!2Wmd!_37ts?d<0&TNrmp_fUD&?E6)$FUZjCsxWEqb4yPP%-n+C9BVnzx(QWI^GsV_ND18>y-zE0;fT?r&;wAU4 zG~I4zXbj2P$XKV;71J&-$5`y3fLFd`Sg3pMY$c(X*yP!bKUb}~c<>lc$_0tnd(9II z>NI*JD^I$XxH>BcBweaG@wRdIKY@H^35KF`%eJ0=>Z4I!zWsZ-`MJo4PAiNePi>8T zwslHWk=vxtPfdCPUGnnzt2mtAhrV=6U}I~Ua`|I_dV<-pWu`A*rs>pvy?R{l$ET{O z>0ci{TsY;|r(dhS$E#S)J-7C2Y9xQVZAZqf<;Es`&aZ1{KCg^qQ20?`@#bEA#ryUT zPd{=`w4CpE+J;^JSmANUs`pwiHeIv3WB2CU@8!pr--{RBvhkWM-{FTF!v9YzZDI3# z@a)?$iSNIDefsm~%^JojvkxD%6w3V0Z~nO8hVO5Q$9v8^|9rCAzGt$*ej&+Z1}mkC z_YN+T3nHtW3$7h4lvw`v_378U)_tC&2RgF&+r2&C?JVS(i@zH#nX~Wr;qv%-a{s?9 ze}8_uv80mnV~aiY6+eFb`)kqr{@wd$skOFp^*=s-{#m5CRN+G2_SwraV=tdRo&L_2 z&v1E;*~DFi*9!fXZo76+%9t-%rft&Zdrsee+jdNe^44@|YGCc+dK7woWzdpyyQXYt zI3khH*M2uIzh~vzS49)NueI=Qee+}vk7R>xj+4u@(v@oklb+8j=Ck{9&XC2Sy}C-m;L^0(z|_o`MI6br%z9f-D7A2bXZrR-adlrZ9 zENv8;?Dc--#F-pxzIJTTIsLRIt|xHI=K_l(0#$3*ZvR{NeDCg$BTF%tpsvKA+ggdce{@ zrKwzz&t=6*Z3UgZ%TNFL@={pQuiTvD(o*K8jclwl%{#k_`>M;Yy=vU?Y)#J`s|}JY zjU6HJ>zD+1f9}3FZ@XTCqtjxK3e8yp59f5>lh4b`d;hM)nbq(PXM{kn@imE82hPXK z)&IZyzW(Ryc*UfAS-~oHu@o^C;pCEQY|pQk`evFQ}k>)Ov+ry`A< z)6-ce%qc#|{*ozHVt2!qsQop+zI^&pQCw`05@OrHBE3QN!QT7t-Q;Z)HgCDdwXov( z4(FcTQAZ!$p0s6W>Glqly;{qrOe&oj7S$E}YT}wBM@7DTb&gUz9fWc(dKH-VQ ze1$IWS`_YNr9E5srpZh9M2;BWogD!pg2{8a=XvP!W%O)3wo9T`=H#*KmsLIQM}-Tp z9#Jq{ki+%cm$&J*+RAGCKX0%9U$yzmwPyZvJMZQR9lV?wH_ha{clK3FY3Gz_sb`gp z_ztpL^EGb`V_4L`ckAO%Tug?2$9^5FjhuEbbrxHCjAp3pg~t{QPT607K6Ra8U-#$3 z)$sc|rx!T}uQd(bclDazbK|BT4^B=n;7X8Lv~S9}--$DlcoaT8=C_}6X@=!7$Jw)H z?!5MB4yR&JJkZ9k%ySn$8&QhgK zThB=pSJ(gkP+;HlJ4$tW(axB8^X5-WwYziYSxiE*z2bAB1bAI#4at9hOv2$pjP1m=O5UE4f}WbDx}l26 z3=)kD=bvxp{`l}PyLj!nrG@YJN++)|W?*Sq8}jh%9pR*k5B#$X&CVNh$N%hotHAQW z*LZK^s@wY~@y4<+D!N={l2%YinNZkqpXa{M%VhM=g;fM*Z%%$ z@%G&E>-T0dG)$b>$urr|<#OJad%cer2-T?QIp6~tK+^;Ofer*1{`YY#l`Tptd@Y?X`nC1U3 zUNM~q;{RV=f6dhRgpIy`|M~cRHjF1eR`_J9CMR?3-G2J%r7Zoa$6u5`HsHNnw|(=u zdFQ8}etIXGUomf|=+Y;TH%<{+ynp|0^R~G7zH^aAkAHuE-($Fbz5do|^J_nU&AWB> zv+DGc`s&wn-maC*Ub9h2pjmvv=bx9NVy8{z$#jTJe|cx-&TVO(UT?RWKCj6QJwNM~ zRM5V?d++w{4)WHFWQm&>KX3lL$RB@ymzQrZ-_`u?TlI7q$y1hv>8v{hHcm0}oox8+ z=}+&d%LYdT9`9PW{Iu1PSdJpg_^90N+sohI+rK;J+4Skt1)8=_zNUI!JZtHbpwOzN z$3MQTh`WBizrR1q#+L76K}E*e1_Q|}yDT2Qo6GEfZCBx(V};ju_C0QJ(0qFD)+U|l zr=LE38X?`k`{nI?{?kVD`_z?kwyvs?XNk(zxsx?_8K)p`Zi|@^+xyNJzB@dQb$pmA zE4bk8wB_2Z9TMq+8?S|VnWapa&|3PE)9_V7!L_XU&&|53x(!+CI}E2qMX&w3DyZ-v)_;yvJ?^A9~ z)!Wo{OxpQagy4p23=g*Q-P5vNrq*@Odb7Z!2?Bwyd!<~Z=N4CqKGR+tob8+WcJHz$ zYc}7DdT*<=LS%D!UGjr1%ts|w?{aZoV)*8qW%}NG4ef;@4kj~tGVj!W;A+`+i}grE zcNe9$C1`fq*lcuL~8nP`4%hq1{G$cCt-G-N!16by4<;d8ZyEg8Vt9-@Ug4C%}Y(X*Q`1+=Xx)jJ}P z+_AWPVvk$T1g>-Ixo>Q<)I6;AiedM=@Ev<)y@CXV_O;I3_wu&sN6*u;A@R9u0(q00 z`FL4cLKi3Yny_7wW9gk&)Ycul|D_8g zRB{Jh3H0e%aCfOxQ-c!Ir~1F&|Fg3gvMuHQzEAJ->z64fww;STAhhd&>AZ`-cc>=| zO#c~lYR=_@mDjDMlaCb!=&k#{@7bK9Yy3T*^Q6=n)~wxjuPu?|VEcpKrRDtZ_x<{% z&G_?UyZ`jlM_Hr2=BkBO=+BMZ9X36+va*ubFiJdY?b=Yr6)-SBhP~}Y_n-X~Y76)jHf3o>=((E$Z@*i&S_gUUEPHRuxOdN< zIKASPt>*Ilf{mMx?cQ+zZI9t&gEz}w*6ibXe9p?_c1f9Ae6G77w>0+px!9J8 z7ovpsm|v@SZn{6j{+X8f1$qA#{ppc{itlA7YA&7QcRXr#v7Uwkx z3R8z+S_iYz+0SR@nyy@Zt;yrw?kStSG@L)^_#BH!-~0WZrE7J_p;ixuE87*OX7!q0 z+ZJ_hd9gIx!>G%*q9(sy`%lWoqoVf2Hq}j=7i4_6{j}($<@H-LcB{=2n8=X8Jn`Ke zZpAaxU1#)uH|%qfWBGBbrD@A_(@b9ld&-}U{CiC^Bo}OX3wyW@t@8f;U)#>CS*rGV*SvG$$#+cr z6kQw?nh(h`=so{@`SRtWxU97u*I9H}m@<}HJ^%kv{@+2%Ewio^$|kPrxEL{Q)-~0o zSE8F=mn}ZV5}1B$8pnF!XTE2CinB~j;=UFs?`E+w2f;ny+FXrs0hk+0X3rmW>p 
zoT2voZ=1r)w#Z$8+kMOaa;Ar|0tTXkGigBh+@6JrQjof-| zD-QjbGMS;dKwPrC(7b1NAyZ50UV)Qm6aWADQ)L%>tnkaZi03R;#o4NN@4S1L_nvXZ z*EF8HcVs#~R9JnD3w~vp{`}7A*V%K9eV4bB$>ZNS?bzAl5C67*-<@eS_uPE@dabQ& zudDKxZZqI{Y_iJD<7!M^!DW-8d@jbLidpMkZ_?9Ty8QWa)Aei17)(-*oI1R_eYW)4 zCL2x9bG_N?Kc~h`V-@^s7?E84-YekgoVAOm6u-{B<9omE_t{;~5+x#79IlntR@U0h zJ-;;MuJ!d))&_~>j)Rif9WG10U&*evnH%i&>)N+#BGIqcO1AV$=s*4T&Ft92?fZYn z>du@H6d51i-z&Q2-JM;J&Tln)fB5X(SU=0>kM25Z48bdxhPw0^_C4Nl+n*t0ZSL%S^$rK7d^S3N zP0DoRi+_%7dy2IabA26M)7U2NE4?;h)};`|rWQj!FZWE3dJe{Qrz!+rNy&`EjK`|0c7Jb{6F#O95w-5_5}l-u9mNl`e7@hae!f9?f~3=wdw$0^Z%pew z+1}ui!6WIU^foGYt>%B178f^`FN@r+ad@l^RgK+x_28|$dH1vSpP4?#NW-;_qvM{H z?shIi4F(^NEv2WrqnKF}@4T)1#$9eTFK&MP^HjEG{`dF$AGf49N#wtm^IJYW#;B1a z=icXYvW~2IFV?tTdZ8-9`24N`kBi#$OOY?Ky6>y){Sw5Tz1jGLL65v$&6az6|9`j` zb^K0i_3O1;x2^g7vu2&Asxe1Hibq72uHmuyfB&TK|MT6*E7`>boF zv6sR$zm&bbc)R=5XOVBAVuCDYEvygKzXW^FpZndAZ?Qbvc1`}5uLLKX=U*4gIUIZR z=j^$@by7B6S28QIp7PoMwYiQ)5;&I;*agTJ_M`2No@ZO(be$RzN0Y07Go@TdFtt-JR-X!EpW&Vmo? z_%2$Bv-P(ueR6O0<`eTL-}k<2q0b8pM%+1FHvC$HK1_w)Wg z*Y*4T4UZ&t%${UfyQG5R|)z z{mPaw=d{@CThzSMIzB!tU~cO7kDp?{W9LrEw!~S-^yB6Hk3W7|B=@MIxLCS<#l&yr z+uM&XUp}SyTa>mR_DHvERl{ z|FnGPo!6&7zWh0pqrI@^|GQd_x|!2sKK=ds*Q8hD%$Zpi`!o8VPu}|M@9*!2EwZE@ zJ=`GQzVO-|NtY)^=Z|0Bxb53n^ZR%1-nFf(w3^#zIrZM|bu;#UFO9ukdChx~!kj*J zh8bnnp8eQEjd~wU|+#G>oua#fr%sZI8^fc3RPX=k-tKL5`tHonU<&M+46cjRf zgp$|#2H#&+Xw-bM+A}IxFLGOg!KLCgVHbZ)5;*j-L~NSkvAcQ8Th*@Jx;D$qOmh3u z0IhU3hF`Bb8>W@6lnA~Uc(+1iZJ=n~LnoJmtbcEx+q>=Btz~yFMST-Ev|y#`-tLa_ zz{CHh&NOM8^LeY7i$U5YHiqoCCVrp;p4nJprm;?R`rxsI%WB^uYmOayr@tLmj!M2L zvF~5YUdeZN8!|0U z=VQ_9*DFR|j*gxlSM6?Ou>M-4ipjZ2TlHAddCh$vIKSWf{a}=^lY)^FkJH@h)w*BT zv};UcRCxa8?%TV!f3PTJ^GuR_WYGKg@87=y?boVbhuWqvyqf&;5mA ze?C3=<@EZm;f8!(v+l{{wJXNy9sm6E#?I)+f)ic0Wt2F-x_vw9-2{fh`eKPT!|6&o z*Q(yzNN%z#c^9+OJN!&)hRY_iFxpI2a+^e)5r+!$N}^+5e%%dJU?-cgtS-#@T-^-Qni z+ZQe09};NSMF@DJ)3V|;7aaUTzIeYy>r0*A8H1QOe(%^CpGm? z7um?^Pv80dT!pJ~!SA_GZXMS7blLBB{eS!a@y|{#dUhlv>#QV0)Y<2!Gv_&PI2a*) z>|*ui*~k9Yah*G=_$Gf@=9kY;AIa4Hc_l0OG^nOS-mdOxZOpTuFMmEgU;nH4T&9e> zlb7bxIo>Ciyj^f``?cknR(I|QTzpwlS6R6;=G^bU{~jOj|NWPb;Yz|5o#N+qeaY`_ z*Pd(_cswDH7WgQCHjLcD>KnFMZzU^84?@4f_l4H9DNroPOla)pP8_wL=h_kX_E?C^Zq zw$yb?r|k;;J}rgg_}{$p9h{0S5AyQ(4*m|y7R*>XHHtAI_WC`$>p`X_8AZDjMOb$T zIR16JY*F=|H6=E9wQk?V36^vH_HGwg6vfJr5IODMu42El{qn z$6kK>=GM+NF)+!qW$s5sHU^EQ=RVt2nL24sl$wa(c5>P{RG2uJK)|?7wPJjEuUj+~=&H_uTE;5xVDMb^cnx zq}6977WY1}tLRN&J5V|AYme7TzKP2YKU}?j|GvF@zn(KJf2(rIMx@mJHUEO!aSp{h z_Ld&ZdS@osSg0g(SfaM0&LXaFZ@F{INsf-d&OJGub+-&N*X*v|vAcRl==Y6V+c*S! 
zH~TS!F44Ut$r8|GTK=8?SkT(moy;G?E;YTFBYkY)Q#GZzb3M*meovdTRq;q_oY%^G zR@~v8*V`U*mOl*g_FFma)3PaYTXTk|RBg1U~v4~Yf$qTjE5RwbDCh%Y;9<(a+HF715F-+p`d z?&HUg+s}`?Z(_D%V%@I(TtB8A+y2!4`(tzDpv3+)uRove=GbzML8`IML#TQ7vV&H| z<<&d(?ti_t^+mhIm9S@(nQJ|!C(6jny}Q5n|Bhu}K&Kn}*Z+5SiJ^aw@*bVD!gX1qt{IaE%7n;9yO^LjIZFXHl z_S)C8u87=RC(y;F_hP?n`@?6?(l|aE>F-=J=bC`CpQcN_xB2wS$Z4uA2|T5F@9*!e z{{HXo^z-p{wZB4&%#PiA^F6*6boEEIywkbVIc>9N&tAX(=dLGQP7TMR%;WX4{m!31 z|JdSH>Wc4S#gWr1_r)*-ma@E;dv`o@`;yW~y|y0yecz{T&A1rgw{`2WOVO{->#x^b zdu&I0`TM-Q_m^h%9e-?dey!>AH(O@U`TYFz%$c=c;%om{@Eo3gzJKX8iQ4SD5mJwK ztm}5mndATbZ(V(Tef_>U*()`dN-X9L`>fWXvDTA$5yQpk^(trCGPfPmxl^5&J4?-X zwrShsYk8L2&pIikU7gM(xbl0R47)=}mdi<-*K4*$?GBx3xjZJ@Rpq%x%Yj2xIa6c2 z?s}}zR$ErF=MY2ePHj>7Wm8In1sU%anXO=6cvVY@V>V~_{~Pi5lsi3Giq1)`SzP(& z-Kj>&TSw9&t50MFzLFE<4fNVM#i%d&!#(E>ZC+~?+XRv{MFrPxJ@<1`an$4mPNyb% z8D3j7%~beG&{_$$gv7LsDF-U~d{)$EHhEl=cy>>kd7&KJEapXbnjS4xUCQokhgH{w_b=cEM`7`RsFR#2d~ft&#p7EWu_F-M@MB ztq%|O7MI*TG1e?P>qw zEQakSMmL>OES_JgW}kR&u9aS9r-8-ubyDVAjfgzW!fvnkk!bQE!-z+%F zd%z`mEqCFZWtp?gco*7fWKChY!S&dxe639By`KV)UK|!^zqm7Dj_I{UW<`g$O1Y~% zf3n>o+nDdLiLa8<{o3s_ZwrKE-IsI_3B7;i0K*4!`F(%?{XYKv{e8uU^8bIhr_Ee; zQEK;X*%^{7#|_^syJ&g4>a3=HZDri`qrVHP%M~Vl&WwDPYVKEFUViSkeEpAyyYK)1 zxcmNJ#*1&hw{My9xbWOsi`R@Q$1L~`?=I*6{+++0!fNjG&rgfu{GNaQeEIR`&)R}^ z_WLE0ojKNSoin-X@!syW+oQ5|qn-V|c3%1Pq2T}L`Tu1E>;F7#pFUmu?}M$kIs&@A zg1k1Mxt0fWKP$Sp7+q^Bc>ng_zju$1^S|C@XJ`9Mpl9LTyzP;xS8u1h zvFPi{mi<NuUfonZDFpR zMRv^Ez0;h|`BdBYDRWyq7no2Tp0IPuqTlXoLyt;zsOTJj9Qb$7>s=Aqd)nLEcW2)E z`}2JLHOcr*LiQ;gM&)T6uk}3MarWZzaPC5TQ(|y9g<^A4Q zH**T^Rc5bQyX_iRgBJIfd(qk1b4{NYNU$+PTuYrPF1mI_!0Fi1hxe-GYjnS^(q1^H zo6jt{=aQ~VMy`UAXqHk_xm}w;`@yK?FCSZ`hpuOAD2~1{DJzq%&Kd%wz`$Go_!j$ykCnaN?XbH z`K73rd)HfD4QiK2lHqwcYf=|ycUTDbE1rbBUF@5=d>vViM6MRgRMFY{?cPNNMXBty z-)BD#;c$pN&^LES@mf}?Nv0WVt+;!?WqG}ieY;^n>J8SotB|K(tjzSdMJp+jKd)@j#bZmCs$y0`sW^lGlWe3jJqr(C77ryTDp zlxTnX^oNapw}(4}NQz)?!}ZV=+w<1^{B=A3k--~&_Pw!+Nvp3;VC3#Pn`OZ&l)Y&G zE9t2-6jid{2(UYC*2~VHdnRmCZ_6(W%hzj+pHF|h%<0H#)fv_6w>}LLxovvviyA}h zWS;=vK*b9A?X45n6rQVoKZmtt&LoCSiAs+T8@&B~*y5+e+RrI(t}3$W^j2q^UY}GP z^?BF2*K@A#oU)`SREcS^&zg&|{_h30Uc0w@+A76`tgF2z@F(7RTmJj7f#12xMQbG+ z*T0#5Z%I`)!wV;lLJnu1!!Mk#xYXR4#Pz_J%b)FfsUyR4TkrL2r(Sz@`pur-`~SB8 z|M35Z{C{_ci((21LJj8_E^)q@z2(Wzmpm_m{}s+V|9MkS@|(TK1$sB0i&EHfPJz=~ z{m^L}d;9-8r@UITzj|-!y=$B9$S+&J|7TkyXnXN#oApb-{QW2J_<(F&zx(Syb@kRq zCS2OJZgZ2p)Ur5l>wR*;e#b9s=CkU}>SnfFe(z`R7p<+jpM$b;w=!y(@kkZDpCj8g zka^Z5%%&V)n_^rRU z+b)0a_RFBNb(;IzXEz(N2|9J0-V~!({9d=kfM;=$-1VX`rURFMUa7t>|BbqF*=6 z+_U}Io_%|sly1EyneON^zy6v;Wtx(4Uz}|0ucHSO#9!$Ax8L+{wf82Sljm4mpKLfC za`nudHun^j)u+xli5v9Ji_DgLzWn^*J!fWJeZjV?(P`om$2n4gMK=?bJ}ZRlGOf0i zH@e)pbLXq8%I+@T{+Ue?aw<4C(XdcgXN`((`dp3|9kY9GuBqEP!7zo_^0m{AhQ_Ip z4FTC(r_4G2d1mK@gpAvN>)08#itv71b;~hNjs1%D=L_F`FPr3EH#xZC+|>yiuSxvQ z$=GzQ>U~e))T0t5(s?HznDShUd_OmQsU5fQwuYcCvyal!JQE&h*{PcxI>8WYtswY$ zdZpw<4wesHc><13nr8wOTR!|coBcfI_SaR_1}SU4x-1b+W0et1e7fk6(puiM$w3+*LiZkhWzk-6(Z=!=MKPs4TIU8i@QdA;dK>AAwd zBb(~x|C%--g-sJJ+r3DbBrKdU)Y$_Px(?a;3r_b}2BO(XkZu?f$Uv zYwqomwfRx6Z6p_OPEg&{wL*>$z*}LPCornAk4V zC@>zm{*Zy8*51T-^ZoCq_Z->5b^Sr^{qHU(4xQUE#n^k@P8%CLzsos2o*_j~A22K3 zm*cy;VgH=-hHmejIF7Z4vCR1%U;8)AA>BMnO z;<>Gog!9%ZJR27BIe$=mVdeE&@`dgZ2`8?Ffp%eDq3M=~f^@I1Dsx+0*AO!0^!}{F z%Z1#NuOuF`d_HOQ=A<=Ob5sN3jl~7GesuDboFgzx)!48d~L>-W2v!UoSHjY zq}2?c>bN+}@R}Ag

AE>s?{b)LQn*-`cj-ZL#Hwu$5b@(ixXLX=!NP`6PyGbyJ8L z*X|Why~P`MitKpzt>Z*Q^i1wzFV7isi!*v|Ha)gnpRJi{H}CS(M|;d?-;3=jJ|@9) zZ~Ly79Q!zPZ&y9vRx7ul!Q!>^V@ABrN5`?A|-&lZoZ4@hZMUtea#t#F>p znfF%U_Z^?_DNI|F{I1tqV%D~|zqY-eo8PlFqi5@^ix027iDd2h%J5=O_AI}tvNW!|%h2Ccc`i0u3I9dPp03}vaR1KguND6_T8qzomfHXC*WLH^ z|I3d*e)<2$`u|@(UUttut6%r=?e_b1t6%H+Y8BqQnKEnH&9`Mz?|L|LZwp-f`}gqk z^Yeqd7@o|%|3Y(zs@L}1#mUFEul?cw|DpYG zUAQ;zy|-m|g{E(lW_VI#w>`JIy!`o`+0Q$942!RSkG(%#_vfN#K}B9~>)!9F-?86* z{`o$i+uJOkSI%S+wPIfyBCz4yi}w2#EN%=}5}W(tv(Da{mGir_?R2Wn)~e@OYq_KY zE!K0ypPrX-sV1XonFFUs*XmQwE=#6Z`A*JyUUpbuPpa5a(Y#B=D(_=%o7P_MJn*t4 zh(9bND$&o7@9>3fzH3<-fuApJ}m0fzP4a#G$ zm#*d8daZQro};T$uRgw>wI=YFAXAq^*UBral6jubF})_ieTeOA$?DH0j1SMgeVcb< zsf0hn^v|y55wlcQ`aO&BUTL~Cr$^9thD%Lb`nuOvy~$cjr!2DKlid@Y+)=Q*$Y}W- zzV_z3M-EF*4=R75BO|=}ycC1Ux-(MVF4BPl3BUI*4cm7uydkE0Uxw=RIj8oVxn}kL zufc?^W&7_(KA&=KLQ=`O==Z8iqjp|n(&>Es%&1Q=9?dIfuvMZVWj!DL`N#*yfmVdp<{{P$C+wuQ? zh5x?mSO5O%`eg}ixuKcs4!!sNS2xwJ;&so$sN$zgP1hm>_m~+)YBjqGD90R>Fml_$ z(7k!zYc{dS>#5lc0w#Q&+6>)_$tNdWJS(fjQ0%*P)x`d*hEuOR)(_QM$W}4!b(n`Q zckAmHWKVf4;r{|M1vdG5Odggu60?a)8jej*H&mgxfPtAW_V4SQR#~0^jTYKF1IY>TxfbMvtILzPUw{9 z2eTgcFBLONasH6|xOT&}$jzA%0_KbC6gvwZdPSC8ez`{X+LmQo?-_nReAszqsYKiV z>;M1$|M~yt{r}-Rqxj99KD^3OzMyUCy*qdIRQ!C@E?-|$vt#|#xy4-4ysw3BC3M`Y z|FLCVX5_TOS=lRfe6}i0S~b;6^Yx~E_5UBf|Nn3M?!+H|K7O{*P4T&2dfDdh$Nu_| z{G7`s=OUG&radjOs{Q$?GInv+f^5h)0 z_1dAFuTA>}zZ^GQey8;A!vf_X?dj3g*8at(SS5ZfV{A!{jI5aDwza-A_xYaPyZ5f0 z>V0zh$4^iH-4l@IV;8$NW76lJYt~;Anm*&2|9t!Z&&>Dt6`r+x-}d*ld3^8Zntd@r zXI8EKbno@yo27U2_CG$mPA}W*^sXn9u1lN@h`!CcV_vD=!Q9(Sv!)n}U+Ycfa13_! zy2@)Ezxd4o%Usi*XAK<=53_m#JzHljb^4&RN$2#(A6wR~zSr)xaE|esq`8~VX+1Z1 zuF`O50l$z?)$>^bJ%+08drc~y&$wBA|La^o?`4%?j0?ZnT0Tv-3R|aW;u7Lz8Gr6q z@=UAjWhdHxmKb5#1wfa_SmadMqJk#or-IZ5Y&)6D!`)Saz!Xm$|Mblb?{Z@&F z)JAa}v|7DeK*`{mgtOL8kpqQuL|E3fiKK58zhiT2>okdAuTFuz<-WHqUYD3oD4sX{ zX^q{|87zSLwNbxhsuHe}u zCY$-|LFt}I@5H5s3inLdvtyrHo&RjKcFQDFGn2-h-)hZyn#I&j1(g?ZIi@NXUWv-? zdA-Xm?JM7*X|25>GdG4M_v9UN5U-l`U}|yK$DO*5qq?o?dbYYen9H&HPWbF7A*QxN ztS%sxx^q5=EZ06IzCNz*^H=lViRCx=mx?fG-C~#)rK%lzx-$LDw6%(xPVP6~ zt*68#Ahmhn(F0P!JDv%xRn}X&X~iCmfo{UUwbh%Uw7$}kkuiKCfA~Ff1O)C zuO#5@oYHw~Os{3=UaGh0;xO>IT0W(9ZL!p^lr^eGRnspof1{NgSf8pT$um=}B`W)V zseatPoZWph`;RX_FK>S``=&hKuS+kBrgpTfp4YR#_N(5~E%)EY-?#KD3-TD8%9DiT`Bl`BY6cL92IXSs$sq>zvMuzbQ$6fzgaKjK(<$7tlJbU)cK!0;p z$XC6Xol)uS3(L>V^;<47*)T2kvWf5MO(u#r65{t&aWXxwnE8C=>)r8xKZS3-#xHMI z_p|2zmoHBaeRGVRwl(bC?{k&uy@}IOXL_$Zy=%>Bqc>$86_)XG^74AqyE!!4Pqtr+ zx~md$-$e08)a$23`_7zx{MUKq(`V1zejg6%s$-g7dGR%9R%}Vs{NCy<$HLdg7e3wD z^Lp1SKK|alJ&$)CS(clZmp7-lu=n(?u$61`eKsteHm92-BVs#;LHJuU7Zy%)aX}^S z?gP8laeZZUcxL4re9H3qot6d1-}CH~Jfb(3OqsUy*p62VYG zsx6rj_IM53yv`N+mV$|Eb2^+ZZ;4$#m+|4T+40j{7Y6Vw_)zN1a*ff!=VXYp%PF-nXVIx^M-e%!Tr zU#9u$nB&k_yLX!rQMlw+zs>#+r7O-{$ER#k{r&3f>+9>|_E&#@_jUDlhYSwK`|omJI9;=vTCAMZ z^U-Mj@t>R42{TOIc&>F@rsTv%7yZ8QZV{#_GL5RAzb)MLdaCZJHrgu3tnP1#z5agIHq&#_$rmPw?VDsgDUrcs8C%)4t+9)P z`IMU5LJqQq3va!ae3W^5khgA?w^7R6`%OF#mYYd!*W+0*O(uCuTU#B6@ApqrrJq0j z_%LzSvCmJ1_Lv{LrsVJMe>);{+sD~Cb2lj@-Y?I;zt8ZPjja5!mqA|l-j^RVx9TuI zrjuP`B_}U8?^j*;&M-lx9>aO(rx(sTc3Snh7Q?yUvCsFK-~a#N@%8unq}t{#Jj=3J zo$q>-R(C_6uio@Pm$t+mJ9la>-Ba_kXurLUo!#FbkNKC1D9wAGI_ue-%bTBGi%Qmw z-C4FK=1Ti-cHi~p$F?|odwkv$wr=Yu6TkX@pXc{3oT9~W=5x>MOW&QZmwRkE^Le@d z;*HDR?~3U%c>N;i`mOi(_g|OXYIZaCdX`Ru`L4!@wtf3|?zEhLTtEJue82m;8Vi}W zM1y0`=JXiuzI#tXczUR!+nFAY&>cqE$KF}^Jx;xHJ8!M(bcx_C=UyunNVKV$Y`r$6 zxa(xz&QnrzI%XXWz1GxnuiCbxqo*Un`&a~n#r$*PHm|c78C-8vhifaZFuE+6yzP9@ zSq1~1xA}FyXRmvEtZ>hq^HCg;Mdn4=%!-@I>By#+M`uQ|tT8=irF3bBk)R~o`l-&p>&_M!woMWIaOd2! 
zpf5^S&3gZ(=pC(Gee-aerDpd{spMGE#D^gjan~v%xBPnbd!7H3ERz|E)1$brFIr@q z#+H5N0~z39K0XTLQwNzIAARC>AJQW~%n` zm>q}Mgf1nA3UEmJ_51&SUjI|y>TUS()meruGdVcV9-e-_{_U^n`f>mM@z?+O`~Kep z=in^WAa?V=XZ|(3TK)UW&&TrrzpS5rTA5+;*4=miefyU8qI`Y)-g|HJd$w-9^3iDR z>s8Nh*X_^V)wlkQ3X6k6s;h!f_N&5k9a7KT&IIU2XM5glY|z=^YV=C-)T1?#)1IEM z|93k6|F`Emj-__GoV$6<>iU+qdFB=Q@8#|Py*&Rv#Wg-nOtm8CjzO}>%+xvAJzLMQ z-v4pom%y6u(q|_0tb4z0b#GTfg7Bf3LYdpQxU<$;9!;HhZh5B7`p*%?lX#yfGdLV; z+r}Gvzoho^Hjg$leen$&jKwZ;ESRT!?)QVaikJ30etUl{m*BD24*u&tUFN;FBA9P; zV3p~1$@RNDR8(BurcLn?UY8`Mw&?cl-7!Tmlk=GbWlz7XvGRYMGl9i4xp=n`!-4ug zZ{OeACR?^!Af^KsHPwuky|?yH?0jwF>+j#+wJ1=$nd8V5 znQyt9{WWFh`{~E;`=Y0FdY1RCx8=L2#e=;e;#wabnCRsFRyI#J-74PrrU3w zI9|zkX;!XZ5%kn9r1z7Pifm`W)67{xr+)vv7g1efrF(i-uw)x!%lvtMhUe~8C(l{o z>Da=td)w>X|B~K5ElL!79izMSefzdtlV9%dpQ%pYmRq)4);L~oy1=a2hYw0>Y&|B? zzVN)Iz0Do#+ad-j! zYsqTOEE#>v>3{1oX7#lO`@EJ~yLQ!=x4-*#uG;eJ+q-YizCBxC|MT?s`rmc4maWVF zz3%ePX~fCh>1BIeyQS75@%U#+n|A+{nWlygSFN^& zy*^{WbFh8JBtDO=p%*9RuX%WC)%HHktm7xA3NAESEg08+$*O0f64N5rxnEjT&sZ)G z3F^D|JIIJ3@9^!Y>8YNYud*D&rB-RJ*~O$7BJ1eIDInT5sd(OV)=rs;g<+*iXLNL) zf97=AwQhG+siWw(Go|y4L*Ct57shAz=6m~;MO*zm<*!9-$}pMrIVv_wQ&aUVQ}8KO z@ifkZ)$+IYIXm3b+ZW&!cU?n4v1Q-;9>dc%+zp1;q}Daun$Otri))Lczonp}Yj75$ z%8sHYhQPv;(Yr+QLMmfhUwz|X5Lm!9J<8Q$=@c&|C*~!^vB?6R+sr3!J1Z|Vb;h$p z3_&vlC%AU7PMEXNC8cx66Z`*9^}qA;Z~gGT?mzqUy+&Hwd6KV$xTUlSINj%0Fn!Ju zQmpm-^yTH}?YH@s|9K|9|Ig9&P5k`){`J34f0wu4Q(sZB<=nq-<@^6Wo1I^CIYWL` zL}~H~spXp;cf?&N){7GGI(cPF+lFbUm|99)e=7@4JSy|}c)z@y-29?dg=@DmL~YSC zS;(un;mYHe{~q_>-&_BG?%uBF*`Zmc4Ti@eg1I>~=HC7FU~m4~&&Mp47QemjU!!?- z3-ce9v$t=h-MppvFzD1SpUV~Vj(@iDw^V(wXtmbYS(f*IEX&@lXfn63?rezEKOEsdyOd|@{?El=INZ0SDv z6E}h^O%6`jxzqAV(EPrBF1M^%-@^6p{rh_Q`1~IUTmLHbx38q=h)sN{`{BUpM2IZVx`{b)c*}FFAx#Z{NM?N<= zrV$YvyJhx)9v&gx&x@|DUiVg@K)2KDuF!VDpf3IReN~0uwWdYwtlRnj)4#vfN>!hq z-olmALDZ1UZd$+ZqMSrm0&EEmpd*__5N_^+wZeLlpQvz*>@=5kC?Aj_Of;k_HD zPW4pdSimv0a-Mehu4mn^q|BBsF_d>n(e0RE*e2<{($FpRg+OAA_|@W9=_f3&Z(Epa zEqHF{lSwWMraW46^_`b)!|7c*pBITSO<1B6`+S?H?4xV_Z2}Ah^Oh}|buQ}aq7CZ; zUAA0Y@|xczWOMPl(#Y#UT1p)SJ}+x_KACj;f{AZ&@6JaL{!Cex8k@bgYlhRIb+4U1 z9E-RX=dNB9Q>?e@O~B&q>#k+#G9}Did+4)~baT~|rE|Q`I;6}!tEuQ<&$IQ)#yO4x zofjrB1{`adm!P!Z=4C5>4@*(AmL00|4VTX-*sY(h+pjly+h^Ip)7#E@1pB?-bxQPo zbQYWQF$L|9;fC%9u37Rvo}e|ATT;1I+t^0EwnI%pcww%Sb=AxZ{KW4EbIR{YKkncW zur=4a?zGi3ZcC^8zvJ?UVzNA68~LBEh!9Op3nQ<{#qwrb6e{DNePxm z*;{7t`3g2CE`Mj)ta2b`^{g$7J@bqXt@*TcZOO~(#hrQ{4Ne}>$sCKmear3=pE5;% z`D2^74jxJ7GfPtfbB<{}-Xbuu!RPUk`98TN3C-fl41YeoQ_w=Rjhu@yf)!h1oi794vl)u`X>yhX4Zdd$&bhLZ-Z1L=_#}-k~ z1qynzb&p1PAA8~OtHw?`xkT^%oXclZ&h4BQ<(0R4-8HMeWWB(wUG)_=7$*QaKe%a>G|Iv-Rd+o9Ly^XDkd-KL?zb6zg z%Crh|TJKIsJeyQ>$#uWsvC8=E47%4#=U&nac_HYOytdEKaXkl9kk?&<-Im7TPHS41 zGKD)D+>_4E)h&&^mO6LsR&(?ApjW%xvU|((l1^C0>xrAH=^H+i;Z(5b58t_Gn$oH# zd)1q_hA9PkrmrFT9vrSUBN3iAR31J6^fJ$tKvu6p}R_i6hF ztBzZKSu6?*{{H*$?sa+myZh%~yBe!;8w)*|B`0TZ{W|bO<>f8=M8bmFqaFuNXyoZI zOgTAe6>n>?Uef`dgWGd27wWE_KiBl0l#!gE>qezlv)rC&O+UT!$(y;`%|6KbT(|e2|uPx&I{P}|_x7)AaP@0~l!+F?f{nja4QyjLO zi+bK%zKX^8`hgsmKfCw;znfV9va?mSpv!@80j9613;DN5Zh|r6f-e8ACuaAn>Y7%N$uuyrz<3PatJM(ax8P!G0o>9P9N?mPb`nqzg2sGZ^h3| z*G_GH#W*)u?)d4ye`|GB*JV!Y2$(yya$c6Ee*C_qWkEkaFW0yK^TT=Fn>q4ramSbV zUfz-${py;d*}?6x3WtKU1*V?rdhvGmw|C#}o}Rwl<=N@yza^9peB1Qqdv*5PhtEE| z-F|;vp{1Sd@|eA!Tb3MOWM}p&!|d3VsD#t2g#-iFz0NCFI8r%pdTFBG>s_xhw2y9C z!yWF(^4#Y#o3}{u%B5LPdLLyUuMD$I)ZH1DuySGMoZ=0Itd+6hwOg)uZPZ(RtE;mo zD(2R*iwD-`R`V(yIA?xrk$bvMX|A+WGB0D|Nz12;%C_uqdwNS_?cr*1r^9ESSsl9c z@`Xw7Ws~J1t2dZNb8#}5_$IFrZJa%OcKP3i4lcdU*G7JeJ96ADC7a!{dR1Sa?(pCV zW$#qFv^Dqoccq}y%gWOy`reWY7rdBZ=o0cb0Wsh}dRm zoo9St{x)?6h6VRTUG6Sr$TDc1wA#bOVv)|#7j11kLP|j^wHg^5Ba*`UiZf=_|0w_e 
zPX6CR`M5oGFZaqH|9HOs-{+Sve|$B+zo+i+GS0@<$GhbEm$&QlfB*h{zI}aNRmGa6 zudL>?boks|;M>h>{xzre$H|3l44d9cvog-${w&s{y}~l_ddK4VTjPIS|9_?DH^_6c}_&W6v#r{P^!Kowgj!nc1$-_m-#BL|%*Wx?Z|LQixYFY1*92udlEF|L=SK zkNW@r|Ls@!v`Cg`W~S8txBd0I_U|u0R~EbetNF4kCJ(=^zJBfI=a*a7t?r09W;p#; z>0E{>yA<8GnjMc=vvp~h*iGKEKa0+3b{1SNV+h>Q(ZEr0pU3+Bp38s#fOorp{CF`c zx##V#KJE8qzqjT3FaP}f{QQ|me=jilc=%y~#hc`w_xI}qo^Jjg_tE&X&FWu%;eQwA z26?Go-}ZX0GSiBsPbOVYovB|uEvv^;CNFMZ&Bis_3_OP$7!2Ls9}bV-dv(I^ZSU8X z=HJ`1Z|~mS^*jE*+x>n`an^#>XEaKs&fC}g|GRzvkG2c*{QcwK$ch9mDW~S#U8sDb=oj8 zoB#LU4YzeqfBNuYgZ-OdiQCsroG_=HAz{tOW7YCrCoRR5SQZHE*?Ldnx9}EU2TqRI z+o5_(uRZ=4lHF@~ElS`dyE@bBO;yj6dMrcV?`3#0$F%sMl=b;ypN-F6&2d#p&sA}8 z$=?1j`dnzgD`Q~(w0FzaaTYK{Untg8HTS)2BFE4ZI^RQh)`gY}rMXXctutKrvS!~A zk*)3*d#!Ub%r{=MOrN<_7+OB(4P5KG&hJ96^ZvIpdl;rf&6vqky6*Y~oxPF<;YuEw zM`ujA>e42Xoz)}VHgD-NncbJ9n=Oyc3EDfaiep7&z@ePE2eUtWv|WkLT;trd%Q9F{ zo6A^OR|I_s~zpsXS_ZW)b-vTNDCJU`wyICvF{Mx@) z=KH413bLBpcmMC#{Q35^^QP~dvM90oR%6BU+1s*rKfL;Sd;H$t&yJp+e$7fn@o3h^ zv_Cg}HZYlrPV{ogz5MO&h9^6pbfrz!;p|YWV_eApub}wz)$sRx+u2`-$fTFHZ7f*3 z)oY^G(kn5NP4DOAJ3rp7#*=&FeKeC_CS#&+*SctPr#6r4^RGp@=!vo{e7*ZZ+vS&E zex9Blq_m-P+ByH{+vE3cJG1r58L7ofqh|R|+FUnL(645_vP(>8+D#e(b^pM z&WVF<<&;g!cD&6=j`_66dC8pa(z034KEJFfF5Ym>@9~*KzqaU}779*!(ZLf~TT@{% z&u{tVjZ=O-J3D*d`#D?BUH}!x*}LWI|7?73&HM7CW#+qc)Ai%^reBQw{pjiG?fG%- z%Vs{mS^fXRhXYgd^YS?|eGQLU=y!MU)RtG5e=ey1|D~Yh-rl;uzlwI=vA3~&zVGih z@!x;{t*`%k`|;z$&mQ!Adb|DppNI1QKK%V||L1%CulEcIYo(XJeEIUnj~@noi$7KJ zMxLG{RPmm%m*MKE_;>R6_Wy5Kz!0=zYee$Q*A{&Y7fNeAHJ9zk|8{O?YOm(8DPC7@ z1?+UNlRZD*t}f=8X3H^l`917QqJ)a)PIsN``|s29{WYJv*YDoeTf3gwwl<{KF!yp# zu&Chs=B-cWe2&ws5oPdNIsNoZ$)@sSQOOg8*S}unuxH!bTia}9zA5d|F}}{lbFCm% zQ}b$&#LWh-~{e-IhGq&ScF7`@`i@o?C!Up8Z5rgZj(B}-4a&GGs(`CzE-rM&xtLyR>AIsy1mR^bJdh}Df!|>dlcV*_=%~L%1 zm#)Zo7L~nntHB0_&adZoZp+<$_usZ`tNh&imCj#xzyJ5;aC!afvb(R%^XC+|oM=!g zowe+x5!?L;lh2#DI#&5cym`2xKhsurKEq85@!-0Onk_L}@rJXiE_CN~+O?#7xyZI- zTO7;v(Cgne->N-T{CcbJ4~LEKr7{D$H)v^c%;CP*E?~BNx=`utV^RJorE;rIuYYax zDeH7t`jOBArD!RmyxaPF9b>p(l$KPSeYM2*hQ9~bZs)Hn7?;RAe`(a4yfUY^@Sa4| zq{Vy}uBVC~i27`_Zs!zn{rEdz5czyw-*Nx>_V#~&JYM~*VCJ$N>->*T4%++G^J?p& z;%9GXhhN*-e!N=tzIFEQb;^q*V$V$t|MtLJug%A6=aj6qtO?0Entwh#-B(9&2^_M@sS^52Icz${AWwS<(z1!Ev?KL`PV`HOpx~RIi+hrH0_SSj&$(pJ2mPuST z@pG>{quF?%Zh!nV)#rQGa2ub$e53U3hSxJL$N&FzJk|4B(9=&JK0LkrykGTtX>9i0 z8--T?Iq&VQ`S|YJGd1;nyLZ=qe;2E1dh`@i?Db5KWX+izx3@{Rt(&s!Wz@PFtN3M= zE4NA*#a`cA_cbf_TIsCdQ*AkyORhyF*A~R_Zs6M2RT%pEugSTVq8nd1GMeAddRJn3 z{&UT~Inz@WlRH*qMlF2++CzQX==G`{nYXsRc2Rk5I$h-PrEP1Cly7Z&y{l~A^mVW2 zez%hx2itge!ktsQ&#*T?Z5fBfXkFx76}%4c43QGQ239@ZEK%QnXn@B z*1GJg`y;fYvu6Z8pM0xY_w*vO-Ieo>6^0o0brk5{n;Dg@tEtX3p)}G<^W4v6xl>NH z?{(aE3?3O$Vl%I^GI*WLj{3dt_3wKNosG6Wo1@HS@m?@+$C4e_CY`LzHaM!fY)!G` zx}Ju6tS%ZsEi(mlXD-W~mCF=ZGBu*8Z{?9v-K_;{BLWTQU7MxL!fYD1#E83jMKCX8 z%h70ekK)K(`~IJO|L@(=)6>rg)s$D4f4>?YZyK9E_gstP)l9Yt0udJ`XfInO#CuV| z)|_+0Ny}9kb;~xMTkzw-?R8f*#q!tbJ__2oM0aCrq&@dmF#!)vEnnx-onAk7?W~oY zbuI4QT+`3H)@aUp!n5kr8mEzeDN`&UF zh-;o)TCx1a6g8InEYJ6BJoocKjlr$zi<=Et5~PFj>&v3oXI?)Yv-8!u+~&56VY*ub znHfZcZu~H5Sg~&X+IkhIIyDfmMXd2b^D9l>-&Dc zdb`?u{jF`>YtJt|ce-ZZoa3iY=wzJ;YG1T(hUN1ZuluF#hxsNfD?DX6Tl;fcPws5C zi7s49iiU!g&%d`j`(;Kxy6QPU+w(jB_v4Q*uiy9g+tt<6FWcDOsn;}Rew4cBPW?|K zKXH}Eb8f%=wR^Ytz29?;uixUYz0bdO+MJ-v+ddohC#$YHct0d9yLaQ0IkTVtbnfau zo_u|4?tRw#$4uv)Pv0vbk$tsq)(*YTTh0ZaymePvPiR&`kFRB{Ui$Oc4{^J;U9<9g zeD|%cf~MQG%5uB-DT=FCU6}IeXNliVEAQWTKYTiR_riCV71BHEUd%1l{B-y1!u$N6 zjr3nPb)36dIZto;=5Nl1%O-IvRlQz&f79YMTc4~E3{#u9@mhO>$JP+nCwK4MdC|N4 zwN=`@=RdEW-krEVSu8te<<}BB?(VI6OH1~}B*n}v=6ZWqV_gbEAY&EF79G=dA9r~% 
z_y*2+Rv9~OStgI9*&0p-7gzn-=cgmP&sIq4KW^!m#S!NvdAv|Ye`{Ft7QNzAM%`CV_;+MQxbm%q<_b=|e)iohYC7P}vH5^D?_W)z>Zd^+daRMSPl z{MROJ_E>z~)s$81k*wg=i-|pf{Kq!w-0EuJn%ZmVcD#JS6&{yMGcv!*MI_gJa0$s` z^)Nj4{r&y=Utctz%LH#ZS1CVZ^-iO|4hg;4x_7q53OnhkPM1kGNeK~LwfXy;?q$bY zFIYS;y|d(Sw35d~riRUY={=gSek@zpy{|!Y=_wg0C1+F3pG{NvmZ{oIS8;U`&eGhe z5^d_L|6efSowT2?$6#5@j*Bz52McH^O>k^yu@{z(srCB$^Ni@URZQQW+iNxLVC5@j zUAJe+u63`Ce3yUzSYze;Ic3e;-FNHiD}KIOx^bD&agoxQ&-ZShw(RSYWg_0wYywp` z#>iwpDOsE9yLo1Bsb9$)#S-7^rLouF-da|CuUTMXsF3jVA3q--|8D;;;MK3E zrO&uG`jmBrGss{>HbaDz}y>|0;@O^a|I1?=QVPH*uMH&W)XFmJohl{NOf8<%z4dPh2bQa^>R!u9@4)OMYiAmf~ZK zeE##tk5noBSDeOW#ReAH*?K8!zUOV1zqM|!t<0`{{+jhR^&Oi2pKGk*i_Sf^2p3x? zDljE4;q9@njxNvl8q7Jje9ENH=c4*qj!C8MjM1B=r=fJNGBNYkhpW4{pZnSVTmSgu z%vsCkOx(kLPyh3rqF1|~z58b1J6+_n#o7M;e$7RJyoA) zY4<-~v%NR*2IGt%E+N4ac{jG^iaV{ayr#AD$>#|D%QK2CWzT=MiB7k#{inkebF;F% zy!`feYbBfc{gaU^Y^iwB)6XbIn#^T%EJ1Eh@Wb z(aZRb6$f`*%VR0e-CBGyE=vT9ocFWo(Po96^&>86HOut=8Q{Vb?etUWy_$Y<-6ZJBv_e0D9L ze=g~?kCEhFl`waw(elrgljB>rWGaLzI;2c14V-%-^qRg@X0+GJn5&gy$s&xZQ#qV2 zo3&;18XgO6sV!)*@qLwEekfw~1jAjwU6fL%NGc_$F&MdcoH13qw#Ds+(wZX;CwE>s z6@5#ZyBKtr)R+AKpVsHz{`V?;e_33$p3sV85(<~T_$V>0ls@P&Vf|;lxsxw$JHYtx zY^3W3*+o+$Pde|M5*6h4+QRR%$?G}KYV6j(w(@;!xR?{(kx zEMAvi2wRL|KG2tmoMMm9>2ft^D_Ci2j;)i&PBGYHLlqF z`_t3Y$Nl4rde`6I_Y1TS;dK1pMH|Cb>733o)qWVsbzb%Pq*iUK+|Igf^-dLL^(W7t zI#qg}Wryj3Q?qtoKH63H>(9q`8_NBPMOhs7@V6|9&e9B> z@%(4dPNADouP^^qW?*PMSIpPme2VMWy5wh>F&+sLY>VdRWHWfI?3-SEduu~L(ZitM zi2{tz%|6cC=f3mFnQvDMs*9_CKRSB)@LO5q|EKr=0d@WBS9yJ4?$hj>uKBdYZvHjZ zXFpS=kuY3P{`EvE( zO~uX}y=oPm37VC$VuEZ9%QCZdPj8wMBQw!-nbC0x(@oddR*P=97UUI~>|@w^VM}jy zP1USrs?TeBzb{j>NY2fWVtBAhi&??w)-+8Y$IshqJ~-bm&9&C&7OZ^keRBHg8{6L2 z-M4yPQnapLW%CM-j2G{F7GC~&=J~5N!VOVHD@$X=*K+w@=G)EK+2C_7a&`FaZJN&| zu0PMcqqdewpi#{vm)^okC%XO^m9P=556--)7HIB|m(sB7=({3}@ zuVaO^;?z&Zn+?O1c}i{8#kLk(@PI}WrQ06X*xB0I^*vsp=ahTPMANO?i$k$u{U;sS zr5b`J&zDq|_};#A=gzlS@#HJEsA8i99o7xWhD9=N5N3T<3T!U;jh#`5B%4Ki}rB-KvvS<-B51#-g+D zdzVx(@NT?ryVUVgDpw~1XYtiz$+4$R;{2X(zU81ledCk}@5pDV((gE5?AyGgr@`e4 z+jA%71KBa5n>;!SY9pG;*T?OPy!Lf{{cMw4z5*^s1h(dKH@UR1Or1Gpot)G4Z?(6# z=f00sU`S+9obudvGJ})u%Oc6=m7E861bCmbulats{rmUw^78ol&tJpi_uiFd`+AEWdx&z`OhpKo`c`@w7T_51%llXgz?JgH=~p>;Js1LKym;&Tq+y&6mn3#!&8XfK_e z^jN)c{)4TT1oJ#<1C|>o8YxZU<5^pJkNqtF-doEK<^#D>?t9J&XNZ41nNzd_pN)t)n#Q@5oAa#R zN-3c?yZzhQ+1Jlir?1tkKL7ml<;%gh+m`Xh{=X%#)YR72?#}&s#}&@va~5yQ%@TBe z(WB}z?bZ5!vrqrrbnI&7{cU%zvM&7i{NL&4zc2qhqLX`FG3c_{FFqm3vz$6R1*XlJ zHN}|e#@yGx-rWs<&+_2&^z+X@XUz(>*G~h-3`vY{Z`9ixAfEX_jNCJ zy*qo`TyyHmB>@^Vzp$udTiY>Yg_0du-$XO7`9EgAu1RIg4+bU9)mI zW!ZOm$wJ>_Qm!eJ4P7ou?bg#4GFzCKrc`uq-Ro61C4Db%`0%jc#@jMOxA$|lZhKvw z{&v^8=ciNm#9XPowpEv51?vV*NAGYB-`m?>@9KHHBa7C^OkaPjYlg@82r+^taNY?{+4<*H^s>_dQ&7FMoUN z??>-kZv^nL ztm`t{J4kC!;k8Y%(`OW^F|GCaY;-MZ?`MVztGQaQe;#Jjyt>9Q(&fpTt>>gnlwyi* zW_)*fm2s_CdC@9ao((HXjB|pFg0p^pdH8t#|F`$!_Ux$s`%Bl>dV+&Mhh_7Dwy91k zn>}v^NHBQCdFyE&4JzunTp_7H{j|;b$Eh_n6*hDI3wxuhPF`E}aB9ahftc&1|DL&C zFVDTb&H47XyBGN5IK^8T4OwarPHeqZ_WkSbeEsL2``_!T%sW^_68_w=c)5LJ?*2L{ z)kRtsGiR>f-*sN=#M-93?|wooQ_lX$;o;&k2r;ax58R)cx^Iquyj^X_>mYA2m4;PQ zyqmWyEs5s6q{0w0U9=%?e`T=NW(WH>UhA@Zb{3s0ePMZQMb701v6tBx3eUAlB^4-pY5o^Xe@;Lj79KT0W17(L4V1<=^)2UAA@q|JgD~C6^t3 zXCf=OQl9Pg8ymmrAB%Rzcu^BUjm@1w>-gWM?jsImE{jFlG%&!{qBBvGY)H$uC z;F~dJO@V7{MVj8uGnfCQ^v7OjTCr|T*+S-qsMY-Utg?HqMrbDcKHt5JbLWznNm`G0 zuPeP4n)X+~(0gra+_a!myY#je-8MTW5jf-WRK9i0+cQtCO4)p@<-(Py`JbO$E7cA9 zlA-L}VZn3w_4W0?>t-zbS+sM`x#v?%vSt>aw2Jc4T%BY4oJsSe=-L(zL$#w5Gjd<= zTKC#o@WNGbW9jT$%M7nc87+MhmEGf+s(J3_ludj&Z`q!)WuHmoagBK9${K9t#^<}? 
zT*$h6(f)lL!m7?!%4He)9-oM5RbAfXbWS4iT&MQ+9d0h44#u;#Td65JdqtaX?TRZA zNL;*G>edTgrIZtvtAkZr8#wYxc(UKN^UMGLT>tBS{omR5_tgBYdQqJ^@wJYJ_q9!W zR_>vO#V0f8Ej#^m)49)5k8RE#e_SZDyfpGEOB+vvLX%#FKnjb>ZIfJk9_zn`Z}Q&f zegD1p^EruTwkNMvxhfv-BTBa#)A`t9h2vLn_4AbbWZy59Bvo2iu+}6FYq6< zb`n{z#Vvv3p!MrDA6b^f*gHLl>ht$MewkCEbInFKpwBznuB&!@tk*ad-as z|NqgSzrS+2SB&9&e{qv9Z}saxzFWP%cdJ5>wn2#FhUo9BZ?B(J?A6e4?T<-Mb)16j zcc-rDH=ec6?q#Su^nFpzo;ndhN0Vz=f|nN_|L;1dF!xaXs^q=u+Pjp~By1F6f*U8FVHZ?yj&ObIYVG>Ma302(85ufTIvd4evvnh;c zZFC!^cnLO0TsAqo^h(`+ZeN8%L0rX(P7h}Vuf3{t=>l(CkITiQCpYQEUT@tdJTc1h z*_J!{%RjHYw(7!b0jcb}CcXXsH>0e5g+qQmo4vmNV|TfsUjKLg`TJ|?{{K5Xt4R3P z1-9q13-=XXjk11I?tI_g#_r#b$NieimSxTjSzzn~Cwfj-VdZ>5o5(1na4Ob^hP~|DXK-55MnAI)DEj|L^Ji|3Cl!mVdwB z#^>^$Wh@H5mzfzPKiv~wl6ZsnaKc<)&GolG?Vof0`IJk>QXjK2-p+b(e);F0HQ(;N zFRPyVeD1mDrE`n*mR=KJTGW~C;-mAXcE{emSJkJnaBVi+a?JF(NE83Uy?ggcwk-@h zr>g%thHJTq_U2uSYd%=2IyrmHDzHBvoSNyh__+2&$3r`gEz8`pea7>h?|09hJzFAq z%GUSg=XTCmG*4<_=I(8(&QaU9yuQ?t{yg^fwtMaCvZtp;UfX&l%IoXBlg|tf&EXa* zS@^o0kukaF^ro609}cqL-x|wt_U_xciwtMnoS?Q%N?_jfqtTo1zyJ2T_LSza2=B8Y zMS3o7_NQkv>=6?Tv|77$nMnH#fxw7qsh2vU-{1Yr&c!&V_}Z*OnP4wP7TL@U7Y^g= zL4pC+in&LR70!7s$uQ+|pqHPf{_9ymTcRCOuTExW5IwJPNK@_k9R4`n4-5j2SABUX zvH07Ub5Xhfvev%drNwt&LOA)3{I`x3e?G==D@Z9lbq`-R{j;lk4_PvEOR0FXnHWo5WL58Ja=5m)^84BA_iHYfzRlY%-xhn@B!x4O zGa~wgibBL{`2__V08 zP@MHPUt@g^hKzKZEGV3!-TY&O((+g z=ani}c{aR$6{2|J{C@YZi`ImOu3No1>eSC4C6;q{g#BF`ezSkYZ{8i(CON7+f3oVl zVfAx{x1c3s@ewW!A`Q~B(k#BTHSC$bPuDUmEtF^V^-!0fzLXWOcbCQ{-u>mB+*2Ch zExX)sUEur8hRaH0oj<&m>OZcYd}fo*xf|Q#_v%eO|NP_m`oFh-e}DhHvh14DtYurT z-CD-GHO#qezdeuh{$m+)^KXSQB>C<8`1|j<>h}{cMUV^Sh{#Rl1@5kfcQQ1D3GpFzW`*!#HeZOzF^UDW$S*q$WG2~t@jqP`N zociUU*}{clWxMO@Z}1)#xDJ~8Uw-`f@#V|AwU)cOv~-jRi!~)r%-nNDYx{QdjvFSq zUOO|HPD}`TyKCX=+mkL!g$pVi`une-x_a}O)2VybWIMTR=qomC<7*f9TEpIu8aUVc zO3RYO7l$8SFgvT3ymn5|BVDJC_tIlull=!iq} z+WY@M9Ohrhxhh|4iuXz{-VX<_Yn{36b%6cTxz+EQd6rJ`T6;|*JGN9h>x9InzAR0K zC$*2~oY#Ckb6uUwQKiX-k4=`%*tJgiP|zk1Uf*P$P`3B7l7}Z|uk4x=uzGS9lc=<4 zDcgxRbH3hNE$WkddZ|IUn)7R^(1L|pOSa{f++23OKHK-Q3B#c^mCie-6zz;Lx#j!V zfA_lJDFtgwV;?^)N_@EP^{r*8>!d$5Yb!a3 zMV`g%cs(n8eLVm6?fZXRUH|UgJ1z0-IfX9swuR?>{w$<8G5El|226*Kb{bx7q{2+%-4@EFYo_*y8i#;{r|2{NjGUa(UNdt#bK5M2FA?R$?^O4ReyYR zERxBHVdeY3XLGNYl|{Nc1T1(kt;?N(XF<@4Ih&8Iu$Nx7EpuK6!(zTy71>+1?eJi1 z(Jf94)?KAmqOHGT)8lldSH?PWWa?)f-irl(j61y)=C}4&+`ZVDW0s|O z=TC|K&2@|nOJ3D0uAE=BYmT3Pzvc1tgPJj^lQyp{3xI{&TLB9daR z&(Lx{7snrh)?_;=ESh6dX?AqV zzn}B}SKfHb7jyxy*sV;dsf`Yp0(wO@I6In#co@U@t?} zgf#g*&pw#>CI~XO?X-G7TP?yV%UY1-h{WejI-2S(Z+`8&5|b^fzxY`+o4nR$MV(b8 zTaQgRYgzZ}&&nzCm8X@>wC?s8KASW5dn1Qnk)GkYt*aK?Z@UnYy?%*%$co-eb3(tZ z+`r{mL{V7K?t8C`)}8O266aEMPE-AT?fvgo#sPDqHE%|}o+Dl6e0!VJl-WshmUVXg zT9`k(EK=-s%t=>Pe#2#{GZ`;RZ9W#-Z5|VR-)vn=`Sr9u|Kq{eOUw1&xh$CyQ*`cR zM7FoynL8~H=Jx+$c@ElMy3Tz{)Tfe}rc#%ryjv6wnXWy(Yu#%beNEM8QQGO*YfZau z+r8mGQ!2MyD16=3h-{Pno-QJ%SKm_<6EjPi@?Ac-ktt@?p{3hAd<0oescJ84>Ch2U zzOt#&Vxr1A3#m`%jFM9w7@R#^f_fOe8xAO(VK{YFS)o~~(*Mkv-*^Ar|9}7A;qCY9 zZu9QtoHvtkkKr-v^d2?c980#_RvfIH9VdjQDZMV5b-i*HXkO{Var?ig4M*is>``QoDF}Q{a#REsmb}`xywGjeE9M3@&3~`)%U;LeRn&4U-j>2eL1d-M}mUt zYHVu%e0a=n|0i{iO#B|XU2}i_+x@#%@Q+lzB&(c9VcpI2RH z^2jhtJnsL%LxAy8b3$}u?C+JY`&TcjyWAajQ-)zfr{e$P8y^>cZndBMzIj>hi}GNv znafUJ{yV*NX79}-bAuuyE(i#6b(GsxoL1U7#j3aI$?a{|Vv64G`u6SJ)1O6kH8zS1 zi{~xnGH#Gq^6;6OiQ;oBU(lTzFDouZ@8NO|>S6lyX0`TKpUWLBzyJ145aCy5*jnJl zDYCskrI&l{?%jKx7nOR&T%2`bTkl3K@wn%wD^^}UJ$?PIyoAQ(%ac>xlo&5)t-jr{ zeEDM=-z`U39=&~=tJ#{(tJtzEHu#L{^r*+X&Rt%jv+ncHJ;zF8lQpkazu$9qrOs)u z2E~>K(#`i4C~sPRdB?ibPis~#owY6ZR@lv%$7X(h`sFdd{PWZ+wf^PTO??;7+7hlG z_xI8C`|rw`6=eL6Uw(PQwIN{t@zU6~Sa+AgUdyM;GAq0nZMye+&ZWbfj_lZ$TYiIo 
zcjENmsA*R%?|)m%ux!S&)IP(ND|}oF89w~@65;*!_r2e`vNJ9m5z2nLa=qI0@imx}Q=u37p@jD_J^sjG{UH`~nY znCshK=kc)%&R8a>+ZB;E@7YZSfy9+Trh=z-CEj?sdG9gpsxC>!tfx_{H*Rrt7Ypn> zb@}BC?`to(=2|b2nEKkwnJu$H|U7HUK*>-^m)?~k?v>B z!j{`zQ;te_pFDG^H1_tkV-mTOe(k$Hy?oBG<;$0cu1}3!KQ}-#H8O0f?6h5-njISA zTiNoDEZebc#-m;9+A_3Emu0LqO#$tn=~CM{oohDx_O26~))}g%i8ajWcDuRu^d#@F zg^kWteJNb`1P|GqU;a2aCOq+3l!OcGv02w+x{U75*xAHzeae*w?KL|cdNSv!CmOVE zd!W}irTCKPCMO}Yct$}Xi4Etxu1bh}vg;@ad&|#Z9QJUMu^?AN=`;aGo>SKXxEK~& zh&QRS>4s}12&JhcGic>8y~%Z5#CiDlzTe0Hf9U`J`Tr;Ry?oLUDThTHP1)9`Md$7{ zOU~8ZIxXtbu5*^I2Yx;Mce=g*y?5vSpI^=M|cad`RhkF?epzw z|NZ;DKkiuScJ7p(jww-|S0s-9t-JpH9xFq_$*oUxQX?-*1zW#iV?67mn8J8aH@-c4 zrE7n3h1`YA@687ojvQjqIP~gt@WwsQa&mJ`PHg+V%a6I<$sv~eY-!U9YvW1T-pi&I zW#8JObjZx6C42rlSI!SRPamGYtaoQ)I%|2{qOK{dIKr`unlRP8+>$U!E5v-m>uhue)_p&r3UZ zJ@wN(e_5odDZthBVV2|7wC_*5#r5O%t=ak{=H2`E%L|>9-Zwk0S=#eBBHM6TeN|P^ zfyz`vm-6y*{kVMrZx-^+&CAc9EfD0R>87$~X9h==U*@xzI_@ zRGwC~iu*N{KR!ME{QU7{Wd^D2DR-=TA78#K%y94CU0J^NDVO$~@#lAE`EZawmY-?X zbEEU0H@Q9Bb)Q$!V(B8z!xb~<&y(|iF08m!@k_4wtOvCjv(=8ev@I)?I(~YS&+^AR zjii%%B9uI4c6zWZ&vbN>uhHG=py+blClb>}px?3A>to|6@AlV&Z;oE0{i%j;Zr zPnHM!g_)t3x2-h{+p2Epmb_+`%hgYN4As^x4148Te9o}DtlDz=4-tB7C#zerARU?lZrX zR%*$CqbHP0u`M$=I<|?dEw+EnPWAlNGVSv92vH zk9V(lX0&``0juEDL)M~;pHF(6HuZAq?Xr5ah}NB(wu;GSp1jfaoTDgoF{97~zIQ$! z7G9Ie7GhYr>y*Q~4!OC-9wA=qN~MGZ?)UqDfBAR3|Me!B`F2k~U3qUaQ!D6%)bkjxqpWVumc^$o_sjn?Iv&jNu*lNx zo_)>FA13}Aa}S-()!wnJD(L?7jH~H@3gcTl?ohjzrb% z(z`FW$en-w`RPjOZ?V+|PnI-o)L;_Z_BHA0q}1Y*nLjx4f<(4n>f|smSZJsJ%7tO4 z(PPFsv!1r+f=`OgTW1>;>^8|bc`*IwU;m&ZYfTt4RfA<$^yM&e2L9&~nz)+zaeDpC zKX1GrEL(r&x!TbROS|6Z$tMF@o}5ZnbPlSVZDypsA>qaB!*%zos^y;VIkWJ!^X)IS z=>kPjf(N9W-ZSh~SYvohLVmuVhu`CsvmPYYXWU+vZOwFUX-IF35JQ?0*SYPvzrUTm zJ$pBE!<_SvO?qG3=$~dfv-_?>3}?k0{nI8g)FVb-zD7($};@8j+qE#29ZzVx!+ z4H0--p-tN{ImvB?(KVb_uhD0Zuz`4_L`|t@dr`qtl41_ zElV?J%?Li}>JZeU$g;QZS9Ve>7kfuQ#IL#w|K1l_+ROG^KHrwvt0=H;XWHJEd4Vox z)_i=C(8KF1@oQmGZB5lB>&g47+Iq?t8bxRK>ZTYOGk273n>H&bX8P644~?a-cBv+R zS(Vvy_bs1Rq}>~S!)2deZaMa1P42B@?F+gyr^-&7bG4zhEks5z`HbelviawaNj;BX zc~)sD*RPoycy!jerR*gf3*T4E9$)$3Uya>s^%D}l-_L!vVNSSga_;A()0=dDt@tdu zF}LH^vWvA>Qe}!y`erZ`1|Lwic5N}+2=`Ry17(YU){-+ zb*sx%AaQmvQ_3mJ<(YftoIjo_wf?4J(c39Jic-mY6^yH&@BO~&*gX?I28EynQceli zBo(gDX;4hoRejFEaQl8!HJ)`)xNX1{=x{pZ+QT)SX}d`PDPM3F3)7Z!diN@yK<@ z{{8XSKfXKq?&<5=p6Q>T?l>})XELkUhGP-=JD(N)OPRgZc27??J*BXc)9 z?%B2H$#dxyZ_^WQm*#pM*4^sx|IRZ**E@$ix63?veWd8J(?631drtjho#C3ba{A|& zKYpm{b?5XibqjB}wQ^^efeMr6(;1ue7CCe1J&Ou{{P*9xb8TkFEY5Y?RaL3`E-jdP z`sbvedvD7=+r;ZlKlNtr-S^*PZ%>m^TzAiU+S$!Xb00@_zNo0N>Q{fg#SZ_VFV&z`l-yk2T6=l}lx{{KH71`9Wp>Q&SiS8FpFE}fyI^y|VF-^s-% zH9;e&k@l`yYx8D3ySh63vdQroK~bG2CTu*ns$}!~Te0_Lzx|%|`SIh&moH!b_;Jbg zDeKWbHAFWaFOaGT;_ACq$3OQjp|NQaP zIFld-^C#|(DzPOm1@fqOXOd7TzK6ldj{ekf96!;`mM z`(w|ze(vnq+O;R>RTk@-(C5}`Y%)q)uSb>Iue}uWwKMtPsx`l|-KKVAIC-SHwM0oW z%-QV1_C+puwNRXALGw3hW8csF|DE1HE$G^m3}KN)a;YwCSC&Vw-etO_fYC|!%u*3C zmeZp0^8Ww7{+E&Oldt=F`+ENVo29jz_jCWA{qJ@Bmeksf%Z^57MFm%w$CcIp{`dE} zzWB3-7i+ejySOj^HeY%Bv$Ns8(jVTw4li&2X8(Wr{+b^jFW%_eIj`^dbp5zLudc@* z-Qo8+*z4Y}>4z0}sCA$5_KVq^-2SL|(VR7k>>2IPuFkra8pC5Vi@71Yw@@h|TT?av zw!Uq4hI248NArYptbq;@4Ry1Glx+N^*Y7?5HFo;jeYJCC;_CnIob^!jNcGy*g3|n0 z>21$z#gsTsf0pz-x`i#!jDXDV$$B(}(QgTXu5p`Iiaotiu zi6fHTY~CxMpI-9N`PhtguOpv)Uheo3dt&Kz7c`%6Z9} z7bm=ISA72GLxKHSVWw#@x3=xvUH$RXQ*C)0hAA<}GJT474%K>QmQ7 zC)a*Bd_kMBw)f-9mn(C3-=6J!eT$Xzw|ngJCj!n{-hMOZaVC%A3!`PZ(SM(t`(FOu z-0>pS$t5e7d3*QnU5mcy zr&W)cE|oL z)wE4nzFrw?SDra-v-;}w8GbXo^=55s=%`Wc$w=F>)r`fSr+I7G?!<aTZh46GGsoXD~FvtQ%QV_I=5maN(7-Fh?f{T_8C21O@h6K4;TxUBz{R3?cs)^;;RAPc>}2wJpG z;?$*6a$9dss92fi`y$ssEnuZAtE~a;+YtMiyMEQe>zWzhbP1A 
[GIT binary patch literal data for ldm/modules/image_degradation/utils/test.png (Bin 0 -> 441072 bytes) omitted]
z%+DDQUUR9&XYW{TytV56!ez_0UYqgu?Y*4^HnLlb_llGwz*E;@zcWprU!O%7i85#*M1IVNtrL4>|E?=Uy+@(cT-Q_a-OM>-iKu z{pV9^)|PI*xg~dd+}hqu25&vpu7lL}jzZo?7vvLC^Be{GNA)B4-%cdv`j zOl8O_*tlrIxwF2vy)?Z}YP#){OJ>q!Q4mOCJdv{@d{XR$J=0T5ZT(+cD6PH5b$!W` zPgA*%{>|4^y_VIJxu*2q)SBKaPiA+z3b-s;`)=2}qMa=AJNS1WIsJ7NbKmzC5#jDg z=bo)WUOU4s9Mf1j)y!s7Men4v`wUB_y!?OuYJFXFS#0>c2e<0v%g-8rJfy&+z!)ep zQ)p`BB#qGU>nkpPJzoAjBQ<=POW-1A!Fx8vcMWf^yDERJ_Jj;?O{XBok>clX_MVxu zU$a#xh~uC+zaXdTglMy~)uyRio~`c)Rdi`-P&^Wqefz8L?WI$kc`U^>l?AQRw*+}a zc!dUaDNQ)edU{e&(K+QuCc6CmyT4|=_|4m0!l-a$PfP2K0QQ8nTklC`SETT?ZU|Fc z8WvSvCDC^HVS$BA@5TLpj_T{_>FxXf&;S1am;Rqk&LwK@EPHFPrtNB0ZDHZUDT#^P z*A}fg(=^5C{5;$E_2$QMrPKs z%-P%C?wa#?)%*Q*UxOU}etCKM@ZY~CeXq|}ZH}|Id1HB*HCs{9!@=(>fBlbU(|6y# z88gJ$?~Ga-CO-Z3>)Gn_?dpDgco-bhm49pB`w0duDlAum!e4*Q+8Q^la{F!H&r^yS zcOmu||B<*jX>n#5sPxT;U7;cV4qlX;G2wfE0nRw=%>%J-D% zau1H@?)@jM{2dr7N=vuijr;TAFux*8>9_U(y`|H%T2iil{cjTElz!KM=k-@NNnZvX zm2;X!;cK@FGQE?3$Jbu#HzSD0aM_%oGy6W~-d-|gS?0Z*Yd%Xm4ZRe5O_>trEYo^b zSs&B+dhhPvyYJ>5Z)e^-wNvBh5zn1LUg>j9!?YrcR&I@2ySF%f=PW}_SC?16{Ww=H zHHgxFXSYV|5{Ke6foGLniccmj&y-s}lcB>v)a2Oawn&aZ!NWGn%l9%1FkGJfBX-@^ zup@JdI1**t=N&#iVZw%N>7^-}Gdqrb+;XP)I?sxnwKFF^3gTM2CCJM|*TLXT$*g55 zSFc`u`gQW_!~cH#`pWbXs6 z2YZfBxsfJxthvDa!mx~!{eqQZ<{d)Cv`#(SX>wjIYD}P{NoZh?HO{>tV*N}~o%R%q-$;j99 zEZ1H&@mqd*W7wX3yY}tbw7tG`|Yz;sXcPXC(1J?M=2gT`t6W;-&s$~ zbOsRxpS7{Kx8%CF+%w<*=c#`9>WA&}asU5)ufK4sz%w{IHfh7Tjk4}uPKONiKO1SU ztSL_WVD5W(#{cK~^Y+*L{PXJ7r2NULv*g~%`z?Q*w>{k?{`R(-`{zI3DPKF~QpW4O zU7GLt+YdjSko2iHdsZ9o3_~}CT{|m{dLP&A7v2==Wq5tgPpNy=`_9j3RcN`9r{8vU z!B#Fs4n>BLwGr88HK#`P7#`a_C%Z?rZAW!Q#e*3+$q|2&KzFOYpBez7Pn zIdLWT`IN1F=K>R*JY5@{RlY*7syEe^jXu2t>i@ZNgOOVev* z(4n-wH}mxS+8^&Oe}Am-NhxRkUvJINdp>kc`M5C0U&%=^SKX>EOXK z67pu3yl`5;NwHNWG9@rdb8U*Ci^|f9m>HKXXHC*N&lRzYqhD~kONd~xUerp5DMfjI zb}E-GmB~t7E%yFN#h(>{*6$lPSIRLO{5h$ix^#j{3(Eob62*%*EspwsUR)C7(RbdN zNoiYvN$%xsx|bbpESMIg;-nVC;L#8iG%?6*-%5|)hwYXu>758VZ^O%}K_JqhHf7e< z+{>YhQX`KQ_GoG^_0qANe$3oUQgO?Kj;$8YTSL6E*7{!Fu`c`ewq8rE#e1^DRzG~b ze*eqc-@eYie*gc=--^EYejuV1sn@7I2RdwP1jtpD=% z--qwsJ^XJ2f8xfl<3f>NgS_T0zZkiCYS1Y;zTFa^&ExBHr~LWy(o8S$g>|OMZ}}p> z<%M&Oy)@}%c=YS!*TaYXJZB~)J-uOW|MSg(y<-j=(&Z0}vrP4X|=fB$Z`|MRhbd%T_f9s8ElsP)%opRL+`x8TFZ^1SN8z}5-g zdv@%4cWc`eBi~>j&g{3p&PnJl)nontcl-Z;?e=x~b6)>GXnHT==GL`lTw)4BmrV-m zfByN|efecYeSJLln()^lmn`NbvAmZ~J|VGstB0nt9tz%)lO3MU>sFg}Cevw=&m!w_g6#BJor`fL( zDa$FZ3T&j3!wwfnEe|R>=_-9TxowiSmnR29a@N|{S6|)SoZfzzkwqeVOVGJ$O%?7B zdp`TGTOa=VtBK!bo2hdwMQ3#^5%NjX`dK5WwNx;B<b2>9B$;-=A zZkI%GtHA8HtrJ$+*-Q~Qchm3Fn%k?kPMadQ`qC}S=Mf&y=S(^!@px}4ciCk#uer{@ z1tymN)SUkN_uY>sXFm%~V^LT#>00FbJyX)>iYsaL`rNsnr=OQM@7(84zrONc-owGP zW{RS4Acx^G50>fE!(V^(;z)>uX;~mJ^v_AM1iGI$V*e{ ziko?F{MWx_yQexmo5bqRpY`=zl5><~dd|AY9;d9AOTPLRS8gsYNPRFo-+zsmf1_^6fYH1VPZJ;w&47Xq(2vD z9lIZvuKTcZRZuqLQr&c=gix6EBI28-tr3R8mPoHksOd_6_vvyHcE?PC#v_aBZGo_eXg(NyT+ zt5;dOZ|XHJecl!DWX_)Q!>=w$2v4gl^!9Vjyt}2>tcve*)!tvvW{02KIbA>ghycTf zPp=OBes+1ezh(c@XZv1XJA9$^tj+oN_x4o&fA;n2*Q?>{#Cao3`zr&ivu{qpz2(}dTzx{Q){ogWE9tH!yolw(Mp8NT7e%#)Qk5_k` z;or^fl(px&*VQQgrjqe)j9vuV*LEp7e41{=au;o7=Yt>|WOozIMRFqh-zt zf#h3n<$z4CiGe^BS!Z%KROs^2fq+_(4dq~d*h_Q~ z?%ln=Haz?NU-SEaT3!}^-n2v_d#yKz_l%`XhbCF9pP-<_&0%m%fKkc!vB;qs3z>WM z72FQ`aW=pG_y0H=t{-PGb64)|vuUfR$Q=K8xSc=f)c5!I|9{*5e_oJO*y_ZsyLMC^ z5>|1sndi4gKzpmy+Ub>jhPh_Zs-dSmy+Vr;PiQbQ1>83A>WDJtIM_8wm8DefYxo*h zk7=?}a{ss)EuXjFeOF;4nQS&&SLlL;ma1m}N8+6HWA;|>t*(1&CU;n>Ki@m4h*R;{ zj=1&3y7Tk*f4FhA`v0S&+>5{F?VrDFBFC1iTc^#L95hM%t;DKrZXTBU!RI77mP~SK z=$P}dqCLVpYFewm9LvQL+azAYr6J+jmrZgzESFjy;Zc~g{an;HU4f=ar=(7Ird_*L z_u|$uVMgaa#|mdXpV_}`S?=zA`}ged(bPRHlpNt?BiHY+=yjFCjAtvSW%bw{R}5oP 
zSraM{u-5RY$Yu5=s^<(_HV3$BvRKLWZw<-)5+1+z$FJAxdpG|6%g2z^vnqG%wbMrZ zk0qQ$nmVg>9$q_l>y1(NXTJ8sZ||=CUiZQ}dG4g3?7c0m`D)o4-rhQW+FSLji9f@E zV3+qLU6#ipvK0?}G}+|4`L*@CTbvx)VMo@zo>T1O;cq#&Pra>7I=w}}qr>Hx#6s2I zDPCuVri7i+n5ylmI(35AeBn5Tgv~eiRs8$%C`s8-A?)0}TW`N@pPxV7Qs?p{?~74} zlLD7;1PKKAacm2_-#s*?!FQScbrY zwZ*FGVFg*|B>W`hN+ll|CjMOWT9kpQNb}5V6%EVfK|O|U*+(ziFg(%kydH4Qa$EYr z#WEYOWUa|%j1X5}`sUX@`L?b#E=GGqgMabVJb5Dcgqx@3;;fy_GUpF-8=X9*(GaxS z%+1p?irL|m;KUg#&x?xlTm4W}TO_f_)U9w*m8NDY-%6%~U5nT7YAo#$opEWCj-ra2 z-10fc3ct7sE4VwgPw3g2d)+Es>VA&dZ126bH*DX?@riD{A{CsmiE%|x({?w`PNykz zo@bshJ^%Si>D|xI{rw+*y#N30_V|5wqOzAZtjnIe`CJs^gT1eRzx$LFo&E32&&$90 z-{;A{i`)C>Sk(7VSGgH7*K#IK^b+ixQ1tpw+2+`_Z4%RivTtwuY;*TsUEH*zk=biq zh1w54+;CrVty~$uq0YX->({UQ&)?f&SoiZ&>bzxNt7=PYTO;J<-Z3Z~3n&(2@#1>^ zw|lyN&$a8})BW@E=EcN+jh$b7Oa9ycU-|o2T0Wgq|6Trn;r|WMP6C$-%2rVD0SAM*t5&fR>!@AtPyNx#pZuK)Y6{d)W1liPRu&(Cka+QlBA z=_b%5Ft<2zWyq<&hyRA_$9;4D|Lkq_^Eq3?ICzz8H*K5RP@IzOrTH{wW7gVM7DlFx zNoyIN)Ee_>zhmk@e)(U;lbOjqhR=DNI9C3uGdLEuWX|Mm%U)MKkMXmdp0rl=NMYoZ zDgOEMYU!qtDO1ixY0hk$x;)cL=G$c(>(?zJ*Iw*S0Cs_*jMyK94P?P?RS zJi7Q=QomZ8#!@GzMkWpke(T=J*V6f4oAjltO=D1US~NxYxKh>qFMlT#SN{H%+p{tA z*43)1#d7}3FK6}`7VcRxW!3wt=eC=brdQ6K{*z~#uEtW4Ai<h{;Sm($I|uTKw~EyB#Wn(N8myMK3o zf4}Up%(R}4tzy}}T7oQ=uh(qXRE=!8B*pOV{rmpo!MC^G`+ar8uW!5a>;Jy4KT|q$ zdZhH`mIYG;f|RZbh#gt9h{f@iWU#z7|JvToVGReOI2TM=Gd0Xh=wwv3=1lviLEOeR zFLm|_xZJZ$?@@F*Qp9$0uHsZL!NLF<45uwv(@s!ep}~VwwyX;!Ff$BOZ7``o}6AQS3Pk5w{JqwC;#U)`>zFQUrmkU zIOpR1?@*+dXUDCMeYYdtZ!I%i`uE`u9q#?@UYUzaAMc&Qv{FMi{P3+P@noN!LKprO z2r%)rU*5#BW^0n@r6qG(Mch0?5|%hN#T2>vXew>HY_YHQ^SAH&fB$Xgmv2iH@Nl?d zk$k;*tJ(6Ni0P^_{L6zTZTMs#-s(NldTbDs%?X7jfPQIUGvR5vB^L@7nGn2Iuccwm>$`V#lfK={-8Or< z|9rp8e|~$ML?`P-?@b zqfL#cgR~EEIUK&gUn^X6WD@Iw@?G!sY933y|DE`umvzycV2;!6{qnQ^*2yfNvPd9^ zAtL?y>aSO?e(jzfKQ+p}=GT^%p9MB|?%w^h;jZQHC%;avo?mnM>*2$P-<8ph9ymTQsEZP_(@mK!cv)1slOe|nEj@%``T=h?;{`n24>j=$x8-S4~G@7HxlN_&5xt{%g&ZJwv)AmNjW-Z>LwR4W&@eN0&d@?!bwR+v#h?rZe z{j#Pe*~#^Jb9l@MR%-fMwKs14&Yw5rzim}n>$_I8VN1{w5%0h$(|j)rIGkL1C26aw z;}?tR+N>=C3awkONdL=NcgmtmV< z3oeR2b~1OX0!LwC;lsV&n#-oJFenN(Y)n^}{rzm#-cM_{pR>rHlEzhBwr|3Opi}!^ z_iT-NUH8KLb*TB4Pd+c584qO$KYRLjcKPGPRZ}aDNIUT;hOAkezbj8cVcyK8Q>IQ5 zHS{%J-&pr)^87!4p4+=05edHi_1KKJX|s)r&lj)VI!!7|MJG!0=^D+Y*NS=7daiw) zv&|>RAvo$3)7EDqo#k>aE2c24VSOFb%@7o15X_Z*FlY*c%BmTT4VO+eo!ZGXcW=Cc z%h6NZT6&MR7@mt-nW8t-^`z*kYix5Kh0{rUZSO=cgDLa>$BS_6k^L>-p~=}Ym0{&Fk=^^Uqw?n%UYmC;bLMl2 z#UYoZs%vZC_PF`v-mm+7cJ-W7^8bF++VvQ=6wWy$lAupB?mS)-^5VR)rI>+4}3(P1`Vs?|9EtfsG<-rv~5K8K#}Q zMzDX|+Nh5uw-VQ$+x_?7yRZEBnODSc32-cA=?E&i{A=O+Ef<5l7$>zF>zq(o>3uG9 zW`*ANsI|AYt&O<9uYS9ZVk-kf?&UodKmR;E{rqyxr+aljzdW=5_t*UY$M^L=E`PUI za60G8akDDD=UC77a(@53b!Vz47x^tbb9v2FPtCs{etq5J6MlXA%p-|WS&l+c8k@EW zDwOYb_6gKfE!I1hx;IjCZ7`RY z=h8JImojpt z1Q(eud;R9I>z&pd0uV3_O153TGdh(>y9A3<%RzYg>FP#-+s>W{ESZ`$sLVX z+dO6zozm1^JE!#Xra69-1=1OG7F%_2a4rpDSR~Jutt&8x-8*9KHp8WlUq%G6Def(1 z;F|bwr-p;p(%xwd8_sxld2(#Lx#si*S^niy7Hx`(em(ox_OlxzbM>#!TD&xSs@K+D z5vP}_?|wf#IXOGIXOffj;;oYywu&s?V#?<&rtp3D;eUzeXZU9NtetARH-KAUiNI3B z7ecoCA6~DIt951Y=7^GH%`#rIx48WKcdxAbmHQq9dl{bFbxo^v zO4G!KiKSIK`xz`w|FjI!nI4q=DBmjCqi5@c;I*bpbrKxg@2pFkYPeV=UCu_yb7hZU zM5p3~9PM+K(uex}!tneY3!`syi(xxrSt%Yy_sdbesWov>-^ zxlgNHPR_XW{r-Ojg`l3@ck5*QWd!Q1d~|d}*8BIM1>Rr(-d+2Bp2IG$ zYf;%#7(7(}KQrI|GyCWKc`^6*biem%`&z}T&~x$kVZY3ePoAxMef#lC35}#x4lnlP z)+lei^PYdH+UhMLO4rwHIcceTIb`LpW5Pilj%kTKp0ht?KFw1$}|UIrA;4}i>WBE{QP

QH17p{Qg0ed)!F z8b>UAKMOTY+4R0WGp@+5BQ!I0F7twIx$D=jU%Odc{C4~AzZSi77KeGICNG`BrM*mO zda6jW((i_PX@)-T=vSE|acSeZ9N= ze(iVh`+Crv zw#zQmV4Z3b72RiK9Ok_;smn6_y>zb2gBs+q#bt zA3q;I-+uSq>hSZwx9#3nS5)|MX$a580`cf#GcU_b=f3v8A8+UHzx(!E;?43}?UqxT zKaa)7#^spL-j_RdisCYX%P%d2CQM1_$=joR=~%|nEgRQm-YT54MC`LovaX_D@299J zJ>?*$Y~Smp*R0fH&wkEX`}y4SNlq4-OQq7=u6?`9yZj>m>r$VW59|C@el5Hom$U8m z+O1)no*g@vOeyNxcty%dYWd9w-Ki^fay3YNZs(u>?f2eGyEawEbIi-X?zJ@K+_x>q zmdH5sY@IS^vYA^`#_esn(&u(=ce8$<)Ny)`$v(ql$3SZ-f!n^rl`TX#=O+%(lQ-5bj!*cdnHoUSasXEJ^I^wToldfT^e-+Cp8Yjge%y@#=; z#eBtnpH1?wWv@B0tIhpepNq@dso&>rU6#8yzlP^@+2*{O&G+ADuZ_LWcrnu^k#%M0 z+R|BSM^;Vo;mlr}dwXhB|MB2+oz?NVCAZCb_CJ&rV*WRKon+g?o0n%lKR-V&Cm{m?|Of#B=)6HeMyJ2kwil7VpiEh*+VrWXiKb7KWu;K8JZu z4)!zD^EQb39-G~BG^mv!Ipol;yK#Tsq~G_w{N?3k^`&1f@Sn5Jo-<|BG^=@iyW49| z33AN&e$DD%*z`%uId5)@o;d5+)?DdnGlHfBSw7#juG{e3XC4OM*L%x@v?nY|xZ1{S zUu@~nyX$Vxxv{RpUE7nWxG2qYt6@XiDD80iiH`xXDh z^#o~V^9nAVvh$VWp~oL*1c|9U&z$*eLeRCS%KIKGon}l|y&BbXHH1@eYtS{R_jgZE zkLNw?l|3_Q#xlM)T+lC!V&?%2Myw%w4w9ik<1+zPjrsXRG%9yt=-ww7NWg zV$nLy~-&kbsuM! zZkMWBnzAs8aayWX-{sro`Pm24{pTN*xVOK0zIU2T#!1cTlY)8_r*^RlwmjJ@B))U^ z-G85_v$RCbUiW&-M#E!Kt4f!J>1Ya1yd<^KAnuOD@~L4_noDaho8FrqRHU`$XqREl zj}MF->({SaI_2dhHW3utchBG-EH==+&GK%TjnjV?A&?9t0Tzc`5d*g2V@x!MeLReyY)hX zNjSurBmKTffZoj3i7^|hp8xzkFDxu-8ZN{q-ioBjBsgtGLOR?}(a zHmc9py=}Yd(D3@~hbJe`o)*`4j`{by{(t^_`}&#BRhLdF+H}pXwko-&vZCU~`|r0h zu6Lk|XRXYYy1H#n^NOiKL8Y;`P2N3BTt4;HuD%Yx<&(FT?cS@gbWhFC ztEZ>ym+xj*Y?!cio&RdzO95{f1lu_o|ai)|T$RSyEfwXZYFoa>X(= z)86m*`XBGmIsL7u`s*lJUuw;s|fN5@ek8Y|Z|1X9u7oChW9^{C4uhh_VHbFOwY2S0SbqcrEgum^+U7Nc|0Yqw09^w!|c^NmFz z3m?tB^|S4=o3!%MF3qPg&r(lnt`BRz#FZWDrIeMIGUVgrYrlS7-Tn3( z|Nj4n=l}ch_weC$({g$*du`9PUSoF7w7h)xoXy326N@@+>SjLETzX`2tfGrO&-rh#AiMS}L-i1S$c?f2hJE=tO_EdmTGj=YL{tG~Z{_3D;Yb#=8r z%i#;w*}WSXB!aDEjy?W(WLw#3ucl=ucdp%fWX7^g&W{0JvSHfyJGQ;uv(;;5-HY4X zHb#BEIV)(*))omb-JG@3Z3iv4-^|(CbSxu#3CjbX4Vp_XZ@FV@bLalsLqgwoHwV2c zvEkI*dhOosb-A~X75mvRR4Z$TDJ%(EHOWgW_h@B;t-j;)NkNl>?&jp1&E{pibN8Ms zA3LX4_R$9#3acWfTnW<96iu8t%{Aj<1%GaI(aycQt$l+ZOP_n^y-^@QYwC+LYc#jo zy(u$1b~!UQtK)c3YTb_>J7 zDLOh)E235h9kNPZ%0j7Qd`Tr>C5%sSXLwbA|9imPe0#l@R_0=;-uW*^Jf*E4&4)nYDRyRy>K z89^N}d2#kT|DFH;GynSPs}^##w;LLQiq>|pCVP3Us(W#M-J|2;LfS{BY-MQNx?xS< zRGy9x8a)h*P6w3VH92^68(S+juoV;jS<bd`h=zDAbWoooGU@H#9-m&rkTpvueNR`nWT<$*J^THvWw|wL zGv_T&sR`n@-(>##dt`{L4dAMZ&hGsuBfhz@Um!W+S)r3t+&uGr zm7ktGORC-(wRX+LtxqmzUg262v~!ApUt-*^uUsr|zI`{GGV64Ol;etJxl5Y-=i6QH zT77lHefg}lhH6V~n2yYllPiGHxnbwYx`MKPX?`8R_HZ5 zQ$}7+&TqLOL(sX$7FT0#oUmBa{?SC~p@pAi`cjka)z=~)_k?XqVpJ?zx5wDeGAeoP zH3p@nCtfEi7!D-SWiN|;E-hElZ<^AH`R z&3Kk5v1C&F-$a|`P1+F|Ns5H z{oSKvSg}oEUAL;V=eLl;D1vhRsYYPbp^IAGZ@M3lP+Ryi%&*3evK31}{ zK_J5LIgf*3>lcBo*Me4Le*aJx#o!^G+~asi!nteX%UOl9k~)t`)-}y&T{7j-hH51y z#;EM&e;!ZSBrrETz-fUqzCQcxSC!Zy21h;BwgQ2MkcI|%iDX`(+V6ME z_y23(|NrOt`gut!wrVjjtP)*1<(O35CN0gIeX8=t8>XcyDe*e6s4!%mRR~=h>cQE< zaHKvY+c)T1(9Z|cf~3|?{Q2Ld!c3o;{g-0k%E;!Rpes!t8m%mjK`ZBcHaUMyN>kL{ zt^fb@DMDE*HC6kj9NqjX)??`pW=4kF7q!AZ&e&XY?Wl2QV1dVkf^)*Uca|2aYFJze z+4M$!=EJX#Hw1D#sGk~=&etKZb=s`e6PzU1F1Zw?y^yzK(rN}y2B8+?W9IAR*&4W( z%qdpAzU^(xl%-*c2XFB{_S+J2uk!sKks=w;g}fnaqBM`lIDfcT{r+a$uU}thM|BnN zd%Gtn?4_4P-}09x^O9oBW}BwpIa7RK&V(N+CZ8?pY-(-f{9AV1jpJDzdVAZk7nyUG zKW$M>JR0@-^!k0jzS{peoVK^}|3ha#$$*~AKi(bwefZ&ymY*e7LXE}iGH+k4sx90Y zn`98h-E{cnl%zGP_wUtes@m3mYN>ta#I!KvQcJj>~Wh zdg#u*s<}MVDs%0%*QT5Ky-r>(H1TSm5Ojt`N!C9~J3CkLjA~}8p58WdbMw^;%g<$I zC!JB$Q%z?SYEWsox-EB(;oq#SQR}bSZ1vN8Tekc6@4GhVPa6rJkV*C|j$+zSaC57q ztEP(vV++I6HT!q%dH4IC#NiWqe0q}0En8%i()pAGKkO5o_+d_S*PB1ei=r%l%&^YS zTpPPK#mBIXW9p2M>T`dD7i!e<`H_E_>AH+d=1dLh$WDIsuwYO9 
z4MvXlb44G_Ry)y)Jo=6km^m@H6fA99S-woCJj;|GU>1L*axZ7)dYue9;Wf4O;;k3Eci*o6_unT*%w%8UEC0u}pI@Zx-@p6$3p+M<#V{tT zH{a*m)$^S;(k* zkD*}T{qL)-SadyqhO4_iq3H&)?_U z)do40#-0}~)|?p|ef;smhYuefZs)g5&owc&?a){rvyn^Sngrjer_ODKyoSdn758X9 z57`)%yr=x0wPxz|ZHKc`dsd1jcTAo=Wzrc1(_3*dIoYnsJW6_r51*?)Kh&-$Sorp6 zmylnC?oOpimsU-wvXJ@ld;Joru;$Ht9E-Yh&Zc!1b0{7O@G^{I4s;Fivdo`4MX~L2 z=8-5y7mmcpCV@$p8zxQ=O}`$rEjL?Qdul=N=4&4-E@v$6ou*LXd~8mM--R~goWM_J z&-OGaDLCeQJF$~#Vc4=~Qqv1;iK%LmIdx^og1fl z?zt9`ts}U>hCTbNgX!{2ElXCml9y51pSZak`osNnmogP*E~~#{T@d|k@ltNVl|o6X zMo}E9Q>T>VeK_|nePPR_J@*$yd5I;gefrAu*ol(jr7Hg|7+Zogj@+Dcew}_??cBhF zI^TqxI2P8;kttfP8X>*%$8r8M{~VIOsQ8PuNTJTyNptD2*x z>N08O=eRW&S1*+RweD@(l*=D0Ui>~-dEIMv@vlE0Kd*ZKd)eDx?`~dRe(vV&+q>_+ zYiM!4pSjjcZ~nLWS*n_@>c1Ag{~dbYq)y@&|9ks6eZSXb_5@xwnakI{!RPf=u8nOH zQ$tjz7g}cOhOho7zW-0^eg61;ckbPPSEOl}wy^d4wzpE6M}-yzIeGPZXg8D_+@|R$XYx(>#TF{p_`dqN$D%@iBs(?d`uWS3C(mTz%hc?bmbF@=iXM zdY^6d7j6i6C{v_pVKsGO#<2|5qnpn7US1Q%eEVOqAB#e;-qIx^kt=yvXNWFXk~7;a zi*?WIbrIQr4dX12h&VAUQDxCNvBM_$%-y?lx2{V%X&L!?$s{k$=eCTJxwp3M+_j5M zVE5mBAy1x&FfIxUQPbA-_KPYy@>IylMTzgUFq8L6C$6PS!i1JSG4D|R)MFUbCE~1` z+LtNy{A<-&6~SyCp`xxCL9g7a`mRN3`#N-p9ImmEY58HUsBl!La%=OFDOy{nOq#vy z^SL7vwB<e*b;=@7=#ouQp{{F(t0~ye~8;Q}w3(Vef3aj-HE72TbM#b=?uHe!nGX z_0?CeUcK6#zkjmVzmNR&zYfp;S70OCYv#1NP=<5mHKF7UVbW{Ynl8V5LLhqH){5sn z8`s=6y7r|g;aQ5$LYu5RJL2B&y0&X=P}b(S`*-f-Y|GX>e=@-Gc_rWTsQXzDM6^;n z1QtA*6Qt+R6kOC}IsK*ObK~iu!Csa+-W?morUYp`mHb~}BfE3I%95@hGqP7atF)Bi z%e^g>*yZWvcx~e)N3YbiLJBTIUMF`PiQy8Ae4XU8)8*c-28R14vG!k_@2{`AZ;{K( zF+t&Hmd*Ux4m|g`6;q#C&Eqe)Cf%Uq;1qXdZtdmE7QF{|UW*L&*4tW<9bw$Qbj|v0 z_WysLU$<`CWfQ4nFI^{&l_jc|f(mt3g$QYf9g5=OFpA2$T5zfNN|1E2fcEigg#yc; z9CPjJ674ART5`TIdXLs)&K)aEuX|PKc^EI-yp-9&Q{QV= zXWQIE4b_irTItK-+W&u!d4$uS{xZ22ff45y#I5U|INNMlf3e(xkRT36haj$Hep+jn zC@eM6SA3mpqOQMhNr)3eQQ@Lq5vGY*nmff*Loa`uF1|N(5`zW5(sZSctg}l~mip-K zXc1P5Om7u(&{!HXP4#(*_d^CbF&T5`|9@_7UiLZS{8A^5ol1!s?G{smDl~W4%S<=W zQq^0pcWa&MlIQ2+BGT)B&%Qs0kNy4Is=L+iC7!OEqIPJ`q>xpavl1iL{xUnZbXtv7 zUw+*FvUBo$zklETm-v6d)Sdfw)&2a$s`T$;|9#;{{|Yu7i&|aWXKKA&u4h{2%v;Cy z|N1ri@xKRK+Md2eaklIAH%e2b@icy9my!>6mO#r5aa|NVMh{$5=~(Kff@ z*!_{~Z>{sS49}iw!Xj`;C|S!gTeJ4@p7QJd>t-Ljd;h-gXA_?0k8asdV}HN7d3jr| z!0PX7Kl?Gv@ms#5WkXAOe%$R1 zCEB|+Dtc+-f={+hE{aE|2n!a?bLeQ?ZK!ypYA+kZzrRu1(wAZ%Km2peBRQwfvMs`B zdJ>Dmxoy8TuZ+-8U23{DENyYIRpr`JPQznUy!Dnoo07d!p`r5_!+|~X1r9%ET(@SU z!U3_xQx8R11~+g2_=acZ^45-9>%R8p-e#LH=ksZy{^P-_e2O713Jw;lxf1EOAt_u2RS?;EUT{SDpnG`)1+EPpm@28Wzn*|8Tv-9k;D z5A~$dq8+b-Pv+MKqPhWp_K=in^xjeeTGFAg!rMJ%&bHkzO;VcXH@{=9??u zb`Z24JN)p5S<7DX@BjOEwt2psoLtM4tygq57r!&QkPEJI;C({QiR1v^QTk1AMSQ#_z<-`OvJ-JEw@PR`X(LK zXOoN{_*|VfsWX7*?*&uKrDxMVU$6gr`u~^f``_%|HzmgFq-uHY@~P`saxOpOT&{4+ zvOTrWuq5_)b#IK2P)1w83*!mm0^0Vog0vR%Hb0%n;VsB;;_?6Eac!VHVt?q` zu3z><`M}JtQc;g3rF6Y?y|P!TFlgpR6#iUMw>&6IGht0&zle6-wjHM?Us5PK@8YrK z$zs3epC7kw`nCAVsY-pDY0n-ts&_KaSF#OT)YkZ->aXG-;6bMm;qV)pFS>t654UlJCfx$KU~*{lft zV1*_F{o{xIw6@;O`zc*tTx|QL*;Mbjm7M=$lVu{Wm!Cg2bKUD@nNrhlTK@m@bMtI< z^D}3Ef4e>3L-YRr>h-;`Yp*e#Ft3xSI<#oYot*1lO%n{(gq<^YDX@{%zL+J^mN&ua zOyVtDn?IlJ|6dRK{8?O|A!Bc(cjC3zrd_`t9sT@%-|x)(-@RV*%iH~Z7GJly<<5^~ z@=@7aHn(+b348r@!_6E${q60K*S)sBzw>3urBg9EZQZZE`M&)6@bPS2eZ`L5_Huo1 zG(?@4=WbtO(>w8!Nv`o1XU&~&cO^z#?J{&_SrH-_*rnAngMatsms@leiX78+<`7m& zTKnh9#mc4zCy)2B_rKpWH)Jal*ERg$Eb8o%yd+|Z*JBIKrL&f8JSNbt$Z^T*V5GF_ zlu0ZbS|Yg`BfNcj#gI^B;e#`TWhC=jMSPi9LLmP5P33PF86z zx!}aD8N^ZSaZM|EZL!(&l-{k^46Ev8j_-TFc58>`5!IzRYgJXmUKcKXU9))VwriVK z8(mjoa42}oHRsX6ApZp|5mODt!>?bvC6Rpp>jkG%noos-byTb0SL!a=nkBVAir4G) zn5L{{H^H|L^wubvyq~3VznHN@TL>x~v{W4b|}1Uk$z-n{mu? 
zo@(K*{%k452NgC?LoDpBOQ=p=u~JKyv$5gUzWmu~d*imxwV(IRC8qUyd;9M5^X-Kl z?Y}sf_-@|1?&Dd@(*c@kjNA`oVkdZowh4tj(Fjqwa;)P0l%O|W*N#1zGBM)w&MUJV z9d2*Sy=?h>NyyemLeD}LeLfZ;>a1aEoyfUr!l_E_mJY=Y`kXBa3Ts8Qwm3z;E?TFs zWXh^*K1_#%UR3za!{V zLckx3ZBO2cCo8MWjdzYq`gpJVxBb#74<-dM6n@~^-63D!Y^|%X)Z%#Lga;}W3~O97 zbC<5^U}KpOG3@}$Gl|95etr6MNg(m)lt(6Wz4Vq|37Ij;F-VJX=@XOr8;Y+*rOu6A z9~PDEH+_Or&*L3uKKn5!TXtyZT(&vOG_7kwP{}QC#U;~jf8DV6PSN+1YY!*xw}12f zvyrAUOTanJWmmr1y6YU95^{0(oEO$hLo_7? z1z%TK-Mf2#?$&2P+xJafVRygo-=~kCzn(q2{eG=^$=l=K-`myy+oDsf<>{HawtQRe zEH`P>^OsHfPEV@%{ZcwRSAWS3dHG|7d9#+uhc&S*>`-lPsN_6;`DDiSdoda-pK1D- zY)*>ERAz}-J8N71?jwHw$E3IAUN`YwY1$ItwbJrxQCzJc$Mc+(5!sqvQ(^=HJsz$J z_L55evD9H1o8ZS;D#tyXJl3*wL}|Uhz0-M>XytJqhNWViOq@U5=M-n|?vvMA_h(t< zt+!=M3#1!kujiT1UMSyVyfsAcLfP)aCcY7x%TG0PJJOdu>> z+N6F%^|;-}4t-kBCLJ&2@iw@&foI2*BgcXl^RjaUD=>9vKA-eFwXp2n?|YWV*KF!O z>w3Fv_g%m2#Z#l!UaKk9)bu{uB{DryGuHc{lV!N#l5JcJTP`h4S~>N8s>6=|Mc?!P zf13S!_wS%&SDSf#{QTCdw!ZxRwrurQwU$Y%84d<9i4}RToodJu!L@0|?D_)^0!|5E z9sO25Iizt^KxpER)$xD6y#M#`{d~L1Nx>HLmdz2?c3d(g_I_z?dW3G^rm1O>&s235 zEi!cTlzg`1{`<1swi%b0Jh%da9z{Mb$jjqc=~Q^EV{<@|pwe@Fjip(dHud3NlF4$b zLv_+cdFOnTjnZ7|rFmkK(Bg_NjUQgYlQ?=VxrQt;wv6Lqm6_#>GX$L_^bYRTLNJfv#H;ORp?m_pk6OYY>OxjEhdsCGr&}1+5Evz3ii= zYIf|o)Owo_9*Gg_Z#0!z&E@-D+hMqDij&Vy5tj;=d)msZooBPYJ~>%>@?f=oYt*!W zQ(wNk6jpuu{JcDWd-C>l?e(p* zX;0^a9Gxpx|P@GsWE6q2A>(lGwCl7h8Y{;CauK48W zx1y{04OdL`d@uibB)|E$fr59dUe2538c=2#_mDr4mYjtb`a-a$d1 zWo~C24S7AqxS>OF$&{p?kO?fDs!Jy@O!|J|`>m7>}U1%z4^T*B5Iov~w=zUt@u zIX!WQjCzw#d3qddOd^+3E5e( z?~s`4wM1g8S*^W^@8_%C)34pUY+?0$YS`8%F-GY#=b2`gtZSdSxj1f<@-;8t+p9Kr z&Cpzsv4xSv@0rZ;k2wzn8#=tS6h)N&?U>K3pfE*f+w85yVcE*8d#Zo`y1e|he|+Bb zr#5y^71Zq(%(JU~HOG1mUqh(p##iC9zgiuuu#oH6Akt&_F(^2D@s?Gl`k&``Ik~Kz z!!R{CtczjNw3OaUmoyhn5l~!u*@CI#szF4$Ge?B>Yu`DaqqMW<25I>Qb0o&49Sx~a zYI86Cw^k&GOVYW6K__%-kVJB?Y0$|j#z9xzlmpLcwmsf+=3e!E6YqG}6w z?yW2O`{k9@^qS9KGB0msIneO6>qrBmrh{~D&(k^4D?@x&J_*v6WIb(h_r9&p=Ot5L z?&>qGQfRsM?Af!M_y69#zqjhE(NhcG#}!hn9b_$Ni-rP=;HR2>9ZC~_%sy05 zKJD+-sPl{Xt&U$-RBC3MI_Xl;?ZSQ9Q!j?N>b2UYGaQKFVQji8^XKiqLW`68ZFV16 zD)3=N@*0g7Z!+Ahj8{8c=%2B2bD1E^YVAgW7KT-K{?7fXz|i}B_SvefQCGVjTl8Jt zbLO;Av!Pq&->0Xir$%;sFzNkl@;s)p_c*V{wWzF@CA)8~+4u9AcKTcSe;@u8*i4IA zyY2PaU*Asao|doTJEj(?otb;>weMw<+}nPqWt_BDhSj`r&h}J1DzkNqN5j-5L0dx> z``$JQ;+UXx-Ky{N_WO0aZ^rxswd3y|KKQ$BLK2gvielKRsMTkyYAY)XrxgiiEOitR z)J$dZNM37dcBJCQTQPJ0$D5n&ZSLIleLSZ)>Y1yTuZ!mMC}Yl&V}cjcHox7KIP>%G z@9*c^?XLXvB&&Gm&sVG0&oO>~f4_8pd+JQDllRPZ>{mv7=l{KX_wCGCOIp+Af7r=S16|Nme4`d@eb_f>tII`LQh|F7nk zH7l-vJG^R&P;$oXV|iwp&mMnVGIf%dR%T1nKKU8tGSg-RecHG2-nFRWhq>90CMIaI zuJ{zt7@AU(Z>Zd*dM>9=Wr2vYphBa_k_F4Aa0@AQe!g>3W9b}Ym*-ECzJ1%|&B)=E z>Xf%)sTV_>j<;^-gbhMM9tRx{bf_#@wN*>gvW!vgzM$GE>BS+}KR-TuHAGi+xlGE3 zeTm`v?+y}#6@q9V;Q+UQ;gb9d|hGI)#3GYse;GKn#X%TGw9g#eKzr2 zx+~A%nn6~87o*eFS)88QYkOC1oiZtzNAXseS%j9<+r9hu?AUVe-ralu|MA!F*t`4w zdw-qhQES8QnSVCgp8NU9FR$#yQ?5njU(3#Zd-dtJSD&01YJPlB)Hx`T+hcmm$Xm|A zpuqN+iSOe=8E?L*=q0bV?A^6*-<}&9OM^8cT=i6!vACSGRCM83_~cS)?PFU-PfZpM zx#lAh4jlXTKImC@TqM}W-K{MG6=yV-K6taP@?6XpiN#y5Ie91^*s@Lj*W0w} z%F3T_UY@n^doLL(6}L2};@iSl~A){65gt=HHZwdMuOXf{tdV#io< z=l(@y!RF4rYu1N&UG>_TA`qo@vLc^v!jvFR7X^#&pMtzJy}fw;rE)55j}OxPCvj)z zJbRJG33J{uoe=a)FIz0a=VgVJ;-al$EeRTt z2Ul(BGdwnBnt<8suT@*iuIYXb(yGeedX3|VvRC@sv!F)y(Yo#5?@3l$`g7lZByU&8 zvf!He>O|F+q(FV|mtMMeFWj@KHuC*kw|)C!Ez9$dJGM@XQf)Nc7RI8wbVBhAyP98b zdh+(Z4tMT&`FOd0#DkNUm)|~{>bGKx5W}1x4iCe&eedVn*YCTTBl!B^hWWks)?CZk zJL~yloAb*rOC+l(t^G3ne7U*#_j9|0R$melbY#d*pKkDcih$v)9Z_Eo8!Va9tK}2y zdPLyx@9*#P^55In{@U?swz~hiR)O;G<^12vA7`3e`&zsIZjRnMz4p4RTh<0CEwy75|9iUr|MuOx zfB#K9f7lkZ!%bT>*l+2byLaWkStqYGJeLt2!mBtfXwA_%uQF}s_9bhcs9bf}U`y_4 
zt7F@8fA2fL^h(guNq4$Gq#pYo!!hx;>C%$hzZT}^E>6{38j^c%l|iCl?T%+ zC$70{&=sZD#E5X`<+F7bwrk#rv%X>2C zS!EJ)Pw~1ZP99sQOcGU`8j#F$u2R$0k8|RbpgF46=ZifQkDYkr@a=26K*tHG>f+*N z(Y2=j^X+^!W%<|@T~D&M*z_*9|NoOc`AgdFiI?7${}yOAJod9qAo$dsx1OpoueV%L zy}qWX>tx2#DNiI%R9J-_T9!E_%!_NK%UXt}8QiQFu0%1Ml!y)RI{0?_jKea{J9h5e zS@$V~Yvb-SY{yqh;Q=<>^o z`ts>TzfStsJ9+T$j^&uB{UpZg;^CHqW&w)ne?jSkYeTLPnG`PlPLS&GcI6j~~h9eaJ7lhz@r z(2{FXx!Y!UtNX9ZpJ&H<=P_S&tcOn%E3Z+!Ot|HJqH|H=OofBx3O_q55`Vu440xC9)} zRnB~V+2Yf8#*Q_`vDx0bE} zQ9L$#wzSd;*6gFp3Y|`6+O$saTATaWfy;5xluhO`@B8@n>ea7Dzb%>~l(ge_ zjePKm&p}zT%Xcn0^n17J8N+keI!>Q$W0|u|_1K@|_I3Zi-DO`XvOVvyN#A7_jWveP z4}>l4Z$Eta@ZX0!-v5r9kic}-#CMO&+O6Aie?R%77F;r8&DW^gk3VLt&6~4-@7}+6-`C9#(y7#)BKG|9{9EhtZ|7~k8S&ch_~pBA z{Wt`JtYrA-+tuED?{6voe)iWY<27d2Zbof<-*)vEllIx-6%qfR?*ISnX!qUg>-T|| zc>TX!|9g75{JpQW->-)2d+9EJoVj;r#jiiBV_wZRUoZdeeanPvmYS(Ooo^3+Hp<^7 z?sYQps`gTg7-x=)rFXNop0f1RbnekSHpPfx1OKG?+cRf9tK>9%7PCYkclFXKZ@#CT zUUcnN#Np(bQGr^WEvnCFT(a@Mm64F(s~O}m;Rs`OKc66jz_&udpzOE1>NXqRl34p? zlTMAow4k3?GH#dc-n(M!k}1zp`xLnpf9-q!t8DYVjKzY6M@{rkZ#vV{kQBDC^j4)f z6Sur@_+PJ;%W`*Z+k7mePeyIl^U4?1PQN}q_3pVCxjkxaLqgQELY8}FpUc&z`?%Qa zRd+DhT$!RBz~Sk6BuL`2Xz&@;*w{F!wb>`1&)hfR`9u9MH~9it0c8zd-`BU7DJrN) zET4Ss*RNkEC##z)wwbTLf9L-Fr6CheL><>$R?KmF(~}8Hf?Beqj@P}||MS`M*I(24 zKEF24e^w~M@cQekpI1M}yuQ6XpTGUDjs6>thf+6w9_pL@w(Ry+XWbQB<}AOQX;b-r z*S6n*EV``**FdYh+42&7gmu$s5A7^h%KDQDqIY@S`H%cHgz|vlLnS)I^tMMlh(v`{e)sOaJY+ zW;&lu=JM^9RmxgBJ@w>HsqBgaejTdMqq;uJ%`97{GQmOV+6N_fJ&y}I6BHWv_*otI z<8mq7w&bAXVaFqm7A`u=o(RUT^$59Pf5I$UfU8qQ|8NUKQ_#(g>(@?vnseXVK2Kza z-O~z#WiPxmXYw3MwyXHVE-0$45E&B0C8YULxhT##mpG{d)Ckx43@Gl&#k?=RL1nW_&=T!(eIZcJ6Dp zqPQjtZNL3i=+bZg`DOgy8-6@Fc{Z)OQS|q^^4{6M|DB$$zx?ov*z6vaN#DPNF1D2W z#-9A<@7=>kkEY%)6`wwRbzP`mTLw+hnw0R=rHn%Jp$J z@@;lYO>{SJUibD_!FPpLvsrr%uk!Nh44w1%Wx;-Hj{vVziC6!BxxIetHGY16-^(`D z-*PtJ^ZC5#$kKf4rFZkT|NiqccWwPKuccxP8m!5F5r#){dT!^(a?O%$v%0v)}s@;r%=&cFk#{33F=xZvNrVg7Mtcx*D_KWJan8m0yk={+_f$Hwiv^nd@tS8d)|~=t4=knicEV} z|7)4Arrzg0I^AJjUI+D}7?WQ7UiV&Qe`AB2G^c~*aaGM$h7QrS#x1U%scW;O%ZrO= z7TNY1D{AFhRBA8pX*quRqGfhZ-mY!$qmpwUUY37TwR`W5Jv;X9uKxS$?DO;T+U{Nq z8+z(ysk<#$DiOZ=>(Qg%UY?!2{QNvK&=DMO-~az`Tt9wa#!|D{yu0UpZ$JF^uWh@+ z^*Mqo!(7&Ax+$_KDKaj2qB6njDyO@G*IFi*QGQ^C1ywd=R8ioQP&)yu#l)`0hTv+R24#kICTX;tG#{~ zZNLBa@uVF;4X%MUKG^?V|NqCIpPPd|+4!EenR@(mjK|UL(zT_xx7l}nohI>KUT)1+ zP+LWFg;es?^f%K~bfV@QFI?klcU;Lsz)j2Mxxsu-ze$l2md49C8762I-|O_yS~(?b ziI$SLV#WKT|F=F-Qkg58zemqW#U$|Y`JxDsBbTfIN_pzv_W+^x?GlQbFZ zzr~%<;Ik7{>9;+<%PE2V#>}0HD*V=wp7Hf6YV+JXT0E|;JXSq*%C%|PK@3)Me2-0v zZY;fcOKNg4U!td>r>ZXdZjOs4XP5iW=l^{;b5_Tz3qRM%|5|r`=Mjni<<93UTPv(& zz1P0}niaK7KR(ZYd0|{r#3u{A$C@U-pP!yympM!PeEyoyYdhDyE^<@xSUF`<`0T6x z`zlL6KT*>CJpccnXQ!v@Z{>P^D=)(G`C7vu&*w7JWv|Uxmb-f!GfPWxj>O^BPCQHv z9Ny=Gxb78AofN6w77^^><*}Ma;L5DIrA!*$ty>v7y+j$pYTwshH~IJL*QtzZDgX2H zY}p0k_ubiB_cu|df9aK|O@-U0%_{0rG`u{?TsTN#VdB#&hKb&DmpMFk37B;)du7(q z-|fFYnk21l)p*+BB5zmsrvCfx{XcG=ulv{?|E8=weqT+P*Ei>Si@Euw(6ba3#}oVRmoP?6X2iOo`4*&Xt|>$+Ba=nKnSxs{nA`R#@8_ijrl z)h{@k_H?cGRvqsrH*^&rK1#Y>x_9fd0t=b`tK!tJiGJlzM(gCTZ=DcW-|`E)4Oqt^SrIwfyqO69vs1&Ui;W zE7aLq68nGie9+$A+SfgI<@xjP?fLoe>+IXNtC=!pnYH|QdDvY*VTpsv!WJgoB^o=G z(iB}Zr&^zu2zpg~+sy0azSnavItVzavS>$S+Z4iCj!+pgbY;$WJy+_J5sW6p*%M>ZWy)upx_>_^t$f$!|IQN# znlfkhzQ4Ese_B2N|J8Ud&cu1yo_oHg3r&5px8Ewi^2c%6WFF1JQ{O5NgatYj^62Pk zE;}GrwQ}p9jjEG^ES}%cmJN0}^117EU|hvDQ_cxa7bD;6adIyFwDz<8(&CSOoN}y3 zzW&jCR2kFZ!7$Z3HYtWZQ151zbNX6^*Y8*bK61^q?E1qR?y^S1gF|&{ki(KKcG(&~ zrzM%soRsb-$r$wXG?$>Gz+VqWHivzOXZ*}wn=6$byY}bT*Y@X6FWQnd>zd{CVm7C> zOTs4of0QZneJR7lFstP;T1uA;)-b+0`}OGM<>s2t1egRQv$J)VKAZFT@b2>Xyxs|m zvUhjSk9}8u{QUfVbNrN>A0(Z6RdD0&yIuQCj~RSX(c$E1+ZVs~*TUCZuJJ$Ka{GP% 
z@x|Bn<+kt3cI?QU73`;~zBu>#&9hnFEsrgPCSG|}amSNWf1bVZu}?QcvR$?IOnPeV z#&vVsS`nq^8EeHR`8{^zD!6BK$vKFv2{k6aA{%^3C`^L0nv)=Qn z8}elLDwxf>aEWQ!JhiVsCM}sGcb{qHlu!Fo8+92pmhN76 zKWF=~S(&qrRT@UEZHv>K8+67}P{{xD^31v$wk-nxjz=tVy1d0r(}kt*+Dq<*4Cj_* z3mBYtWXRbj9^~O-;rn<~OJ&@&)NSWVbJN$FMhQEVd5#xIkIu31GGQ){y zYuIAn>!rT0w}dey%}ZCDweIzg=!wN*3R5oH-;(z}XUMX2O3e#sA(pOoAaSC3G^3?or?*EtN_j9hz ze*E#nySKN0x3{OBJY|s{bMB_~dG`s1)1oH?W$_p@NIaf)AWCw^^OYh~g0}6O>{azP z>@I^)(b?a3>t^{KUiJTG{GT^hSBLM*Tz2_odH%o01s2On!EaxE7hrre<_K%6sK$A<#tv>6%vzc!Ew8N>zFvaS&rzvGp3) zxtYr{mrQYqDC#s++7$k4&e^m1bAqz=M$WI??B=aFy>jOBo0iY_KHfBM`M!QB*QHaG zT#m^K2yn|U5z7d0(pa1`EAp?; zzM3VG)b-e6+N>a(`*URsm+Z~2$oSQ<-APHHX^KsB(T}s|m(F>wbkc>VN=A3y_7EG+ zCyWbHijG$byFGUhwpebH?7DR71g8g;cAX3?Az4f1tIbw^&{;Eq!9FcWNMPzD&M!Zi zk`Ho*DLQ;OBIMwq=+Wt}x&Ke$ZnahcChlob77r&Cr#$$>Xt8DHk`RxGte10A7l-N| zt-k)f`C8SSWxIDrJH7g~E;~B=ZC1qB+Q%PKdWn>K0xoqL>1=X?!1^z~`0NYSik z+w<>#t1U0BUAfh5$+FCzQvTh)m%Y6vKK*s#)$(j7gKM9rYWV6bK?`8rCf=b(eL-Zj56oRz5cIsjY@7i_$b=pt;dJ zrCVR_z94+iME_jn(l>i_6kPO5WtPv`6k@e>Q{4Kw`CG0is0cEIYAzADWbysWv$U;J z%QAm`dRk^PH*2l$<%rimEH+$Iy7aJO*7H_|Usqr0*L~akHO+Wl-}mk1-Vr(hnyH+@ zJrkG9ufAHfHEQ#bi5x*)%P+6|c+V=k=cHw;fOhIilQxs{m-n1;zP-2R>#EFImGkHL%n;v_y?seSWs*Q2$1z*3uzW)D* zcKbVZJ6VNt@8_ENc`Y@SNdCRgT}iRQ@WZ*NxH+5Mow5(F7I&Nw%;kMT^Qc6{6}N!v ztN-cOe|tMw-Cs{%f4acE{k5Mzd?>J)GfnkL)3r$MrVg)_PFhP-^inw;=Lt_|U}%|A z$!U0G3b&w#;@qz}yJk(VoR`M%a4-L9BmZKbjUROdpUk?cv%*yW)Y{^CZ$B`|R7VGU zeHOZL`1oQcxhDlLEQQ!3k4Pj(9C}%?Ms%&|u?)o{Cg&e#_84a6wlt(h&RX{Io_v4u z$*Bz4x{5k3UvGX+H}{{%;L!84!a`3`WQ~(Y_LhrnCmUB^eK1G5E%dfo&C-vY@m?20 ztMkoYf3-P3->%l=-X5WN>GWflOJ=z-7#uU%_j1=ciL)oSvj|083S?ACI4JRGxxf7P z-FMR>vL8-^YBvL~mSWv#p0 z&oyzCNQ(K*yDIHZc>e#_-%)&h&*86p?U(PqVdK)6I92kM#`S04o885pA3JoY$zy?* z^5Y#lXTJTL6O?}N|H?NF|7&8ck4m}!RVrWOd-b|#_|)1foln1h50u`(^h3#ECsSkN zl`r}gniZzOi*s+EHP~`YB9|le^sYI_y?N)n*}blA_3K^hR`0&~{{6f6zxI`u?cKRM z_tvwIr>D=atE-nh9&7&Q`)+|t?fmj~wZCk5?*9FI_wHR;#V<8h!iUxrA3t0f1LyuAa*hch+p>QuKKo<8?E~jMd}A!CPB1 zrOvS`uHL-PPVe=uqI12O*HX7$_xAUo8ZYA zT*QzX8J4Zf=*98+RwIL_q$aE04l^mi^Uu4#`xVp`R|k7>DxUfL>Cc~->Gyg*26YwZ zZS3~{IDdEjufOl>>nrNc8BXSQImgN9cG93kzp{d9W6$FXtGR-KnyO0^rf4qxy-qve z-15(rdwf1i1^XDbiG-*7Kc8H@y4>!0%=&)Q_w$PP-OcMY>^NGz|0Yk*#?;85Ls?&+ zI%#P3W=;vSd|Dbgr}UiFW+~HEb3X4{_ciOjneSzjbGs!LfA(`)Iqk-j;13Hg-H*P@ zaqw*xP)UB*Ubp-9w^;dL zPzT@N{_mF`Kc2*@v=}RLT{``QuC53CG3X9vZjvwzW)}Bb59mtv=Y)ed>v* z`+8CN|Ca_F<8tslc{CuKoqX~B0Jj*0C3er?_At>>Z?Rgz1#Zrc`fdQ*!;RM^`k z;fx;VG9$yB8VVO}&5PbF$#$$#Q?>1DiQRnt=d13D&ymi(wQVI|xXYfHbB`+w-87f3 z<2cYX#c<`6@9)mn|GR8+ww!-^{=L0ddnL_g=NX^KQgCj_-e$1Ab@eU@<0n&$S(E0@ zJuVr%?!qz&=iBE0znuU7B%$zlyTXpS1tHWb457HKZyh*S z@AaPBbuHWH*q_o456!*Pb~3Rv1O!E9wS;uv2tCELb*|plhO67wFtn~`T@&NDFJOB5 z_5T~DOo~@&U3!JVC5XA6W1;?9QGp*?ew8!%POj#C`|I|Kx3UaVR;jFe?`PUEg-P?* zgILduo}GL5?%jJ=SmF8QpMUlD&pUs4S53wL)4zZH|7iL5*>?N7?{Bx4e>Xq9$;fM| zz!5te9A9@bIUhR5@J|Ws<}AK%Y!k%K>cQx>QjR)@9*sm-Sx1^ z-dl!_3=lU;%QErOZ}dAe|OYcx@Fmvp!19#vP|4s5|1z2*w+1% zY5M)J{e6CZeuKl)nPMjlUC*d4zfrbqN>GE(K*%VO!-q%kN%V$x@msf;p!< zF4gSuSzT(VrWkpjzvYmB{oicOCpEin995b&>+C7k`rmJN-~apeeM^A6UCp1<@pX%) zOi7$_RU+uMPMXbpJtvL@CeM91^Jgs!iYhww{9{sh zv_bHxvfXQgc4fY*VOw$~_PXLU-N%X+@=U5v^y`0iZ+%y@Q-=RM&w=Xj>GEDzt3^2t zuhp<6Z@Cu46}ezn$#Mzjsf`+n%a1y0PMOqYUQ<_R=2e<|Sz_<)+^uG!Qjz@2WlK8oSz=d;pmYmUT){B=yGnS$Re#1lBoqt z`&1bu%;xUsI;vnHo;)e&>ZcwK%c~BnTsRF5_J&OPG-qPap`BT~*1c1aNDh4%`L)sT zS!t}2r&s7&Q;X+wO2fN#)0<9rtyur;P~}YSX}6~6pMJDgeaeM7&v)wWeWh_Dpj`DK z;{*YwWS{K6ze-nceKsegk8#4LGoQ1re=h7Xd{?%6-Fr^OfG&|B&D5S{Q>F?{4Pj8& z!JsfFSZB{`>-TfY?%^wJ1^Z{eO_u|%KYZyj+>U@ zxr(kGuN>+M6Oz4pf)C|v&^#7&_F+fI35(SS^9|XY6xD;ax&>(}rub^G25DTm#lpg{ 
zzA~1@I7yZ-&TzMf7o)=UNaZUJd4rf*L=-*av`N5~2LeKv~m++kB^7Pu{((sW^KZt21{foYjb0^G_j zdYR3=lehc#?`lQSfR2mv&MlvRZ-0HAmF0Ae_FehKaQTGZFwJGp4}V;5 z|M%+9cJq|4jYc`2SV6kHHgq##gyDT?D5hq#@aqKImU zBr8j!ieq=co>@1z7anN&+-P{Q+>OJq|MMoDb?=N-O7E@9jFq?d|NLZ4c27~>-yg5n zI~6&&_(&Qva2?#ej`yOlQ}LnY`u_KRW=3hGPAylr$d~rwOrD~3^H@jy-`D=_%acz| z;C_>z7q_==Y45b4(q)FrZt~Agoj0F9zU6}N#vSwed?vra>(L?h6m5Z_9;nm6N>Y;n8j|s`^`7|)!cJY)$=shK0Sp_Ne90?{rvp#=ftgH9DbTlW;lH?Z1wLi{s20r?|pBk zWxDhI@Yk*@y<^UNwwd3@{#+)>QG?}x$IjcitpW$Fbwh7om7TD1GLy@kWiM;|qE_F{ z3s;}~*kbMKyE)~*XYakH^)#e9JbQYf)b*QfQ-Zv%t<`j0`E+;w{ePf~_KpY$iOqQ? zrRLru^4iHb>F9nh&8Z>xb_*zr3o!(p`Mf3I)UI`z-*eBZoQ_x`!kTp=QlLqDsYJRL z=hCBR>&pKZoY&lVy2@^T-}21@uYSL;U$a+}q1h>kWBKQwKYv=P=1ZNmTt3Czw|MjZ z%v)wlpWKm3zEiT+tj+1;YTYR@*8L%?!sHCieHU-tcJBXt`@c7yYb^Tu^mL(& zyJE`q@~GJ}ig*-{MI1eB_;gP3jEz5S{s*z4-m@cUc;qzZ&?*zw@n)mx>pZ>{`4 zhnkBN9j)R#XR&;$lCAIL?+bHpTZA7^=DPQM%GQgf1q#Pr%>8WVcRFjAS<8lb;m>8$ z_q>f-YbwAb!sT$1HF({@Z>kSp8p+M?XZ5_DzhugckH_EF|5#rC$6Yx(Kl!$lVkt)> z_d!lpr&Y%sIZjwszBf!i7&$l7|61FF9S?s0`nTFS$za^~vXlhac-o@0IA zDiGk6Yso!j_1v8btZYk-sHse$9C*&Ru+}Q^-k!u_gQ84eRS@Vq#nt7^%z&njb33(UZoS=>d)s5(mIqFOOb_1tdU^S|fb&92ws@m!9~Ljm zeJ3Bs*tk6(bUgI#pijHbC?0zhta;>?Kys2#!MU}?{{8U`r{CUAd?e8ATSRDcq+lGjFvO{`yc* zUSGa?>c7+e@9*#bzx93Fw%5r#nn!mn_*DPTcl3lBFf%Vh96xAWK* z^IpXkzsn^y^BBB%PDV}NXwvKGV|Z+p*{KdW+q8(Vc&}$YO3!v}U)my>+mdk3@_CHc z#wm-YOmZrX-7aZ%wS1x==Y(CiuH51{X!*GP@Wa#7k4sFRW|N*Whfz#(X~LF^QC@rw zL0zw3#dKU%X_+zQQk1mf5u5Xx>8ECdd1W7FT{>rK&u5c(J@IVWWS^piEeu_nOP3km ze!K1UuG2q_;`}s^&Hg(t(5!E|lb6>`D`{twV+H2~E?T~ytK5=nb~tUfB7>y2*T={0 z@y;DfMNUPW;w<*G6!PlHTp|**a$WZCg{(~T?Q5?Ed1(Hw+kN}4XDS!t?i(F9vY1<* zOp)7PbI~iuT>9Kjvt^g~^7Hd8h5O^}>VK4WDeBB{?g`RV487|m#B}af=B#U_u4fF{ z&RGgLNs2msJ#6;$&yOD_zL%#yS6Ld;RkdI5$!kU@%}}qcxrbTB=LoxbGqxYJlz;3a z>91*-zHz~%AnCPBRxSA?5cENgVM78>XFxzumg-J{oFt;j&%%W^V7lS8Fc4=H>BnHItpHW~gSkj8E@%rUvnaz8w<5UE!(jtldW) zI82XSv5Y?T^W*2`Q-YTk^3C4&{`ay`0qJ*issMZ(rqi&~B^zdwX`K zeF+c?UboP=U}~_FrYpxWEtXYNB$!fkB8qx?J}%3YT>3dFbLr;gclg}0Cl`Oa=K8uK zm6u`Z*4ov!r%YQXZngRuoS4W6+(@C^+qO zXh{72`d?QM`Mg@rb>e!+X*01kRC;z&t z9m^hhb&ro`;v8cy%ja{7<(tk+3a+keG@P|@m;4l=Dmnej)4e+s)ep6XMCF#n?%u!m z#l)bc*IT)-mcC6huKE3Td&kQXE9QjT+boZsPAxj8nOau8@BX`6+gc~BjjjLny0U)r z?YCtS3QxD(EO~xfmF2zjvMEZ*rnN__MVhkiIw?;7Y{Bz)mzCEkgDsOjKM-CzC!lR< z#e2T?=S>$(d@oBVJ`sH=oUE5HrQ++Wy6tz*e)iDJoqcS^vc6|q!)E(lFU!3=WBKy* z2BWxTr!5r2TIOqb^n^_jbTWLtRXJl?LhZcT^Li(kG;7(ZUp|_Z{DA2n z)q6AJk2tPl z5^JNkm`yEcwDb)=BRPB9&b#-`e6wSx=8HIS2rDfQ;i}up{`R}|u@&DPJpy(AdwHyM zxjOaZuMWXQrXq(L1TLG*NM?+<)+)>Y*kbt%jzrDnQUOX9v44&FrdQ4r)OMN5V6gB< z-TuU$IeqSD!r9Yrn=u3wJ39ASK3n(p#sPuiSdrMv?=@VHe118nIBr^Ms69Z%Y?gm+i&w7PJ7^F z@%mb6otpd-SvFKxxU9cbiB{<26&al#yhVU;7Z~2yxnOAqMDa{tHF*x?-`|o2W z3<+yv-@cj??&Y;|i3n@gltu5YmzE^=T&$WG{{KzmgkvUA@x{%-UWP|9XNB)J>rtAv z?R9?Mx2bbpuHiKlQ0Os^OX;~7x!UKjCbQ1@_<8nqzhsi${C-$tnZ5M??fSp#^Y8Cf zWatrO;5fK^PWA(5QJEtW(^DVzWUbX~=+|WXZgRH4ReR}F%@&LI$vv7?nI-l-Q-l_q zGA>$r=-7v4nXk%At>z|iM6KMGYc|{W{&%mJ65wmB`<{QkdEPFaN7HL!{1hez76oM` zF^xqb(h5$RSC98Cec~h_xUQ;#Wx*cdcaue++M#uUW;+~)A%)K`sA15e4dyRIJo{IJ5}!mb5s zH@-Jts@i;ee^teSN&3$>7q65zJGDY0`1XspW!C2}TlBp);yze@yHUW(|Fem2$Bm4| zTh|?&eoR8a%fpf5h;@2Q!TUQf&a zxiDp7&@^s~?N;flIYhnZJzFUhzfL*dEnDPbnl+E}{9+}RV}-vy{k;75?%(bA_wL?# zvsJ*p?$3uGUzX-ZGg#?g`}x4R$F68ykfYiT!=l}{|2#V`KU1Lo*Y^K^Yb-byE(y_0 z(^E-h;*eebv&c@qUwwPn?_%p5v!GL{Gw09euf3mJZo63em_^cG#|_u>?@8U2;cFL{ z@jD}#EpB?gxZ~aK_xJWyf6t3L7GL-G!CY+)`+wiegH{}VXmBq1y2<+cdmN9fvdo*6 z7CG(l$2mpo>WaNQu9q|wr_1JQc(1X_-19l6<7`@R{*fs`OSeo}lpA$<#xKUKS7Rj~w+HnE3gSVFjMv1j7Umkpt{e7*| zoad|?wZuJoPH3!6b2#Q=dpgNyY8}gxl3MMSH^2KfM+JK}*6cj9{PIl0byE_j7^>a! 
zdpbvA@f2>wE8(1O=XT!D=`qt>S{j+$Rr<8Cqjc^ylU$C%na}4KJDrSJ;omZ0&Uyt_ zjR`?QPC{JQEOl1&b}GvFtG`}zt!n#i#kBX%sn3GEoH;snE;8R@WVv+4CLJL`?T$?% zDlIJn&rb-7Y{ zN6wyo%YVQA|KI-mlDZ#{noJ8~(rD2xQr+DAAjW%ckOG6-Y^|mj)+^?$WqL5@^R;6S zI2D&B^fXr1STich!B7NJI8XMTUT) zuUUJ26xZ*m`EzU6)tu8oTuY}gXg;-S{Ifr${i!Qoqz3CTmsG9-x4HXIeKB}m5gRCQ z)g;!QCH}s-JEOqH*RvBhzis1P`sZaq!MUi{r$qf`SU#`49_%zvPQHKnEQ<@#yar8-M@ zJh0N|PJi?4+cuFuUp|I=op}5A@B6y{zpt-<$KU>Z`SWJ~%opt~0m6xEN*~))OG^mH z-k;%if6MOo`+x77^6BH}=b3TE3om_PF-$(Cxa6{Bi-B+PxlVygywc|)uGaqf@wmMH z?`i+~fBp7`d|sVp#yo!y?5y~^i|6pNm$NG4_SqcGw39!7{%Za_p<@!}v(J9#f4@Yi z{@>^MjT3(VzTWgXQ^vloX3zfp{|~>fub-#4`)u9*+goF&2c44OypTC->aP-;d5jGK zT1$nRjtBTE|E#bvICg8><*4mF9E!7+ZCw5Jo^z)Y58n)l4^i&Ffurib7Y$=p~=iY|B`?y7HhCm*&zitD_OCCU7?dO$(l< z7u>jL>Z)AB#3R3Vtn)rwTYhi(lqr*9K1d|7h6bGKG5bCHZ{GIRIjqlZ1vcG&D=;zW zoP>GQ@hM)8$vxMCyc%b4wljNxsKGC z({KF<+kE|N#S$0IQ2(^Gy~Xc7KR^H3MpP%J<8NMhb@k${%ci{Bc3Z$X)V@fM1D$kA*>F&l86U>op*6qX?%tL?kH3BYzc0Z&j+!f{a@p1V`cXM= z`t|kkZ}ZD6A8uN6?x)wiHE9#M^0Q{SX^94VS(cxBS9UvpUv0IWeE;#sX*d3TIK2Jv z2Fv-!m?|T8Ot@ll>eka5tJEWR^V}`@6}j}b-_AX2tkQDD)7)XDWPi&95$8ikEceDN z+xGU~hu`=2*8L6O`uEfRUy+TRrlOm9TkaGto{C<4=+C|CtumJE3>3gMwn^B<)1%F zek8qp^(Uz(C-ZRY1uyN?HKhy>_Wu9#^YSj8V_Pn|yRG>A?&Hg!`~OU@ulW7$?uMM@ zr%n8n8EkBABlq9Cdr$iEpC5<$*T?QrTi?sr;AL+$*T3|Z?fGEMR>P?CYR4oW z`)D5BSut@{dwV-*-FlE4=Y_`)MVz|=X1ST)s(A0I8B|cb+5Fg*o80RgU6zKd&yGzG zGukDx$!jU&0;Oj~#{~Y^%zs`Pc`enLC(7fP#uT9%iRCLd@h)`WIA|Te_LSyPtM^Gg znTzl5sVv@nzy8OMhd=+sJngH!&n)oVCe25)@L19ziDX{KxVMki$S&WwWX)E&HeRQV z+stAGxutSvZM*2D>XLo;r;Xp`iqzj;3zsaJ60^~~gW*8yi(6qHew>wt=Py*w+SA>` z&ABAAN|WVjOz<@BY11`sO>ye!G>R&GSjrUUwQ!1t=I?cDi^8-j&Q;EPK4qd|qEGN* z&ma%e^ZVG&3ZG)|xL=-oTdX0*Aj2(8 z74k`I{;t0?t1wDHd&njG`9bEvd_lc=*4HRNAK8L7J04foi6LD z2Tek=yu2(Ek3D|rqIvUend#lxEdi!;6n|*VTHo&9GcjP-&74*L*Q_gTc@R2p`~CZO z?%plZV|gwq$gy(f%$`E0gjvf9_gqmG7GIR^9(Q@;N1hu7!dGdgN- zKIh%znOR!@|JA#9>-YjSm;bRUSy=iryngB~i!%Wl&wUCQHr#vGvnk|Dd*o@`6)ZNm% zchCBIowO4F{}br%2e%dd3AHz~xN=2ttJxlBj3M^n^+ZH++1 zN^b{EAt%dZ22$&Mm>x4*myV3=dz8h?fTbM<@LL7^9FJC+da>CE0sKJ+0H48wk~WjI2Zf+ z);3|UrBmeW?b`$dPM2MNF7j=6-DcS-muk;#PrSe7R>sQSsAYm4fk9uKkMEfAjFl^~ zC;WudQO1niAg(EsBr4xZ1o5;VHlFsW{r=yFv-9uQ{`&FpeEq-A6_)(7dxI|h5(wf_ zG&t1|q#1I#q_kOf(Roe%&qWqC`kybE$fhXhlaMfInc>zW|v$O7Byv5_(&SBWLEc4pcdFPLR{`qL{ zSxGK~_neEbFE}bOdBP@#z#!9Q7ni-X=r+8zMe6;XpI*PkS|)VmBu-f}XIbVdiM^+v z&M^-Aw>M~2)TgzZw<_;hl)!oC&b^%Nvq8uIUN*_fm3t?@YaOqmOkew)jaqLOPB@+F zqbb|({=DX3&~EcHl{fefUno4b-@Ii6Mlhxd-rZ_D) zB9hEe8X4Qb@?KSZ&gZ%twl;T6d!_uB|1H~m`|b10ZP(6y_TqRLvt-I4k-f!J;}%X? 
z(3E*Mz$v)P|NME&bWN3Xo9Anm1Vsrrz51OzSKRWtO`|}QfQ;bWt%j})D}ui>9nd&y zxxVgfhnCv=nuC|V?!7dNfkQ3m-1E~rvof|`nsIs4n$X4ZTuWMJc-KBk>X0nrSZK9f zL-m+RN9r84wIP~EZ<)4q%*ggFJ-2MmYq^jDN6pm*RSX%kmYvexa*bh$h>zv^*Ps4; zdDA@IDP(V|&%_|5mIDu3r$nu0S}NqQWXjZ=H@3~4ut-b#$!R+Y?~muI zC?erMefja@Z!Z*|2MO)QMx=on8C>f1UpS>-zt%D=gULv#0B{{CIae|NlGw z`t(_9EgTI&ue|1+(VQBzO0wCg>M9S{$|;ZLvurYUuo zt>5b{|NQ2T63diB&ohrOd9ALDJr!JdjejZf95fv1ix!{dGS+ybM04(Nb{F`t>ZG)z?xZb2U}cbvQ!X0;h^E zy}~Wkd_hmY1xxx2lkUIU7VY%n z^TPrg8Ohd#@8#af3w(ZAvS3Qmsr~;x#m#OLcr{1wIXYt%t-RlT)#Ti=BPli+w+=pJJxL$*pz!a z$m{A$rbQv2{{6dW)y-P}=cD=kn$Pd9$J_sV{r}JWdzLFh88=1AzuwF>!La&$vww?2 zqtIf1LqV?Buhwim{pgyh!7=TIRr|6-tsK{DR_;`J^Db{{h+>47k>-wdpY!WKC-c;8 zJ$HNQ#a|B}ew}TeUwl%LE!StM?b>cz89sTt8jE@<+y%)UETkCGnR?*yF8e4^>5kz@1~p`#dhaE|14R@ z)OD)vj3%SP^EpA&imU4C_zqUTmv|m~KUvSPP2%trVa206t(?W0x)Y7>yvggyT(kA{ zrnqC=iYiMLT3790cL@m!dw>09$*g6SkQD+cJaoUhleb)$jA0=I@3f zvQf#i-llzi^-y%7!P(0;-+Nz{?T(#WewB5VK$Ae1rLCQv{^=<0vf2v449!kIW09O$ z(+uOdIyLR({Vtn4ox&nMr!ZlQ$ci7kdn#w8J)iSnkKF5Dt7;Ob2)OVVUg>nzoKSSg z$9SFL(kTW@Lj)urFJ(yIvnlI(={Ln#&VnzqmN{{*+~up&Fr(D^&9>cd(>CAbeO>a< zDPu9Gs+ZSN-p5@*+ah-Bcz;kls?qY`kcVdW1Ve@;bCNrzeuzvrZ#i&*KgiSNi(ZJV zFNflDmH@3%4sNZbJ%*-Jzpwh?;prXfRd{Xf>eMNEpI7xJ_8b*h8_l5@+V334^dxKP z&G-3EA3krIGHG?pX1~rQ6ZTa&t=n*G*TQvt`=joyiOqIh*y5$R#n3l7>v;gP{c3^5 z>n5&=4L)_X%EG^R?@?~WDIw;+Wf^)NKP|B6nm%Q9)fFwx+uKB)j!PWRTvPGlZQ1hO zeT89M8Ef;4m$hiD&EEHT&l$zXKYwo6S9RlV&PI_FEk*05Cbh`Upa1Xi`TCz<|LPxq zoH+}4&W9t(2tiY)3gtbE?`V9v#5 z7x}ku-~L-xwZO{m|A)KZ|G%C8pF3$|ov$IoBB4nld+xul|FiA?zuWosKfk;z*;i7# zeckEj=ldNLj?dw)`}6hj^5e&w!WSwyTvbcbj5qep`Vx4yY@O2)o(BE5*Z;>JUZ?*{ zD9m&9w2$_)jTKC_SiNlJwQZ!igKl}A-1kr3YvqG}?R|_J=bTGleD(0%b-}ZEScLDr zE33V3XD_>T(%se9oj%P;W@Vq_|9scQy*KY?tleKzDWkvm`@Nf6vwOqV7MI0cw>o3E z*KnEXv5d7E%$k*szg~WRzWlg*w(V0N&XrRHnj9O?*12;yHcT-Rc;u|nYIsg_?LT(? 
zC2S1O|Csn*tMWQow)^8f$>cY?s?vkKrWBs5yuH=jeEa)*`}W4@{r~s=f1OqAyS)4v zPnKUkdOVk>?)+tz)`EpC6GDsxf?nG2L|>M8{$|&$#~+Jq=Cw{R zH{Z_B&wu;vvQ0eao(C_L5O!(-9aYh?LEzBOnpHvvmt}G&UR(Fs=K7DRbCef`aQUhR zdp(8-DC zt$Q7PcgvJX0#07%)_p75sq;GY&Ku_mPF|WxJ)dpNdNa2gdQD|w-Ta^_P%L=WmwR)9 zMWup9_7VGat zIAz}dYHq&!Y+5j{Z_nqyb*GPtCW+TDoSHg8Y1wMuc|m$R43C*C^IIC^HO2Az_OjB) z7QY^FPm}sEvG}B9c1X15(dzleNqSR)mM)p#RFEmv+ws|CeVOYKFYlsry_ZFt9;`9C z?(KVQrSam-ISVH&yXbXlPIAvh%Y2`OEzhRZ78u4%mGbh`c(BJ`vrXXD)67|R@p{u| zX@#$^?NB|cvQ%TKhUV^d*|WA~y|`XFH7ID+nysNLE3D=QdF5UXv-I41U%vnRIaA-u z0jJs)Dz`kDQ*@%mNHOH-FA<%fllxxxX1h+%=*om(^V_tk!%Enh#yOJRY4Q^85e z(=*n~ax*CRa7D=;?wP>EYV@qBuC!I)re=?7vRF{isnXREn#-R*uZ&z&esBG?b*HVo zPkJ)yir(j&{i!Ls%hFSIYmjRC?sZexG-B^>-Tn62v$XB^^E4d;nNNGGbo{={wvqQ9 z`*VY%O)p=5ES#mLl{$4kpY!%L-IE@bSOs}WU)H<)<>$v@+o}5wEju-JJ)^#rvqA); zfB4Sh-IMkOc&)p8wY%u`t(w@kC$HOI-4yb$bL(bj7x_P>Ql4HN6N>}IPuqN6T_T~O z+Qa1}xqJ8C+P|+H4MnEpwEw+*{r#j=O{C*oEva?%hCgzbV^g5;^ysjN}L6nS@>E6 zit#C*lXU%);?{iD`xS7~eCZUcT=7ZfYXhBx4heYoy)b0BFy~O|qy;LfM>H;%trZCJ zmTclSG;_0le(a+S&*2S{x31-EbAEbj?deZHe*FAUGG|$-%<|`#3nRl7*S*Fa)wwfp|tZL?psdT91NK4NL?`}x;Z zXI`UaQz~D(dHF5B{Ih1+l(%KOmt}IOhBk$AIqH=DT{q>FWxU?vV!j5ArA|DnzrN1W zTq|Q~#3&ZB6b+N!LraUfyGozPvDMZNFxiWBv6~XZt;se=n5f-aWs3&Z`TZ8@(1xpAy9K zU{3Y>;5Cx7eUqau&zN(edcL>gn%fycYn?Pz^_EQ7bc?5mhhfV(If-Yh=7e~0E4EDd z`g2We_=g#WS66QqX%d(ubWDU>z%6oWq_W7Vj6Too)RM(pN<_Cmk2$4zj3r{ngQyo# zR&BoaIcy&{{r`L~FpXpF7R9IrhJe>KJ*PM6urA2Gz3uhfSk9%Jx>mk;y%;Up=c7ae7nsd&;HHmy6HF?Tm3t_PIP|*I%bI zmQSU!xB_m}r*Wh^trQRvJT>+9X$$egY04Jv4oCK^(_T1*+fnnQ-Y)@*%JNzeV;=>Du?8*mAL9>Jg!u70GVLc3cVabY=8v47tGmey#V#^QVQH zU*F2J&M`Pr8TX@X`%xyt<(YFhrtN$C>)aE8>5*2?%be@$%QxT8o8>0V(1L<%VxjH+BCe;ng)qRj;&^Y&mCr#@BoM?Y;aDA3t8Me_s7q$mNn2hcgEySRBsXJp9-8 z`I)HKYhu@YRTS`QTWIckSn_qd-Sw7=V+PMt=gy5bZ+2(WUr9N5ueXn3sWV(tB{YgcAXnKO}T z=@bzSP4y;&xMPPu3mQL*N#a?4xszKk>4RO>`y>vA0Co=1_}Na+c#8O5Z@PBmhSi+4 znzy%}OFeVV>V0#1OW>j@NmHgZIbW~+{qF7V{Ch>mg1Y4GYpVYLtL@h|~h{@2Y~cKM}E-{YS(nyg7Zmdn?7 z#$R2wYKxeY*3uhStxv2n3@XrL5pGgEWaIz*bY*0ie3MWY$4SXhuMJ*_2V%T9HG`b? zem70fS)L=c`Sx3b=^OXyP2Z*HX=p9~DsV>7C9n72!E+S%!>|6^wWH@|q`AKF(knX^ zmR`CQ(mS)5&*SATH;#+jWEby=<4QhtEiyaTE6^j7OQ^xiyw`Ba6hW@#EykKE(}OrQ zTLh+VpAz)P>am9=bIAMvKh3D@)eQn1A-`plb&g5Y_9!NuJ0|h^P1bbnmx@Ovm>4f@ z(-nAeYn^G;jhuV?suypylIc^dc)#ZfkC&xfpW>0qILqf{rKTtQjaE&`>`825zkfs^ zOMoT0C&0@{S#v5!&*xWbm{>H9T6$|v3<_OZeQsvJC6i?$0YQRXJrh^ANOnvK;wmgI zW)9Gs?(O(ySD44nb>3Y&v(jyTHMw#rP8Es~V6r^=Lit(JmSsJmg)IV$-nVbh-}m!( z{g2|F|9`bSg6o=cdjd;itE;Pb-+dR<^ZfHps{rrb=LdG&s*LsEuzS8{ zO4PHHRxc+vVnk3Q&(Ex@zM4Llk4E0!I@eH&BXvrW=Dpt{ zLK7wh%PsE~c=Ybwx%ID0zMbB6<}yczWb&ai61sDglTKC6oPPM@5zEJVM@l1?S5_6* ze}7zlKcK1O=C2`?(yw z4aZMsp7lO6`CdO<#x%0H!qnJrrWV(UMaKB`Sq@7=bd}^4AkE2$}x}j^6b0$ zVp_4J6Q7{-m!6FRk9OUVm^>v(V%P59vzHlKCi_J^{_%=+k5d2oncP#tyFFFsKb^Do zv*NKiMvvz#edE01^^wxjYn9LMJgxb7(A@X%t90A+^u6CF26b^VPRq1of4*(@yWQ{i zz1{V!v^;;y%ineTdivw#>}#uRWcrssUi$L)@7>{&FLT1z_GI3YePnX(+OLMhInV2V z-~YdE>bu`{n{O5!lw7KLd_vP_If3J+T{PdfpEbW<%gvhP^VnpX_o;|ArCwT25gnQH z(w@sYWX{?)Wy=}wbCox~%zhi^HPi6dhGP@vc5`aa(%?L2@woGdz@kg9RHORJV$XA= zUYAVx`s4B(V@(g%!$DIfED<;+`Mk$SNyT6D{O32dAMS;k_eaNWtvB=CJaLvl+00{? 
zKbBa|=U|w=NW>$;$w$$}$@BKM+Uwc6$vpGz>%~eL|1Cmt$;tm|s&U6ZxdKjyntGr+8^>Ufv zwOwb^PD`k^RK35Ry6$9$VM~vO#q*wErj`Z)reH2r<5Oj~*BVvDJ*!;z(k5-|!;+m} z4nO;S|Br?K>!&4O4jP{_yUpmK*urtJ*h|r(L5XE`{{Emz0+WK0dft^uKi^ZVSHaD2 z_(Jiy*OP9~SoZAYna_K4CigGYV=`89)o>`*o57trAvkG@QRw5%leaGPi{wze)|NSM z>$TZu*%tadmPk(NSuRoRmz}&sARxfgG094P{``6TJ)d^Bhig8&S+!Dy(L+`F$=STm z<^Ml?|NrCs|1CFjlX_Z(yfPC5xD0|Uk}aRkU}6mM%2598t8c-6Xj-bGrjQ@U(w@te zXCGI3-J9#T-2UJ1{qp+LckjM!{{Ego(6Q4T9Z{TqnFnQ@rtErnZEgGGlRpb~-`-dG zIiNQ5{k=W2o<04$D`wfo?2yH&*T2uvmQL!awCbC^W2^?P>ip8fn~jnwheDlW~fa(wD~$D1;3;$JPCEVw+CQ*-K^;)Az$TRZgJ z+q!J!46mi3dIuzOcfWgDjcyI~g=iZ~F0| zsJF#kASmo^*}6iOT+M*2me{EWuB{O`6CoeF)HL}{wR}^4@6Nq9@87?B=kC34vG#qB z+xhD)W#@&bA1{!!~qC8@O- zy6xRAZ(sZ6T${j~S9ATHyc7Zif=+!lYJU*y1KQBGdtb2E^Ep}H)Y8vLT>WmFtaNNo z)%@VnZZU=vlCrm6ttmLy_gEs!eCnjw>r;Ymo0*%N>&y2ToIkzG@9>4vSn=R}J;goR ziW}~~?=@`NY!%9_Sh4@wZ}-1{^0(jq{o7hGb?;hE&1as9cG-(>ZFAH)^7}zet<{{* zr*|=gXvrt8-&mn5Ty-wfefS9MQMDo&fTTtaJ?+(wrC>6I&;JEER(`d)nz`@AT0LEwfI z7pDWi1Md4TRdHBpIB$wkGEe%Q-wiW%uhVC}e)X8dtKARhF7Mgq8+`x3F4ml0!zj;8 z!^Z|ZQzlJeUhJY-TwN{P{Qv#`zyD8Lcp6&mW^(8k1vLErOiW6{aicD>)N^=QSCYf~;gEz(@duv6_;_S(J0pN)P$kQBCjD&TbRlF6>u zR$kekltSM28}8bcZK@Thow~MHvsFS<;9Nvw()F*Wr|ZwRuk|{(y|*nW$wyWBY00eT z69UYAuW#$wSUJ!5(d_N(|NoT#U$|R(&-UxhGntl~9G%g~b&f0PpiWDGQ^@aa#rw8B zpVAO?sC|Q;_jQvs2ioK7e}4L6;`i95|J+)6yFXg2{DvMY@9o_u-xjBR_v_)qtFO;7 zKBaNw!JgpLHm~14o1Gsoo!Zy_Tw-rn(T(@{@8Uhr87BA4)K;8XaycmI$&`YqbEmiX zY>rNE(o}lp)MEH~Pe!iQ+C9bR^kz)rPn6QPTt10=TG3TSrq!mL972b*bN1JKRg8}h zzg^}!L%`|PsVA>jPcw8{U$=#8mQuXuw}4kNn^wJAe^!zqX{GhHsKXjdug;Im-|g=o zzain^5lc=(VK3t(ua)v_$5j^X;ECe=xM%dGlR!>E)LtJ7a=+ zd<@RH^cWst3E1ZR|JU2wiJNWX_0G3{UVePJgcoDhTK5CTkGr3X6ivS^a4l7iafv{A zd3o&ZbC)9+ri$G<@i(vd|1Y~AC6@k8r)QlMD&{*_oxM|FilN3#p@`{~vzC>XM$Mih zlrV9Q{hj;sm!E!G^Kgo=pX6i7&Ef?O6}37gmZpvlOqAtL*=a>((XDHp2tR(ySR z)&B2e`;4X6O6M*={qV=X8jHW5Uh980DXl%dlx4x`Jx`c?FPrB6{&n_sdwaE$r|4SU z(^WQe_k8~S_w4(BAKGuvo-N>fCR674=Z6(m^PD(hFV8x5W&4-EzrV9|m@RwzYuipzxeyM%vr0J>ai{rit_aLxL#y--iskd%%U^2!8`pI2jnTVolby@4wCU=ChS$?(ggui|WcBhhTzbY* zS>#oN@^lvliM5)GOK*5^D=cj>RMS*-5b!>C#NGb9_;aS^Hcw+dKbRV{bisGV;;%e@tU@Bgnae_S{`6g31(b2~e7@5k@2O_y9R>@rD^ zRk35onr@LGj)k+^1T=P5JQwV~Hmh4(LjNd7fW_l`k?h&L@u_oyoiq}CE(9%Dc7sFn z)g=ArQ-aobB^;YkY}PizuvqWcy7V)Qn!TB=BATvl8Ve6dcy|PCsC$vzqh}#sb8e$u z$??aZH{B|fTR0_Kq$AAfQ{}AVg>$B>Eq&Whn>{MEf8Uk&Ergy6rt22~3&J}!K`S0uX8)bJzwRs}Fw%*KI zd)9Q)@(HOM&+QBPv#WQEA}(IZuZlt@A#bl z_QOX0J%;BpuJhh=Kea?)*Zb|aN_vVfo77%kD#73WT&hyoSz`G_fm>y<*S{ZovF-J? zpn!wRPwzN)+Qc^|;Ihd<_SbJ7KYqO2zu)pXlZ)5I?|UmMH)%h~t>65QnB^Ups&^=T-HH9x=E@Y&bHuearDE*AZ) z*s^DTe)(=)&8H=H$;ZNTyII$KR-K+2dnar5wyROPr)~PCr$+Xe{k|K=(a73=Jo&H6 z@+qry)}}cMGJwvM*Dkf1_y5=R{qy|Nmu{WnwFiRw-<^qRRs zIh{da>NdAb$Ame8hqUS%E=F-_Cda6T3QWCHdwuzwY~zCxo6qq%|NnM2TJguvnsb)f zOF0#LGkXlDO!)r({{H&^cFFhh%$+!vhHxa7om-pTvGdl({^g4Wigw@q()+0A`r6sw z=LrN!dR(?xHbY=TR^jSNM7D(iRN zjxkVl@%^>-Y?$ijv?EbcngXXhS|>zCXRX!ch!I$7%3N@et=u!>RajTB+bPp>HxI?7 z=N^j$Pug_z%UrXn-FLU;MsGDUR8x3RyCTKt9BZ=9^IzW8$0U*_1_d44!+&n)zW3kF ztDO$CJ81MO9+|QvAolj#{Ic9+4gsBt);_EBw^deiHJ4u7xh}g^;B!z-|l?fd>LqaPu-7y)lMEC;IlPnQOP^1a~wBUHf{sZu8Tgr4rXlzn)7wGiB0QgFTghUv01dEdBh= z)1u0%Dy#W@*P@QGrrP{E@O#_F*SEH^d7ZrY|Ig=m%an67il;FxWAg8hKX%#j_!ghd zv0~5n#9m+b_TIj_;_B_^B{v4Bs;Vw|;=?f^Xi>T0Q5oA~b(`->Xf{V5znp0`m%&)% zz#Q+T+{{781h@;FR_XF^CGEVFwyq|Ag;>}6S3S4(U++40!`pMW-NdQa8>d`q`q~}8 zDRy18A;SvZ(sL8mN7dHnrpQ#LPf^3ZH~(Y~-NmqFpF$^7SsU*=2? 
z<+@mU_xASu{QUR(YJLW7wLSj0aI5yxJ*HFFy0=WxdA|Mqy?+ni*WKUpR{sB&_xAt) zoKA8+=fJ^wC~HY}kWlf-@C5!Kx83`0zb&_XUOCh2SbKZ>uI#;g_x5V~T((^5WZ~{| ztg>UC(v{P*CO8Rgy#039I{W$huOBTdRI*DsV|XlcR^0x|-|yaCSv(`M#0SKTt+{Bd|doJPV?!U^)k`tb_SiQ zo*$&saj#uKDAC6&u=e7$o2vwWe0bqBK7OJ zqZ(UR&t2wydB*Q{_T*W|k3WyEtF-quPF<7P6BNY}_qF({U-bM1bAnbTy0sX%{WSf| zIUz-7st7B~f`pSZ7$*vx(p(+Y_Unt`F^-9~*RS>+-2GthVJ5|vrX9EQKu3day>{&b z-?^C~wv0x;o4?o1T6X%U($p_{0bX;r9&Gozn>))b(^=Ab?)lT3xD|Q`L)(v^rWP5+5s7*;GpW~b2mG1EY?ZxwN{X9e_NJ4 zYgym>B0mO}i3&-VHO^@|FfI^1RlYCgTI$js56#|7K}Cl$I1c_wSo^%kGN zFRbXo<;Th3;1{T^y3|It-!i#(l6i}&2E&tYY5x!N@3)lgf4{Ht`-fkO%Xt`P1oyNQ z`f(T?+Ee`asgq^rz3DMqu0?5{no+FR!q9PV=cK~@x4DkZ`F!nWri}jb&o;4hzPEJ0 zU6$`Nx#(HUp4#8vZs+e`I>qDMRzuc?|7ZE{AAeB)^Q&z_<>z1j(zbgVs?7Ixbb4`t zH~03vyZ4sP*t>oD>7S>U`yW@9@jt%&^5wkE|DS!m{d>1HlVq?!({q8KgH4vI{oX=7 zQ}$h)G|(dm zQ@i^`cYh4po}pWM{rSWRK@U{Rg->@MSZLSF>dh;ob@8*(jt4CbW@rB%K5W2K`fiR< zoL>5|3XYE-KRjpzRdYJ zqdE4ijlc8#z183IHs8!x?7#eYme7pnJA)!y1p+>Btx8!Vawl(lkmfnnDPFH)j=dCW z`v2wOzw_o73=N-jrEf1Qa)3u*1mOC5Np>^_Yi$;g$^EsQ& zZd+6#$!+Ir;f^^}mGnrq22KV#9^XnQJy*HmQ2=&FHdVN|D~0t%l1k zzjV<&skwB@UVcgA#WGwktJn2vHJm>mo-#jZmENu&TQx&^xSTwC41Y`rx|j3o_wU`O ze|~y+xIJ~=t1PXjE*yV9JbZk4`f~wcraAiR3_I#~?B2b1X^$Y&^Ups^tgd>v>1ebl zOv!2EO+0dQ>lCG_X+8G#wtqjJZm;SnR@)M|Rd=;(_vTrN6D^j9XdJ7&`he@)>C;V1 za;1({o?E#{vgn*?NBZ{LR{}#@4fek2_r6_rwXs2!^JK8s)uybqf4A5F-X7BWdk&+3 zm*kX7CTS9EN0dUZ{`K6wDJlV8OS@H0ONbq3B zoQcOIoEQx9f6bdRDQU$4dwC|IO*h}%N=%p|+t03eEW{Apbz(i6`nu-SYdHw!o7X9+AO!#Ie8zH*nDMY)5@T8(@!65Z;+f@e6P|)r6+I}r^C6Q zMsd$7wN&|?>&wfx-^_{ITP2#bXiDgssawzO{`z%w`?kz!sXu@GER0=asQcW*^TbVO z%{hUmBJR)XiCDQHN>h;~%_jcb*CyqqdweGUe;t2+Pu1TKua5oRqGzW*DTq@skSj|2 z@|N6c*J3)43(1jHJYqFH$*A|`u0F$Q!FmBnJ<~s19$Qg*+`>JGL)Xhw@%#4e+e077 zIWL)F;`Y6E_0&gA^Up75dy%$#XOHr;Zo^}&_Vdo~zWeUsw{=VY{P|L2H$SLrS*{a@ z!-*{2nBoT1L?u%Gp@B$Hub$-M{Yi%bKPjP@A^2 zR(mPSgBHWZzUO{^ejJ#+bPAio{o3DaCuGH5tBf>CbDMd|$!qIf9?`4C6?Omq{OE}I zl_$^?E|&Y%Sk-5a;mVX_S6(f1>s>M>$%w(_6oBAG07;2|35jbb*)D@&P?aiLmYnH8h{cU&o+CZn3vjRLc zzu#khZ(sY%;%xCDjTfQeoe>QJnki@JtbUfH*OGU}NWcC0^VHbe+urV4=Tszk@fPa= zgJai9w_am%xL%rjd!Nq9JDZ-R&OATeFx}+Vk{x=l*G!!dB-b(L^}OE2#PIx%#M?RNEkn=yh?*B2<8Zic!qOGe6u64T z*lXSEoVQV43wx$ZdFZmF$>>jx{`>m+`lTYK`cs6IcJ&!YaXf52`#8#IUsXluf-|3e zI0b{2O)>r@aJZB!*XMev>6bq>R_8DG%-AXNd$z6kB<@BIFHQ#8niHB~#|^~i2uuoo zw*2{VnaK}yp6=1{J}Fpa5*21Nqr5CKM>Azsnwui~anC@nBER}S4{!he9sW9pfBE5; zUzVCWaf$6&y2NDJ&9~cX=TEQPH;3=s?J35`QhGjpTzzpy4ew$dmJJDRYgAXo$o`Js zt^X-B?*G^G@sTnST&^uzYQMRn_X*uzS2UqYcg0uk1mOXo0 z=Cf4B1x?-8OjsuwMtOa!vH0`n^6$#1rA95OE*T9^=j?dU(j*Y`dVQtFvMF!&d_VWJ zXNp7b?_Du^px(>Nk_&Ez%WhvQP^;-!`XuPmrgP6fYpSiuQ`v3x-r4JBl=_Z}ufIf} z*KEK4|Ka<8cE>+F{ps!H=`l5l?Z}*BFHeqrxAVEDO)1{HZSt(Ga}AT$l>OzJBl~OL zYioubF?tS}jfZw-L=~5pZ{B~i^}+GNv?)r@M1&a(jx~KhcQbV+_mpEZ>i;aSpA_`h z{{Q*?`~Lr&9==}gSE58)FMp#^S#j~=B{6qjIakzG&3o3#Am~(@ds=dN&&|@0n~l}P zbPSIQ>tz5NEJskzbW! 
z$u#YOv(HKaort%en;2#+yPLb+bkz)_V-g30{N;s`6_+SXlVDzYqbU`bI!FwnPi`);-xCdTTW#&elqc$GAV1KtBz&rd*+2p*mRdonKH%P+KQ6ugeDv)s~S%;ci=a?anU)4yNaweNlC+%w;87l&laCja{M^kspK&grQ94PGa= z<#K9TJ~j0}_gf%$SfA?>bj+(sKO%|e+D)0@MkmeOp2Qd-r)il* zbzkR6<#H?y$Z(AKJ1clvleQ{nfa=sq4wC9D8Vj2~9@Ef#YU2AkOETH6rKyAUZPxTs zP1Y@!d@mdMc^`b`E7RtyDIxq!!YFC$kt=i5a?e~SpMBOW)kd!G>*wI*$4xV%I3^k& zlVI?ws=Ug;buQwoQ;F~5DGXkgs?Mt=8%txo92H$ImpGjYaQtP+t+@5t_VVpPUMr_e zxx_v{NT*fq0^MloHnH`Q=s#BNr6e>7z=~Xl?>Rq|-$GX?kOP>aLs7j`KX&;;+ zFv;80S6R`4GqGoPDkIyRk8^@QoB%BlDR1rImj~sR-`ld|?%ti)6nlSv)eij@$;(mF zW#;BR-KQV^OnP29*Zl9TA($G#X#cC_J~f{{O#c&!!ojYc04I?6mco(}mx^tyRA+_wSFFo4+hmwZ}MGrKE^_IVi|p@f4bP_S7Is?u`cuuG1izq}+urt=&3@}v zFsmqLPHjz+QM4}%2 z|9t=d@9E2rFO;9P?)9u|J9n(h_F9;G`*-;If8WaY*Y2qQ`|%>9KmBuS;i;V&KIVO&ckPJDU_B&retDrx@yXJY@581ir8Q})ayCrK+APW7z_s)C z`{1tWrzMI{X+9M(pZ)fI{r}&kk!wqXR!aPRS|oSe)9c=ry9Qshql^2Ftq58o&{TZ0 z^z6f*7IAv_e(#g7`~0|nyLol?+vEN6(@QxGmxU~u6kEP{tJ{%R=Pc)+KYsY*&k~)b z4huEa&qZD@H8(f!xistIu65h$&S@_8T3O}ybjpKS)-Cs{(__zhaye=|k8zoKtgx!i z?n+p*kk%3o#YDZH&&MQ=RhVv*2-d7@Wa?Iw<3E0My%Wc|%F7a(N6U7n_RQ^juC;Ns zdEl`bK~b5qNhwq2oqvA#;f{5$A3t}$JY(PcTib5unj0QF{BVcP=btueKCB)mQyY`s zW^EChG=0(%ku>8qt7E5c6{-36iDQPdo6d_hi>Ksmn|;irCFsocJskP9=YE3rn`uUyKdQJ|I1U7Qs!L0e4tw1d4>FjWpA^>rw9i7SgI~fxxBT> z_+rSmpONNkHDdpCC4O=Mfk0J{d?c*9S=0q8I&w8M-`r1I%Q6O z#|y2@8^5*A@7%d_@6MgMx7iG@Cw<;@QN8rT<2~m!ix0Lgn6h!srPNvT=FOXaRO!)$ z@7Zy4HV3V;ieK5cc+TXYSwUV69*VC6cdpCs=?rEHFjkp7Eh$5Fnotw-fn_h3Ogf#l z_H&H1o@w zvFzG*_w9Gqgqdlq2UJ2epX&F`0YbebVREzjx=}H&Q!4{bj+<8cW%IHUCPtzbv0UPiV93uNQ}H zbdNvI+~adBL(k%Z)!eoTkM^A5U|NtCn{@Pj*tvtDQUP3z)BCz7X&rgc6TeU@URG;~ z!Kt`^+ZZ&%dqotd>@nE9{sv#vh9fmwo&G<6?V1?hDt|RJY~`i@AE)o%k}K`}L9syb z*S_a-f{!H~nq%;MLJ*UJ-H*#JcdX0y^VH((Xo)*{Nb~9CmntmBSD9FT;Ws{>*R^5ru-hBJ6-JV_bbyc4f z1rA*+wVkh~6W{X0-2C_1Wy~6paqNP`{^eF>mUUNuJIhrdn)WSGKWC z#{JxmYm+W4TDr#JeALocQqoyt2B(H<5Q-q(rS@ZcIbHL)+buXq~@(M{a3-vlOW!}7bM_bpgzP#(9Pt386 z41BfmTpSC%8)bqy-r4j$wkT3GJDeub#wGJ;PI&4!i#~?b?Kg$ImSyfsds7w8Y~2$U zbgEEBb7|1Z(%9A~zyH?FJ6-}XKA^5e(P zKaHxMTO>2MbS2&D6B0|dkm{_>b&iS&)tgv*iNDfU4 zb_K;BxAd-Cvp)^t5UibWsU+7t$E>G2HInaq`>nf1$7Up5^>mWFng8o1{nYfHUKm6+^)co}@WH(!5xYGkpWqKnf4rCCd+ zgodX21fQA|w4~)4OUh*l%@99MLv7X81kK~KCYqh!JZV<(!`x5jxYnBgo%ZHM>5JGd zp_s0^-S;!s_MK_;l6lW>S}*T?xr;AoljYJWEj!p6`yNNA+C)4KS)wv! 
zn{Rb-@#)qNdOEKsC51$3-VE2#Z)}oeOpvH@WArY&CvQt3rr>5AN*?d9MKTQeBOkwoV z6k=mFth7IGtURZ1n)ki8+gjdCOWkYXBnP50j~nKETkPD!t=m!fi-m&dsyQKx@| z3QrF%I=#my*zf4hf_*-pH+`FZ_riC@q@If=XVWB%(%v%%Ofr}(bw{S@vtQ65&CiQ))=_c=a+OS$sJtQWOihDRqD`E`eSvC8uwzr1OUW=lf&$h!qk z6zIfpG>BvA9HSNck|(`>`jM53HNI44N^J3o1tMMr-e9mLl!7WiIB@$Z%PH`~KE!;Zq!XcKc zLDlcYt3TP9ti9;=efBjIt*K9%&RQ0pb6TL;Hsy1Bf4{#!d*N1%OWSfKl!Go#>byAV z?weIB9XO^5Ci8p_;hHuj*l#BHM#*h+Hd?JZCwcz)PgyQ)nc>_c z`u7a$@|G{NAO8C`jq}-?Uq4=6{`hicq1>Keue*2e+S=^Nx_vCF-SW4@t~(m4_V#vl zfBsbLjIpcvvElyPUE3But$r4n#IyR@7vEB6et$81uZI0jGeaYc06fdP4c=HZtQ#WecAT!<%iQ& z^cWtKnAW4|ebBi>F|^U*;hbD^X{J`D4chx0L-%ttL{59@cE9HT-R;+O$a3=E8e-=J&VjpN|jHiqdqtaUoy(>0Iu(uY9Miw)^hB8E1PXzcJrA>3zqG zMTNuO-nsjBZBUlf@zmJHfHR(+_qN?WTlV5sUtt(yT0}u)t{aP&$n@O%Wx4BnqrzUV zzH#gP^2?Qx;X19xOPA_N_FlCA_v7r{zk5ZDmX%n|x3B#-DJW*`(iR6t%_sk!`FXu` zp7T6Y>Un8oP?x1@{@Rc(L%HBjic3T!AI}KnSZ8`W$Gpp1{LSOX)8`bQ+p(tm`@!n- z7OTyp(q*4r;jcZ*D1Sa(!9ilBg0IGcDZ6F`7PEORF}OM@;_rLSg|W7&TiW{dPxG+0 zJUAv1bYjEH?XweQmNPv)``Y~A?*939jAAZ5hEto)9hXoN*Kty}gk4 z`t#7-zUbJpmz#ctYOLKV7Bs2H_d1)mW~!5E|9N(4197hv2PBHlIjn!M+>2wykF87& zoC|wI7ksF1t=pTy7k_D{w$(fVjh4M@CcVBMq~M|T>~Yua9YH)^nyj3OQ{OX6YA$5u zQVgh!ku+xY($HKQvAgPh_B_p{TNq{@KU(R|A?$Q)&Z{U7E=9}Nzw5T&4|dvTW7|0= zJ3V-DxtFGwLb}e82Q@d|vadYy`10qEC0V=siX$0$o&2(8!ef|HJm!E%sU%#&>*w0gQ>+QEK0gr8r z-S$;}KYRbrx9ts2{pcgF=y?}ZAxW?z27!<%+? zS>Zq3vl)99ZtQj57P+{QP0E2eG;*qz(UFPKdM|Iy6|Ylj?Cjpt(Pbkks{ZZM)BE4| zoiDa~CnvA5lq2ofZ>7^Lk1wk{RWUr28hTXX+;cP639C2V$eTWYZxx^687@Y~l6xL2 zo4Siamansx;ky>a!k~OZW9i)E0@3+x2iqBX%p@LvIo7L?^Xe44L7%dd#_7*nwy4i2 zP+YajCu^yVw&c@evkE18H+r2G$>ct`+$Q=~n&fK61|?yRiHS4X5Dtnz?rKKKyz;Rny0z6FYuJb)?-p@ z`P*Zwiq5C`#!hX!{Q2dZImHh#Z7IKi)A|q|N59GZaUbrmU7xG&Cb^JDut|XTVylGlK}VjVAVsEQkzrFJBMpvi z{rG&_SH+l^xY}<&#k-$AdiU*d-Tujsx7=N4wz~Deo^@K;wbgc7OC`j2J`iXqlqi~* z8@76t&ZPp5V6mi>InOKO!c;pFWsd)=F^h8K;5@{kv^KOwP*`P#fq)Q?(ZSY)TgSe? 
z&pUCP|9AJ>w0#jQ*@i5P6)h(oAAWaS{@y+NH=Fi6P-+&;P-Wzp<=_^$X4Ts&Tm5XG zmHq!7pY2UJ<8J@bGkR`^iGZTlO5b7}^+?eoDF3tk^Lx zsfnXiea12M4e$3KF~6|CVIhM9M*~N&<{Qq%6Vr|uIx#r)tUNtwUA!XO1II1}rIs2W zU$+(kHh=Zlq)0}Nz@~0y0nTG5m3SG8&RM=+Ydj^;PjYEX@x0D8THM)lHy>k<4^54| z{&Jg~@`rm?**&I;6<(cNpY6Cm_k6l#*0iUZ+(OAATB#RopZ65crKq1)`dr6a__&e}Xt4U1dOx4z$ied7z>R^WD=<_%|K1)qj(0jD|K$98NjlTmNEv_xQ;jUj`@&lJt2BEoJnzaO+-K6iOfQj5gm7>;u; zUkW9D{j3QK58V`%ZkUxUVs!7P=c@1T?e|oFKWG^qdiCnnU$0(GDqgtdro0>jOYC*a z(4u!NGZE0Bbr_wyLSFZhGg@sK2@hS6Gt$Ow8mC{=+0hePE@;1-s1U;D}-IiEy z^Y_a;+k;EAyd;>Dd7L~9nlzRkU-(%}vEuKWsFj&A>hexJ>t44BIK^H)eN>2P#gvkB zflfVJ&snle|SL~;GFK2tD-jqp-M^(O2b1_s{$sI5BStM}I z^105`E(52>3cu?vcxkT%3$9&DU;48hg$RTR7+Qu2q|C7Ee)g z*|pVd?va=3x7Tq8CHb7%q}O%ncY$4be}A>5@8UTzMLV3Q#Bg1_oP+Zx0PNKskED|A7o^3zW5;P(kl^{%lC#%GEP|=?A1D9 zR`Eqvc14v5>Z&4YEy2^Cn4YtGsluSZB)Z;xOj1H*wS zPQOj8ANDBw7%c2g`}gdsx4`4${PI&S2|i*-_;9fLKxLc3>|NKQy$w!v@0od4Fp)#> z@h*k~S$ls@w*Nah{rvo^rJkI#%+FeUI~e`GIixwvtcfE@pz`6Iy*qc#JpVkt{;#Rm zFB4N&hGUrVk$|}9-+^nLV5qoPs@;EM;5~Xvk$Cx4DdfBzi7RPfZkCx5l zp0}|lUCDuAm6gTvHEb*q*>n2Ov3nJ?cu1zNEoW3bv*+?+pa0qo0_wftJqAZQ>~)u4 zviNwiNb>LUPho#7OMYaw|I}3Hf3Q5jjYXkKKIx17{YwfhFGZytIFg(^8|!~&ee~SH zB(#Jp@x86X{D0a8X_E8LiYYV&-(D6O_G^W@1(({4;w~kL<92tf5B@CMY<5e)>ywWE z!AG8c*Jep5KNfN6Q7AZfdrxaR(!{)nCr#Uf3YO$e_85f#JFC`4?Zm zing4vw)Z*w^NQe+o-#uQfy+#S4ow10Zr&?x81`wcUZkVB^t0OkKi%f9&pvy5yZo_9 z&*6~JQZ7-WCP{6HpEt|o+Y@IjGd$fQ;2-Lq{MNv$C1Bq3nI|r(tLT4{aGthQo7}Shi`IC_ANpA$=g#R6;<=!SOwf%0}Xm2!%}p- zCtZDz9660c$cb~Q&&ie9?Vf&z4JzJW-p!Y+X>!_$=dy`n3u6aI;yH<9Di!%_y_L%~ zpVpY2|8ys+Jj?&>&e_LKN+eI-Xz6n7qPpSC#Xc1h!ArkYSnW)howhGZ&)u-}xmaR_ zh3tFz`792ufm0&o99a~eRQd^&-Ak)vRJ?FYi6>Hz;~+<(mxf-!hlA|vr}P|5lyLme z)VWAQz-jUn!wM&jBODHIE+-%TT_@4RKPAwoU|y<#lT$5^f9RYo6Xq>5{4q~}hvCF$ zC9&3v$0E2Ld}i9V1b$FFGC|HwV)3RkNi4@KS}qoConh`fkyFiL@e%czJFXSYTIOjb z&eV~qRCPn1W2qh6smOV$Q!Z^WI3{o{bnc?lUMnS%FWCy8l6dMG8kM}~v*q*odn@-w znaz15vDiGV+$QVw+-|cQHmAc^FD>8p-l|1Gg+(!C?q@Y0uiXN1`+vTv|MmU|4 zzvPnNbT=*CZ)3>NqM5GX@?NItAfvtSVU6RLKdQ7S9Ivge|Nr4IzkN*X+I>GBoqc^> z|5)OUJpH=AtJnYfrZd%3VOsZT?gk|%2G{T@sx2Ij42~i(4njRGwpWu2X2l-mAeuf#-vOZ{XFe$i6G;s(h zwHSy=wq$z#)BG-Bc*vmPiD9JPG>*ehp3HsXVNjWMLYYM{WkyB5^M{~Si*)AMS8t2D z`FUq;#hvon{N7GM7WpkkEWsRsQG1e`md;wUwW3pho4D7^%P&ndHJ7{C7@s`wvYA~$ zFHKbV^tRlmD$hSh#$N5|yt+%PYtLJ&mJ3gx9(`HT#*o}{YunN(&r%g#+6024RPQG% z&3BrzY7Xb6S8KL5G-b}}F>RT2eSO^Wyqu&ZQOUQ8U+)d}au-;;F*Q(c`surM`8}RX z81nxnmUl2tNevZqohP0e`Rl>&vJ1D)&5}xvxq8*a*GtoDr-0JaWd?3s3mx`+wwstE z@tkGy{QC*Fw$&z0{k5R?mu2lNi{2>{&ZY&6xqevqzJ1;6q!z;;DgRf!&a#`o{PiaT zp4@6PH&+IugMu!KM?UZLeJ{Tb z&c9xHOz}w3B)@9;MVF36ax0Wv%UarNcG*HL_Tkg3Pmdm*`u*YY`E|SY@AkiZVw29R zWrgSN)%;npg^fYLN5SQ&k-Nlv|MXK5iY*>VTY3$H+$JxyIB$G#XYsd^u)6n`gLQ5i z&D|=YbZ(}gnXu&f3Le7+CN1qz+UbF@SDii-PKdql&Y<8FrTyroiEsO1#k1-L$6Vu< zEGqdhQGi9!XGeQTrdI;T+SL2~vG;o)r)*&r;7~mG{PN+$wf9RoP3Elm#=yYAd~nA) z?_D!A*R~g0ZGA4G%yBWQ)-K9)=G&)FYc^_h--%9NE2*B@+sWWJnde@3s;7ZjP^-kN zRj0a@47UoZO$lgNP;ST4Zpf+Fb6MqSMsKGgQ)Aa8!*d+VIIZ)V8YMh4mu2=aDD}+Q z%D{B$l~0PoQ@P}pDGCg8zcU=UQu5_Mlf-3iz2_oMFZN9LVoGQc=rNk4DlJ%i&SG_K z$3gaN*<&3+V$IT1dpHzW7V&c~Y~YkUrr8n@rn)p|*0O!?6`amlGblAkCQk``Hpf?? 
z!Jd7)Ud^)VTJ(Cgx1vjESnTnw*W9GVHk?cmYFoIqOo#DVj8r!3^VCQNhPpq8<=3yv z-|=Ai<4(gY#s&^`&s(K)i}|cL4A~fXVt&bSA6D=b@;RBe|MjV5@2d``nisrgaN=n= zXw|JKmz*8M^?D97AJdD5q7xUx zfgc=B32To^1fTj>#}?$pY+t}&{HoH)HJMEMuL4@PoZE>XVT!?5^FLOQQR zv-LTv*F9ke_?y-}=ReH7_=dWKvvf<{8&yXEy=TQ83mz$X8ZErC;&Se+Ft1L5s58&g zPp1mBdCsajb2+L#@rvnR1D^KBfm^=XeB3%I(4@RWDxz zy!h5CC4tQ6prdw|t$Tg>UxiJPqL7f@`E$=!w;n(7?dj6jZR;K%vsm2wp4IK;qk<2H zU5_n3M5OvghPG6tcmFwV|1Z-jp2zod{{L_Fc7LDmpZ-;_(q!(Lu;&ZQuBpZj`^J*5|5SwtUe?n@5KCBTh+(EwYzMiN2mWWl#0zV_C7G zMyDqqt+}@4Y}#j?skidt?P`C`Hmj=LD8~GLmf4r)pC!+JKPj>JqFaA!Puk^~*VvRc zMa(^(85L4`zIChBdG-2#pH2K8A4uMP{rcV4pSQ37o+$Aov#P9cU)=hwjKBYG)Zk)p zka3^>{JE#pb59=)-M;qY&!4|Ny*cPu_o7wnZf|?&Jbjs_;=FAO#I?93t@p{(>~yeU$>9U#?T4S0YEQkFTORgWrFY}j%O-w@9agNW3J`*N$NoV)Aony#uV2w1}xp&{iTVkc5VP`*o4h}Sxc>FVISA^dG56j=* z-@8}wfLZ9mEr(hxJDLp4uD$)X9Axi%1GW2qKg0?hEtFtl*tKpe!%W^iiAB>enn(pI+E1;b{5A_qHqJ?;j6q1=EW$=!a?2(OiRQpRi}zQsptmiF9!`>p54?CZXFw|@Tlrl@w_b59XB zxm2%$`=5U(9Lt;;)Wxm1RA&F()(JsguXFD6A3kvYy#BpC)#5CTHY-)U1zl!bFyQIE z=+v<+^Vr{WA1hY9KBcPpys6l)QN88G?t6?HOI}%;vspZ^vXe;utMEa`B&`3p4uj() zwdvxoEjn*6yZyE-G&DMUZK<1f$AV@Ci{G=GnVecS8hT|}o;JKK zyMwb>w&)%cVMy7s@kHf3`*T{Hic>CqToK^aH(i?Hz=`VK=Sn4cQ`#6AIMi-sum~Qy zVfp;`KIR#Q^;TyeTL?`m)?@hdmOmnSPHCV}@3JR4o)U){EHt0yZPz~4W%%qjqgxAi z+%(k|1J9)M=`9yb&h}ml%Ia9~T(E`Vx$}Mga}xKz8y;j}VqkRB=yXodWf61EnDd#_ zaY|jk;8`kDEb(&f6VFP=>NAbI%0o+bWrsq%Jv>VLmmE8hCoz{Y<6yYJS|ePq-x z+i=&;=l^^8@#BXj7H_^ydAQwt`;4GNCtg-m{3?@YOPurM5R;3o#|pz|&*SYyef3?} zx9fR7-hMrQQjO7EKe5ekUPga@`;k%MQRTb`mozuu%~>0C$bFC^^yhHA-5R}1zjx<JEuUX;d~0lfdUG6yrH1a# z#s_cT`U)ML-Tr&`@pgmGbLaK9l=b|&{BlylnbtJ{v(q$_?HD{bC0lhkZSQT3`tYb^ z&Fm%pHnZ91sk5>t=p0L)eSP}$v#ZPa7+$c~%~F$ieD?LS z+*=x4Y27zVtTskTu6@19=vL4%4mY{vuJ6BN54LWIx*HR#64_?BDM~_D!Jzg&!;4wl z43A~x&f91E|M_|Q@?39&nNL@W{C&COO4bL1S?9$a1=L@foCyp9xVpvY2tf4ie}jY5N+^_Fr!Q@59}qjSuFO=azqe+I{U!ZV3_^E8Ujv6+R`QZow|V^nA<3 zDVuaoyV+#x_%{kfzW%lEIWs$h#A02BYfPyPL0%kvhG{*Xn!DHK&&rD_No=|MqOhES z$>Npa8q=uk`^jFfcICNoFWQ=CVs6#myK!;JVao*G0}_Fcdh;GJF!pe|T!>^>eABit zQ)_9nYM~+^7^jrS_j$Qj0&*jaJf4~1;Wbz`FIK6Y{&Yz#X?Bt|~BF3K7DOZlJ%({K{ z^X0O735)bA%mUSRyZ7y>D6ZyYtoZTAm6>CX@usJ9$`5%iZM;0IyjEY$IPB}&m!-+7P-;=^SHt&P_YL`6hev&&$tP9dwpf)Yw?PT~ikI`ee$ar2MU%!4`K5w5MSyr}f z!!b6FVnYS?CIuIcZNA6V;_T)Mv)|tKD8?s!Gyna+20VXW-@UtWMcQV`MNdLZTP7Iz zsXQ+Y4e+1-^yQBG+kH*9+%2&xJQk6@c54vFsTJ}8ilB|4`I{?m?%yZFUwyD$gR5C# z#`$HH^TbPIOL<#xOjq;@7v*{k5qg*6iP11E-0$rWl=ktkK1E|8vZP#*->ydvyJx zlPywDC`!40oSiuU3h?#|swi) zO7_$)+pE2e3VnxFJk=&|$-e#kttSWbn(j#--u}KHA9vruXZODRTXoy@<>cngT2}f0 zU+gT^-*>YkuSZE{=SuwWoRqoN?EL!J`~1xgGb}qkdCJY&nCn$3@;vfLW%Sfdxw8(x z%jkLBY$=)=X?iSz`^;?qe7m}u>*asHeQi-VdC7<4<;Df_OP&WV{hBgMKRo^N%QKR} z=DeHF{d|z2%kbgTC#{Y}yYl8ROC)k^l>c#H=epPKx7Tm&6F4(NoFzCpBsTO|#zGYp zi?f?nz5BM?IWXiJlat_vX+~C?f|~f}@K3q)v*z1z`+tf&O=mAZdwqTPA`4bW4^82q z=;sEC98L^xW~?oEY<9+q!9nKe^ziBFr;Rz@{AW$t%u*(5SH;7?z`)??>gTe~DWM4f DifYg6 literal 0 HcmV?d00001 diff --git a/ldm/modules/image_degradation/utils_image.py b/ldm/modules/image_degradation/utils_image.py new file mode 100644 index 000000000..0175f155a --- /dev/null +++ b/ldm/modules/image_degradation/utils_image.py @@ -0,0 +1,916 @@ +import os +import math +import random +import numpy as np +import torch +import cv2 +from torchvision.utils import make_grid +from datetime import datetime +#import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py + + +os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" + + +''' +# -------------------------------------------- +# Kai Zhang (github: https://github.com/cszn) +# 03/Mar/2019 +# -------------------------------------------- +# 
+# https://github.com/twhui/SRGAN-pyTorch
+# https://github.com/xinntao/BasicSR
+# --------------------------------------------
+'''
+
+
+IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif']
+
+
+def is_image_file(filename):
+    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
+
+
+def get_timestamp():
+    return datetime.now().strftime('%y%m%d-%H%M%S')
+
+
+def imshow(x, title=None, cbar=False, figsize=None):
+    import matplotlib.pyplot as plt  # imported lazily; the module-level import is commented out (see TODO above)
+    plt.figure(figsize=figsize)
+    plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray')
+    if title:
+        plt.title(title)
+    if cbar:
+        plt.colorbar()
+    plt.show()
+
+
+def surf(Z, cmap='rainbow', figsize=None):
+    import matplotlib.pyplot as plt  # imported lazily; the module-level import is commented out (see TODO above)
+    plt.figure(figsize=figsize)
+    ax3 = plt.axes(projection='3d')
+
+    w, h = Z.shape[:2]
+    xx = np.arange(0, w, 1)
+    yy = np.arange(0, h, 1)
+    X, Y = np.meshgrid(xx, yy)
+    ax3.plot_surface(X, Y, Z, cmap=cmap)
+    #ax3.contour(X, Y, Z, zdim='z', offset=-2, cmap=cmap)
+    plt.show()
+
+
+'''
+# --------------------------------------------
+# get image paths
+# --------------------------------------------
+'''
+
+
+def get_image_paths(dataroot):
+    paths = None  # return None if dataroot is None
+    if dataroot is not None:
+        paths = sorted(_get_paths_from_images(dataroot))
+    return paths
+
+
+def _get_paths_from_images(path):
+    assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
+    images = []
+    for dirpath, _, fnames in sorted(os.walk(path)):
+        for fname in sorted(fnames):
+            if is_image_file(fname):
+                img_path = os.path.join(dirpath, fname)
+                images.append(img_path)
+    assert images, '{:s} has no valid image file'.format(path)
+    return images
+
+
+'''
+# --------------------------------------------
+# split large images into small images
+# --------------------------------------------
+'''
+
+
+def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
+    w, h = img.shape[:2]
+    patches = []
+    if w > p_max and h > p_max:
+        w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=int))  # np.int is deprecated; use the builtin
+        h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=int))
+        w1.append(w-p_size)
+        h1.append(h-p_size)
+#        print(w1)
+#        print(h1)
+        for i in w1:
+            for j in h1:
+                patches.append(img[i:i+p_size, j:j+p_size, :])
+    else:
+        patches.append(img)
+
+    return patches
+
+
+def imssave(imgs, img_path):
+    """
+    imgs: list, N images of size WxHxC
+    """
+    img_name, ext = os.path.splitext(os.path.basename(img_path))
+
+    for i, img in enumerate(imgs):
+        if img.ndim == 3:
+            img = img[:, :, [2, 1, 0]]
+        new_path = os.path.join(os.path.dirname(img_path), img_name+'_s{:04d}'.format(i)+'.png')
+        cv2.imwrite(new_path, img)
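A minimal usage sketch for the patch-splitting helpers above (the dummy array, the output path, and the import path are illustrative assumptions, not part of this patch):

    import numpy as np
    from ldm.modules.image_degradation import utils_image as util  # assumed path from this patch's layout

    # dummy 1200x1000 RGB image, larger than p_max=800 on both sides, so it gets split
    img = np.zeros((1200, 1000, 3), dtype=np.uint8)
    patches = util.patches_from_image(img, p_size=512, p_overlap=64, p_max=800)
    # a 3x3 grid of 512x512x3 patches covering the image with 64px overlap
    util.imssave(patches, '/tmp/example.png')  # writes example_s0000.png, example_s0001.png, ...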
+ """ + paths = get_image_paths(original_dataroot) + for img_path in paths: + # img_name, ext = os.path.splitext(os.path.basename(img_path)) + img = imread_uint(img_path, n_channels=n_channels) + patches = patches_from_image(img, p_size, p_overlap, p_max) + imssave(patches, os.path.join(taget_dataroot,os.path.basename(img_path))) + #if original_dataroot == taget_dataroot: + #del img_path + +''' +# -------------------------------------------- +# makedir +# -------------------------------------------- +''' + + +def mkdir(path): + if not os.path.exists(path): + os.makedirs(path) + + +def mkdirs(paths): + if isinstance(paths, str): + mkdir(paths) + else: + for path in paths: + mkdir(path) + + +def mkdir_and_rename(path): + if os.path.exists(path): + new_name = path + '_archived_' + get_timestamp() + print('Path already exists. Rename it to [{:s}]'.format(new_name)) + os.rename(path, new_name) + os.makedirs(path) + + +''' +# -------------------------------------------- +# read image from path +# opencv is fast, but read BGR numpy image +# -------------------------------------------- +''' + + +# -------------------------------------------- +# get uint8 image of size HxWxn_channles (RGB) +# -------------------------------------------- +def imread_uint(path, n_channels=3): + # input: path + # output: HxWx3(RGB or GGG), or HxWx1 (G) + if n_channels == 1: + img = cv2.imread(path, 0) # cv2.IMREAD_GRAYSCALE + img = np.expand_dims(img, axis=2) # HxWx1 + elif n_channels == 3: + img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # BGR or G + if img.ndim == 2: + img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) # GGG + else: + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # RGB + return img + + +# -------------------------------------------- +# matlab's imwrite +# -------------------------------------------- +def imsave(img, img_path): + img = np.squeeze(img) + if img.ndim == 3: + img = img[:, :, [2, 1, 0]] + cv2.imwrite(img_path, img) + +def imwrite(img, img_path): + img = np.squeeze(img) + if img.ndim == 3: + img = img[:, :, [2, 1, 0]] + cv2.imwrite(img_path, img) + + + +# -------------------------------------------- +# get single image of size HxWxn_channles (BGR) +# -------------------------------------------- +def read_img(path): + # read image by cv2 + # return: Numpy float32, HWC, BGR, [0,1] + img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # cv2.IMREAD_GRAYSCALE + img = img.astype(np.float32) / 255. + if img.ndim == 2: + img = np.expand_dims(img, axis=2) + # some images have 4 channels + if img.shape[2] > 3: + img = img[:, :, :3] + return img + + +''' +# -------------------------------------------- +# image format conversion +# -------------------------------------------- +# numpy(single) <---> numpy(unit) +# numpy(single) <---> tensor +# numpy(unit) <---> tensor +# -------------------------------------------- +''' + + +# -------------------------------------------- +# numpy(single) [0, 1] <---> numpy(unit) +# -------------------------------------------- + + +def uint2single(img): + + return np.float32(img/255.) + + +def single2uint(img): + + return np.uint8((img.clip(0, 1)*255.).round()) + + +def uint162single(img): + + return np.float32(img/65535.) 
+def single2uint16(img):
+    return np.uint16((img.clip(0, 1)*65535.).round())
+
+
+# --------------------------------------------
+# numpy(uint) (HxWxC or HxW) <---> tensor
+# --------------------------------------------
+
+
+# convert uint to 4-dimensional torch tensor
+def uint2tensor4(img):
+    if img.ndim == 2:
+        img = np.expand_dims(img, axis=2)
+    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0)
+
+
+# convert uint to 3-dimensional torch tensor
+def uint2tensor3(img):
+    if img.ndim == 2:
+        img = np.expand_dims(img, axis=2)
+    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.)
+
+
+# convert 2/3/4-dimensional torch tensor to uint
+def tensor2uint(img):
+    img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
+    if img.ndim == 3:
+        img = np.transpose(img, (1, 2, 0))
+    return np.uint8((img*255.0).round())
+
+
+# --------------------------------------------
+# numpy(single) (HxWxC) <---> tensor
+# --------------------------------------------
+
+
+# convert single (HxWxC) to 3-dimensional torch tensor
+def single2tensor3(img):
+    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float()
+
+
+# convert single (HxWxC) to 4-dimensional torch tensor
+def single2tensor4(img):
+    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0)
+
+
+# convert torch tensor to single
+def tensor2single(img):
+    img = img.data.squeeze().float().cpu().numpy()
+    if img.ndim == 3:
+        img = np.transpose(img, (1, 2, 0))
+
+    return img
+
+
+# convert torch tensor to single
+def tensor2single3(img):
+    img = img.data.squeeze().float().cpu().numpy()
+    if img.ndim == 3:
+        img = np.transpose(img, (1, 2, 0))
+    elif img.ndim == 2:
+        img = np.expand_dims(img, axis=2)
+    return img
+
+
+def single2tensor5(img):
+    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0)
+
+
+def single32tensor5(img):
+    return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0)
+
+
+def single42tensor4(img):
+    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float()
+
+
+# from skimage.io import imread, imsave
+def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
+    '''
+    Converts a torch Tensor into an image Numpy array of BGR channel order
+    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
+    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
+    '''
+    tensor = tensor.squeeze().float().cpu().clamp_(*min_max)  # squeeze first, then clamp
+    tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])  # to range [0,1]
+    n_dim = tensor.dim()
+    if n_dim == 4:
+        n_img = len(tensor)
+        img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
+        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
+    elif n_dim == 3:
+        img_np = tensor.numpy()
+        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
+    elif n_dim == 2:
+        img_np = tensor.numpy()
+    else:
+        raise TypeError(
+            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
+    if out_type == np.uint8:
+        img_np = (img_np * 255.0).round()
+        # Important. Unlike matlab, numpy.uint8() WILL NOT round by default.
+    return img_np.astype(out_type)
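Likewise for the uint<->tensor helpers; a round-trip sketch with dummy data (import path assumed as above):

    import numpy as np
    from ldm.modules.image_degradation import utils_image as util  # assumed path

    img = np.random.randint(0, 256, (64, 48, 3), dtype=np.uint8)  # HxWxC
    t = util.uint2tensor4(img)   # 1x3x64x48 float tensor in [0, 1]
    back = util.tensor2uint(t)   # HxWxC uint8 again
    assert (img == back).all()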
+'''
+# --------------------------------------------
+# Augmentation, flip and/or rotate
+# --------------------------------------------
+# The following two are enough.
+# (1) augment_img: numpy image of WxHxC or WxH
+# (2) augment_img_tensor4: tensor image 1xCxWxH
+# --------------------------------------------
+'''
+
+
+def augment_img(img, mode=0):
+    '''Kai Zhang (github: https://github.com/cszn)
+    '''
+    if mode == 0:
+        return img
+    elif mode == 1:
+        return np.flipud(np.rot90(img))
+    elif mode == 2:
+        return np.flipud(img)
+    elif mode == 3:
+        return np.rot90(img, k=3)
+    elif mode == 4:
+        return np.flipud(np.rot90(img, k=2))
+    elif mode == 5:
+        return np.rot90(img)
+    elif mode == 6:
+        return np.rot90(img, k=2)
+    elif mode == 7:
+        return np.flipud(np.rot90(img, k=3))
+
+
+def augment_img_tensor4(img, mode=0):
+    '''Kai Zhang (github: https://github.com/cszn)
+    '''
+    if mode == 0:
+        return img
+    elif mode == 1:
+        return img.rot90(1, [2, 3]).flip([2])
+    elif mode == 2:
+        return img.flip([2])
+    elif mode == 3:
+        return img.rot90(3, [2, 3])
+    elif mode == 4:
+        return img.rot90(2, [2, 3]).flip([2])
+    elif mode == 5:
+        return img.rot90(1, [2, 3])
+    elif mode == 6:
+        return img.rot90(2, [2, 3])
+    elif mode == 7:
+        return img.rot90(3, [2, 3]).flip([2])
+
+
+def augment_img_tensor(img, mode=0):
+    '''Kai Zhang (github: https://github.com/cszn)
+    '''
+    img_size = img.size()
+    img_np = img.data.cpu().numpy()
+    if len(img_size) == 3:
+        img_np = np.transpose(img_np, (1, 2, 0))
+    elif len(img_size) == 4:
+        img_np = np.transpose(img_np, (2, 3, 1, 0))
+    img_np = augment_img(img_np, mode=mode)
+    img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
+    if len(img_size) == 3:
+        img_tensor = img_tensor.permute(2, 0, 1)
+    elif len(img_size) == 4:
+        img_tensor = img_tensor.permute(3, 2, 0, 1)
+
+    return img_tensor.type_as(img)
+
+
+def augment_img_np3(img, mode=0):
+    if mode == 0:
+        return img
+    elif mode == 1:
+        return img.transpose(1, 0, 2)
+    elif mode == 2:
+        return img[::-1, :, :]
+    elif mode == 3:
+        img = img[::-1, :, :]
+        img = img.transpose(1, 0, 2)
+        return img
+    elif mode == 4:
+        return img[:, ::-1, :]
+    elif mode == 5:
+        img = img[:, ::-1, :]
+        img = img.transpose(1, 0, 2)
+        return img
+    elif mode == 6:
+        img = img[:, ::-1, :]
+        img = img[::-1, :, :]
+        return img
+    elif mode == 7:
+        img = img[:, ::-1, :]
+        img = img[::-1, :, :]
+        img = img.transpose(1, 0, 2)
+        return img
+
+
+def augment_imgs(img_list, hflip=True, rot=True):
+    # horizontal flip OR rotate
+    hflip = hflip and random.random() < 0.5
+    vflip = rot and random.random() < 0.5
+    rot90 = rot and random.random() < 0.5
+
+    def _augment(img):
+        if hflip:
+            img = img[:, ::-1, :]
+        if vflip:
+            img = img[::-1, :, :]
+        if rot90:
+            img = img.transpose(1, 0, 2)
+        return img
+
+    return [_augment(img) for img in img_list]
+
+
+'''
+# --------------------------------------------
+# modcrop and shave
+# --------------------------------------------
+'''
+
+
+def modcrop(img_in, scale):
+    # img_in: Numpy, HWC or HW
+    img = np.copy(img_in)
+    if img.ndim == 2:
+        H, W = img.shape
+        H_r, W_r = H % scale, W % scale
+        img = img[:H - H_r, :W - W_r]
+    elif img.ndim == 3:
+        H, W, C = img.shape
+        H_r, W_r = H % scale, W % scale
+        img = img[:H - H_r, :W - W_r, :]
+    else:
+        raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
+    return img
+
+
+def shave(img_in, border=0):
+    # img_in: Numpy, HWC or HW
+    img = np.copy(img_in)
+    h, w = img.shape[:2]
+    img = img[border:h-border, border:w-border]
+    return img
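The eight augmentation modes enumerate the dihedral symmetries of the square (identity, three rotations, and their flips); a small sketch on a dummy array:

    import numpy as np
    from ldm.modules.image_degradation import utils_image as util  # assumed path

    x = np.arange(12).reshape(3, 4)                      # dummy 3x4 "image"
    assert util.augment_img(x, mode=5).shape == (4, 3)   # mode 5: one 90-degree rotation
    assert np.array_equal(util.augment_img(x, mode=6), np.rot90(x, k=2))  # mode 6: 180 degrees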
+'''
+# --------------------------------------------
+# image processing on numpy images
+# channel_convert(in_c, tar_type, img_list):
+# rgb2ycbcr(img, only_y=True):
+# bgr2ycbcr(img, only_y=True):
+# ycbcr2rgb(img):
+# --------------------------------------------
+'''
+
+
+def rgb2ycbcr(img, only_y=True):
+    '''same as matlab rgb2ycbcr
+    only_y: only return Y channel
+    Input:
+        uint8, [0, 255]
+        float, [0, 1]
+    '''
+    in_img_type = img.dtype
+    img = img.astype(np.float32)  # assign the result; a bare img.astype() is a no-op
+    if in_img_type != np.uint8:
+        img *= 255.
+    # convert
+    if only_y:
+        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
+    else:
+        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
+                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
+    if in_img_type == np.uint8:
+        rlt = rlt.round()
+    else:
+        rlt /= 255.
+    return rlt.astype(in_img_type)
+
+
+def ycbcr2rgb(img):
+    '''same as matlab ycbcr2rgb
+    Input:
+        uint8, [0, 255]
+        float, [0, 1]
+    '''
+    in_img_type = img.dtype
+    img = img.astype(np.float32)  # assign the result; a bare img.astype() is a no-op
+    if in_img_type != np.uint8:
+        img *= 255.
+    # convert
+    rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
+                          [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
+    if in_img_type == np.uint8:
+        rlt = rlt.round()
+    else:
+        rlt /= 255.
+    return rlt.astype(in_img_type)
+
+
+def bgr2ycbcr(img, only_y=True):
+    '''bgr version of rgb2ycbcr
+    only_y: only return Y channel
+    Input:
+        uint8, [0, 255]
+        float, [0, 1]
+    '''
+    in_img_type = img.dtype
+    img = img.astype(np.float32)  # assign the result; a bare img.astype() is a no-op
+    if in_img_type != np.uint8:
+        img *= 255.
+    # convert
+    if only_y:
+        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
+    else:
+        rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
+                              [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
+    if in_img_type == np.uint8:
+        rlt = rlt.round()
+    else:
+        rlt /= 255.
+    return rlt.astype(in_img_type)
+
+
+def channel_convert(in_c, tar_type, img_list):
+    # conversion among BGR, gray and y
+    if in_c == 3 and tar_type == 'gray':  # BGR to gray
+        gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
+        return [np.expand_dims(img, axis=2) for img in gray_list]
+    elif in_c == 3 and tar_type == 'y':  # BGR to y
+        y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
+        return [np.expand_dims(img, axis=2) for img in y_list]
+    elif in_c == 1 and tar_type == 'RGB':  # gray/y to BGR
+        return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
+    else:
+        return img_list
+
+
+'''
+# --------------------------------------------
+# metric, PSNR and SSIM
+# --------------------------------------------
+'''
+
+
+# --------------------------------------------
+# PSNR
+# --------------------------------------------
+def calculate_psnr(img1, img2, border=0):
+    # img1 and img2 have range [0, 255]
+    #img1 = img1.squeeze()
+    #img2 = img2.squeeze()
+    if not img1.shape == img2.shape:
+        raise ValueError('Input images must have the same dimensions.')
+    h, w = img1.shape[:2]
+    img1 = img1[border:h-border, border:w-border]
+    img2 = img2[border:h-border, border:w-border]
+
+    img1 = img1.astype(np.float64)
+    img2 = img2.astype(np.float64)
+    mse = np.mean((img1 - img2)**2)
+    if mse == 0:
+        return float('inf')
+    return 20 * math.log10(255.0 / math.sqrt(mse))
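PSNR follows directly from the MSE: a constant error of 5 gray levels gives MSE = 25, so PSNR = 20*log10(255/5), about 34.15 dB. A sanity sketch with synthetic images (import path assumed as above):

    import numpy as np
    from ldm.modules.image_degradation import utils_image as util  # assumed path

    a = np.zeros((8, 8), dtype=np.uint8)
    b = np.full((8, 8), 5, dtype=np.uint8)  # uniform error of 5 levels -> MSE = 25
    print(util.calculate_psnr(a, b))        # 20*log10(255/5) ~= 34.15
    print(util.calculate_psnr(a, a))        # identical images -> inf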
+# --------------------------------------------
+# SSIM
+# --------------------------------------------
+def calculate_ssim(img1, img2, border=0):
+    '''calculate SSIM
+    the same outputs as MATLAB's
+    img1, img2: [0, 255]
+    '''
+    #img1 = img1.squeeze()
+    #img2 = img2.squeeze()
+    if not img1.shape == img2.shape:
+        raise ValueError('Input images must have the same dimensions.')
+    h, w = img1.shape[:2]
+    img1 = img1[border:h-border, border:w-border]
+    img2 = img2[border:h-border, border:w-border]
+
+    if img1.ndim == 2:
+        return ssim(img1, img2)
+    elif img1.ndim == 3:
+        if img1.shape[2] == 3:
+            ssims = []
+            for i in range(3):
+                ssims.append(ssim(img1[:, :, i], img2[:, :, i]))
+            return np.array(ssims).mean()
+        elif img1.shape[2] == 1:
+            return ssim(np.squeeze(img1), np.squeeze(img2))
+    else:
+        raise ValueError('Wrong input image dimensions.')
+
+
+def ssim(img1, img2):
+    C1 = (0.01 * 255)**2
+    C2 = (0.03 * 255)**2
+
+    img1 = img1.astype(np.float64)
+    img2 = img2.astype(np.float64)
+    kernel = cv2.getGaussianKernel(11, 1.5)
+    window = np.outer(kernel, kernel.transpose())
+
+    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
+    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
+    mu1_sq = mu1**2
+    mu2_sq = mu2**2
+    mu1_mu2 = mu1 * mu2
+    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
+    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
+    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
+
+    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
+                                                            (sigma1_sq + sigma2_sq + C2))
+    return ssim_map.mean()
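SSIM compares local means, variances, and covariance under an 11x11 Gaussian window, so identical inputs score exactly 1.0; a sanity sketch (import path assumed as above):

    import numpy as np
    from ldm.modules.image_degradation import utils_image as util  # assumed path

    a = np.random.randint(0, 256, (32, 32), dtype=np.uint8)
    print(util.calculate_ssim(a, a))               # 1.0
    print(util.calculate_ssim(a, 255 - a) < 1.0)   # True: the inverted image scores lower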
+ weights_zero_tmp = torch.sum((weights == 0), 0) + if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6): + indices = indices.narrow(1, 1, P - 2) + weights = weights.narrow(1, 1, P - 2) + if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6): + indices = indices.narrow(1, 0, P - 2) + weights = weights.narrow(1, 0, P - 2) + weights = weights.contiguous() + indices = indices.contiguous() + sym_len_s = -indices.min() + 1 + sym_len_e = indices.max() - in_length + indices = indices + sym_len_s - 1 + return weights, indices, int(sym_len_s), int(sym_len_e) + + +# -------------------------------------------- +# imresize for tensor image [0, 1] +# -------------------------------------------- +def imresize(img, scale, antialiasing=True): + # Now the scale should be the same for H and W + # input: img: pytorch tensor, CHW or HW [0,1] + # output: CHW or HW [0,1] w/o round + need_squeeze = True if img.dim() == 2 else False + if need_squeeze: + img.unsqueeze_(0) + in_C, in_H, in_W = img.size() + out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) + kernel_width = 4 + kernel = 'cubic' + + # Return the desired dimension order for performing the resize. The + # strategy is to perform the resize first along the dimension with the + # smallest scale factor. + # Now we do not support this. + + # get weights and indices + weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( + in_H, out_H, scale, kernel, kernel_width, antialiasing) + weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( + in_W, out_W, scale, kernel, kernel_width, antialiasing) + # process H dimension + # symmetric copying + img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W) + img_aug.narrow(1, sym_len_Hs, in_H).copy_(img) + + sym_patch = img[:, :sym_len_Hs, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv) + + sym_patch = img[:, -sym_len_He:, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) + + out_1 = torch.FloatTensor(in_C, out_H, in_W) + kernel_width = weights_H.size(1) + for i in range(out_H): + idx = int(indices_H[i][0]) + for j in range(out_C): + out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i]) + + # process W dimension + # symmetric copying + out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We) + out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1) + + sym_patch = out_1[:, :, :sym_len_Ws] + inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(2, inv_idx) + out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv) + + sym_patch = out_1[:, :, -sym_len_We:] + inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(2, inv_idx) + out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) + + out_2 = torch.FloatTensor(in_C, out_H, out_W) + kernel_width = weights_W.size(1) + for i in range(out_W): + idx = int(indices_W[i][0]) + for j in range(out_C): + out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i]) + if need_squeeze: + out_2.squeeze_() + return out_2 + + +# -------------------------------------------- +# imresize for numpy image [0, 1] +# -------------------------------------------- +def 
imresize_np(img, scale, antialiasing=True): + # Now the scale should be the same for H and W + # input: img: Numpy, HWC or HW [0,1] + # output: HWC or HW [0,1] w/o round + img = torch.from_numpy(img) + need_squeeze = True if img.dim() == 2 else False + if need_squeeze: + img.unsqueeze_(2) + + in_H, in_W, in_C = img.size() + out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) + kernel_width = 4 + kernel = 'cubic' + + # Return the desired dimension order for performing the resize. The + # strategy is to perform the resize first along the dimension with the + # smallest scale factor. + # Now we do not support this. + + # get weights and indices + weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( + in_H, out_H, scale, kernel, kernel_width, antialiasing) + weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( + in_W, out_W, scale, kernel, kernel_width, antialiasing) + # process H dimension + # symmetric copying + img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C) + img_aug.narrow(0, sym_len_Hs, in_H).copy_(img) + + sym_patch = img[:sym_len_Hs, :, :] + inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(0, inv_idx) + img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv) + + sym_patch = img[-sym_len_He:, :, :] + inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(0, inv_idx) + img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) + + out_1 = torch.FloatTensor(out_H, in_W, in_C) + kernel_width = weights_H.size(1) + for i in range(out_H): + idx = int(indices_H[i][0]) + for j in range(out_C): + out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i]) + + # process W dimension + # symmetric copying + out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C) + out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1) + + sym_patch = out_1[:, :sym_len_Ws, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv) + + sym_patch = out_1[:, -sym_len_We:, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) + + out_2 = torch.FloatTensor(out_H, out_W, in_C) + kernel_width = weights_W.size(1) + for i in range(out_W): + idx = int(indices_W[i][0]) + for j in range(out_C): + out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i]) + if need_squeeze: + out_2.squeeze_() + + return out_2.numpy() + + +if __name__ == '__main__': + print('---') +# img = imread_uint('test.bmp', 3) +# img = uint2single(img) +# img_bicubic = imresize_np(img, 1/4) \ No newline at end of file diff --git a/ldm/modules/losses/__init__.py b/ldm/modules/losses/__init__.py new file mode 100644 index 000000000..876d7c5bd --- /dev/null +++ b/ldm/modules/losses/__init__.py @@ -0,0 +1 @@ +from ldm.modules.losses.contperceptual import LPIPSWithDiscriminator \ No newline at end of file diff --git a/ldm/modules/losses/contperceptual.py b/ldm/modules/losses/contperceptual.py new file mode 100644 index 000000000..672c1e32a --- /dev/null +++ b/ldm/modules/losses/contperceptual.py @@ -0,0 +1,111 @@ +import torch +import torch.nn as nn + +from taming.modules.losses.vqperceptual import * # TODO: taming dependency yes/no? 
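+# the wildcard import provides LPIPS, NLayerDiscriminator, weights_init,
+# hinge_d_loss, vanilla_d_loss and adopt_weight from taming-transformers;
+# all of them are used by LPIPSWithDiscriminator below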
+ + +class LPIPSWithDiscriminator(nn.Module): + def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0, + disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, + perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, + disc_loss="hinge"): + + super().__init__() + assert disc_loss in ["hinge", "vanilla"] + self.kl_weight = kl_weight + self.pixel_weight = pixelloss_weight + self.perceptual_loss = LPIPS().eval() + self.perceptual_weight = perceptual_weight + # output log variance + self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init) + + self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, + n_layers=disc_num_layers, + use_actnorm=use_actnorm + ).apply(weights_init) + self.discriminator_iter_start = disc_start + self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss + self.disc_factor = disc_factor + self.discriminator_weight = disc_weight + self.disc_conditional = disc_conditional + + def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): + if last_layer is not None: + nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] + else: + nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] + + d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) + d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() + d_weight = d_weight * self.discriminator_weight + return d_weight + + def forward(self, inputs, reconstructions, posteriors, optimizer_idx, + global_step, last_layer=None, cond=None, split="train", + weights=None): + rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) + if self.perceptual_weight > 0: + p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) + rec_loss = rec_loss + self.perceptual_weight * p_loss + + nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar + weighted_nll_loss = nll_loss + if weights is not None: + weighted_nll_loss = weights*nll_loss + weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0] + nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] + kl_loss = posteriors.kl() + kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] + + # now the GAN part + if optimizer_idx == 0: + # generator update + if cond is None: + assert not self.disc_conditional + logits_fake = self.discriminator(reconstructions.contiguous()) + else: + assert self.disc_conditional + logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) + g_loss = -torch.mean(logits_fake) + + if self.disc_factor > 0.0: + try: + d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) + except RuntimeError: + assert not self.training + d_weight = torch.tensor(0.0) + else: + d_weight = torch.tensor(0.0) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss + + log = {"{}/total_loss".format(split): loss.clone().detach().mean(), "{}/logvar".format(split): self.logvar.detach(), + "{}/kl_loss".format(split): kl_loss.detach().mean(), "{}/nll_loss".format(split): nll_loss.detach().mean(), + "{}/rec_loss".format(split): rec_loss.detach().mean(), + "{}/d_weight".format(split): d_weight.detach(), + "{}/disc_factor".format(split): 
torch.tensor(disc_factor), + "{}/g_loss".format(split): g_loss.detach().mean(), + } + return loss, log + + if optimizer_idx == 1: + # second pass for discriminator update + if cond is None: + logits_real = self.discriminator(inputs.contiguous().detach()) + logits_fake = self.discriminator(reconstructions.contiguous().detach()) + else: + logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) + logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) + + log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), + "{}/logits_real".format(split): logits_real.detach().mean(), + "{}/logits_fake".format(split): logits_fake.detach().mean() + } + return d_loss, log + diff --git a/ldm/modules/losses/vqperceptual.py b/ldm/modules/losses/vqperceptual.py new file mode 100644 index 000000000..f69981769 --- /dev/null +++ b/ldm/modules/losses/vqperceptual.py @@ -0,0 +1,167 @@ +import torch +from torch import nn +import torch.nn.functional as F +from einops import repeat + +from taming.modules.discriminator.model import NLayerDiscriminator, weights_init +from taming.modules.losses.lpips import LPIPS +from taming.modules.losses.vqperceptual import hinge_d_loss, vanilla_d_loss + + +def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights): + assert weights.shape[0] == logits_real.shape[0] == logits_fake.shape[0] + loss_real = torch.mean(F.relu(1. - logits_real), dim=[1,2,3]) + loss_fake = torch.mean(F.relu(1. + logits_fake), dim=[1,2,3]) + loss_real = (weights * loss_real).sum() / weights.sum() + loss_fake = (weights * loss_fake).sum() / weights.sum() + d_loss = 0.5 * (loss_real + loss_fake) + return d_loss + +def adopt_weight(weight, global_step, threshold=0, value=0.): + if global_step < threshold: + weight = value + return weight + + +def measure_perplexity(predicted_indices, n_embed): + # src: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py + # eval cluster perplexity. 
when perplexity == num_embeddings then all clusters are used exactly equally
+    encodings = F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed)
+    avg_probs = encodings.mean(0)
+    perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp()
+    cluster_use = torch.sum(avg_probs > 0)
+    return perplexity, cluster_use
+
+def l1(x, y):
+    return torch.abs(x-y)
+
+
+def l2(x, y):
+    return torch.pow((x-y), 2)
+
+
+class VQLPIPSWithDiscriminator(nn.Module):
+    def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0,
+                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
+                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
+                 disc_ndf=64, disc_loss="hinge", n_classes=None, perceptual_loss="lpips",
+                 pixel_loss="l1"):
+        super().__init__()
+        assert disc_loss in ["hinge", "vanilla"]
+        assert perceptual_loss in ["lpips", "clips", "dists"]
+        assert pixel_loss in ["l1", "l2"]
+        self.codebook_weight = codebook_weight
+        self.pixel_weight = pixelloss_weight
+        if perceptual_loss == "lpips":
+            print(f"{self.__class__.__name__}: Running with LPIPS.")
+            self.perceptual_loss = LPIPS().eval()
+        else:
+            raise ValueError(f"Unknown perceptual loss: >> {perceptual_loss} <<")
+        self.perceptual_weight = perceptual_weight
+
+        if pixel_loss == "l1":
+            self.pixel_loss = l1
+        else:
+            self.pixel_loss = l2
+
+        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
+                                                 n_layers=disc_num_layers,
+                                                 use_actnorm=use_actnorm,
+                                                 ndf=disc_ndf
+                                                 ).apply(weights_init)
+        self.discriminator_iter_start = disc_start
+        if disc_loss == "hinge":
+            self.disc_loss = hinge_d_loss
+        elif disc_loss == "vanilla":
+            self.disc_loss = vanilla_d_loss
+        else:
+            raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
+        print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.")
+        self.disc_factor = disc_factor
+        self.discriminator_weight = disc_weight
+        self.disc_conditional = disc_conditional
+        self.n_classes = n_classes
+
+    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
+        if last_layer is not None:
+            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
+            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
+        else:
+            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
+            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
+
+        d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
+        d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
+        d_weight = d_weight * self.discriminator_weight
+        return d_weight
+
+    def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx,
+                global_step, last_layer=None, cond=None, split="train", predicted_indices=None):
+        if codebook_loss is None:
+            codebook_loss = torch.tensor([0.]).to(inputs.device)
+        #rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
+        rec_loss = self.pixel_loss(inputs.contiguous(), reconstructions.contiguous())
+        if self.perceptual_weight > 0:
+            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
+            rec_loss = rec_loss + self.perceptual_weight * p_loss
+        else:
+            p_loss = torch.tensor([0.0])
+
+        nll_loss = rec_loss
+        #nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
+        nll_loss = torch.mean(nll_loss)
+
+        # now the GAN part
+        if optimizer_idx == 0:
+            # generator update
+            if cond is None:
+                assert not self.disc_conditional
+                logits_fake = self.discriminator(reconstructions.contiguous())
+            else:
+                assert self.disc_conditional
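+                # conditional GAN: the conditioning is concatenated to the
+                # reconstruction along the channel dimension before scoring
+                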
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) + g_loss = -torch.mean(logits_fake) + + try: + d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) + except RuntimeError: + assert not self.training + d_weight = torch.tensor(0.0) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean() + + log = {"{}/total_loss".format(split): loss.clone().detach().mean(), + "{}/quant_loss".format(split): codebook_loss.detach().mean(), + "{}/nll_loss".format(split): nll_loss.detach().mean(), + "{}/rec_loss".format(split): rec_loss.detach().mean(), + "{}/p_loss".format(split): p_loss.detach().mean(), + "{}/d_weight".format(split): d_weight.detach(), + "{}/disc_factor".format(split): torch.tensor(disc_factor), + "{}/g_loss".format(split): g_loss.detach().mean(), + } + if predicted_indices is not None: + assert self.n_classes is not None + with torch.no_grad(): + perplexity, cluster_usage = measure_perplexity(predicted_indices, self.n_classes) + log[f"{split}/perplexity"] = perplexity + log[f"{split}/cluster_usage"] = cluster_usage + return loss, log + + if optimizer_idx == 1: + # second pass for discriminator update + if cond is None: + logits_real = self.discriminator(inputs.contiguous().detach()) + logits_fake = self.discriminator(reconstructions.contiguous().detach()) + else: + logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) + logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) + + log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), + "{}/logits_real".format(split): logits_real.detach().mean(), + "{}/logits_fake".format(split): logits_fake.detach().mean() + } + return d_loss, log diff --git a/ldm/modules/x_transformer.py b/ldm/modules/x_transformer.py new file mode 100644 index 000000000..5fc15bf9c --- /dev/null +++ b/ldm/modules/x_transformer.py @@ -0,0 +1,641 @@ +"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers""" +import torch +from torch import nn, einsum +import torch.nn.functional as F +from functools import partial +from inspect import isfunction +from collections import namedtuple +from einops import rearrange, repeat, reduce + +# constants + +DEFAULT_DIM_HEAD = 64 + +Intermediates = namedtuple('Intermediates', [ + 'pre_softmax_attn', + 'post_softmax_attn' +]) + +LayerIntermediates = namedtuple('Intermediates', [ + 'hiddens', + 'attn_intermediates' +]) + + +class AbsolutePositionalEmbedding(nn.Module): + def __init__(self, dim, max_seq_len): + super().__init__() + self.emb = nn.Embedding(max_seq_len, dim) + self.init_() + + def init_(self): + nn.init.normal_(self.emb.weight, std=0.02) + + def forward(self, x): + n = torch.arange(x.shape[1], device=x.device) + return self.emb(n)[None, :, :] + + +class FixedPositionalEmbedding(nn.Module): + def __init__(self, dim): + super().__init__() + inv_freq = 1. 
/ (10000 ** (torch.arange(0, dim, 2).float() / dim)) + self.register_buffer('inv_freq', inv_freq) + + def forward(self, x, seq_dim=1, offset=0): + t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset + sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq) + emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) + return emb[None, :, :] + + +# helpers + +def exists(val): + return val is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def always(val): + def inner(*args, **kwargs): + return val + return inner + + +def not_equals(val): + def inner(x): + return x != val + return inner + + +def equals(val): + def inner(x): + return x == val + return inner + + +def max_neg_value(tensor): + return -torch.finfo(tensor.dtype).max + + +# keyword argument helpers + +def pick_and_pop(keys, d): + values = list(map(lambda key: d.pop(key), keys)) + return dict(zip(keys, values)) + + +def group_dict_by_key(cond, d): + return_val = [dict(), dict()] + for key in d.keys(): + match = bool(cond(key)) + ind = int(not match) + return_val[ind][key] = d[key] + return (*return_val,) + + +def string_begins_with(prefix, str): + return str.startswith(prefix) + + +def group_by_key_prefix(prefix, d): + return group_dict_by_key(partial(string_begins_with, prefix), d) + + +def groupby_prefix_and_trim(prefix, d): + kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) + kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) + return kwargs_without_prefix, kwargs + + +# classes +class Scale(nn.Module): + def __init__(self, value, fn): + super().__init__() + self.value = value + self.fn = fn + + def forward(self, x, **kwargs): + x, *rest = self.fn(x, **kwargs) + return (x * self.value, *rest) + + +class Rezero(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + self.g = nn.Parameter(torch.zeros(1)) + + def forward(self, x, **kwargs): + x, *rest = self.fn(x, **kwargs) + return (x * self.g, *rest) + + +class ScaleNorm(nn.Module): + def __init__(self, dim, eps=1e-5): + super().__init__() + self.scale = dim ** -0.5 + self.eps = eps + self.g = nn.Parameter(torch.ones(1)) + + def forward(self, x): + norm = torch.norm(x, dim=-1, keepdim=True) * self.scale + return x / norm.clamp(min=self.eps) * self.g + + +class RMSNorm(nn.Module): + def __init__(self, dim, eps=1e-8): + super().__init__() + self.scale = dim ** -0.5 + self.eps = eps + self.g = nn.Parameter(torch.ones(dim)) + + def forward(self, x): + norm = torch.norm(x, dim=-1, keepdim=True) * self.scale + return x / norm.clamp(min=self.eps) * self.g + + +class Residual(nn.Module): + def forward(self, x, residual): + return x + residual + + +class GRUGating(nn.Module): + def __init__(self, dim): + super().__init__() + self.gru = nn.GRUCell(dim, dim) + + def forward(self, x, residual): + gated_output = self.gru( + rearrange(x, 'b n d -> (b n) d'), + rearrange(residual, 'b n d -> (b n) d') + ) + + return gated_output.reshape_as(x) + + +# feedforward + +class GEGLU(nn.Module): + def __init__(self, dim_in, dim_out): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out * 2) + + def forward(self, x): + x, gate = self.proj(x).chunk(2, dim=-1) + return x * F.gelu(gate) + + +class FeedForward(nn.Module): + def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): + super().__init__() + inner_dim = int(dim * mult) + dim_out = default(dim_out, dim) + 
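# when glu=True, GEGLU projects to 2 * inner_dim and gates one half with
+        # GELU of the other; otherwise a plain Linear followed by GELU is used
+        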
project_in = nn.Sequential( + nn.Linear(dim, inner_dim), + nn.GELU() + ) if not glu else GEGLU(dim, inner_dim) + + self.net = nn.Sequential( + project_in, + nn.Dropout(dropout), + nn.Linear(inner_dim, dim_out) + ) + + def forward(self, x): + return self.net(x) + + +# attention. +class Attention(nn.Module): + def __init__( + self, + dim, + dim_head=DEFAULT_DIM_HEAD, + heads=8, + causal=False, + mask=None, + talking_heads=False, + sparse_topk=None, + use_entmax15=False, + num_mem_kv=0, + dropout=0., + on_attn=False + ): + super().__init__() + if use_entmax15: + raise NotImplementedError("Check out entmax activation instead of softmax activation!") + self.scale = dim_head ** -0.5 + self.heads = heads + self.causal = causal + self.mask = mask + + inner_dim = dim_head * heads + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_k = nn.Linear(dim, inner_dim, bias=False) + self.to_v = nn.Linear(dim, inner_dim, bias=False) + self.dropout = nn.Dropout(dropout) + + # talking heads + self.talking_heads = talking_heads + if talking_heads: + self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads)) + self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads)) + + # explicit topk sparse attention + self.sparse_topk = sparse_topk + + # entmax + #self.attn_fn = entmax15 if use_entmax15 else F.softmax + self.attn_fn = F.softmax + + # add memory key / values + self.num_mem_kv = num_mem_kv + if num_mem_kv > 0: + self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) + self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) + + # attention on attention + self.attn_on_attn = on_attn + self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim) + + def forward( + self, + x, + context=None, + mask=None, + context_mask=None, + rel_pos=None, + sinusoidal_emb=None, + prev_attn=None, + mem=None + ): + b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device + kv_input = default(context, x) + + q_input = x + k_input = kv_input + v_input = kv_input + + if exists(mem): + k_input = torch.cat((mem, k_input), dim=-2) + v_input = torch.cat((mem, v_input), dim=-2) + + if exists(sinusoidal_emb): + # in shortformer, the query would start at a position offset depending on the past cached memory + offset = k_input.shape[-2] - q_input.shape[-2] + q_input = q_input + sinusoidal_emb(q_input, offset=offset) + k_input = k_input + sinusoidal_emb(k_input) + + q = self.to_q(q_input) + k = self.to_k(k_input) + v = self.to_v(v_input) + + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)) + + input_mask = None + if any(map(exists, (mask, context_mask))): + q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) + k_mask = q_mask if not exists(context) else context_mask + k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool()) + q_mask = rearrange(q_mask, 'b i -> b () i ()') + k_mask = rearrange(k_mask, 'b j -> b () () j') + input_mask = q_mask * k_mask + + if self.num_mem_kv > 0: + mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v)) + k = torch.cat((mem_k, k), dim=-2) + v = torch.cat((mem_v, v), dim=-2) + if exists(input_mask): + input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) + + dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale + mask_value = max_neg_value(dots) + + if exists(prev_attn): + dots = dots + prev_attn + + pre_softmax_attn = dots + + if talking_heads: + dots = 
einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous() + + if exists(rel_pos): + dots = rel_pos(dots) + + if exists(input_mask): + dots.masked_fill_(~input_mask, mask_value) + del input_mask + + if self.causal: + i, j = dots.shape[-2:] + r = torch.arange(i, device=device) + mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j') + mask = F.pad(mask, (j - i, 0), value=False) + dots.masked_fill_(mask, mask_value) + del mask + + if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]: + top, _ = dots.topk(self.sparse_topk, dim=-1) + vk = top[..., -1].unsqueeze(-1).expand_as(dots) + mask = dots < vk + dots.masked_fill_(mask, mask_value) + del mask + + attn = self.attn_fn(dots, dim=-1) + post_softmax_attn = attn + + attn = self.dropout(attn) + + if talking_heads: + attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous() + + out = einsum('b h i j, b h j d -> b h i d', attn, v) + out = rearrange(out, 'b h n d -> b n (h d)') + + intermediates = Intermediates( + pre_softmax_attn=pre_softmax_attn, + post_softmax_attn=post_softmax_attn + ) + + return self.to_out(out), intermediates + + +class AttentionLayers(nn.Module): + def __init__( + self, + dim, + depth, + heads=8, + causal=False, + cross_attend=False, + only_cross=False, + use_scalenorm=False, + use_rmsnorm=False, + use_rezero=False, + rel_pos_num_buckets=32, + rel_pos_max_distance=128, + position_infused_attn=False, + custom_layers=None, + sandwich_coef=None, + par_ratio=None, + residual_attn=False, + cross_residual_attn=False, + macaron=False, + pre_norm=True, + gate_residual=False, + **kwargs + ): + super().__init__() + ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) + attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs) + + dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) + + self.dim = dim + self.depth = depth + self.layers = nn.ModuleList([]) + + self.has_pos_emb = position_infused_attn + self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None + self.rotary_pos_emb = always(None) + + assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' + self.rel_pos = None + + self.pre_norm = pre_norm + + self.residual_attn = residual_attn + self.cross_residual_attn = cross_residual_attn + + norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm + norm_class = RMSNorm if use_rmsnorm else norm_class + norm_fn = partial(norm_class, dim) + + norm_fn = nn.Identity if use_rezero else norm_fn + branch_fn = Rezero if use_rezero else None + + if cross_attend and not only_cross: + default_block = ('a', 'c', 'f') + elif cross_attend and only_cross: + default_block = ('c', 'f') + else: + default_block = ('a', 'f') + + if macaron: + default_block = ('f',) + default_block + + if exists(custom_layers): + layer_types = custom_layers + elif exists(par_ratio): + par_depth = depth * len(default_block) + assert 1 < par_ratio <= par_depth, 'par ratio out of range' + default_block = tuple(filter(not_equals('f'), default_block)) + par_attn = par_depth // par_ratio + depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper + par_width = (depth_cut + depth_cut // par_attn) // par_attn + assert len(default_block) <= par_width, 'default block is too large for par_ratio' + par_block = default_block + ('f',) * (par_width - len(default_block)) + par_head = par_block * par_attn + layer_types = par_head + ('f',) * (par_depth - 
len(par_head)) + elif exists(sandwich_coef): + assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' + layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef + else: + layer_types = default_block * depth + + self.layer_types = layer_types + self.num_attn_layers = len(list(filter(equals('a'), layer_types))) + + for layer_type in self.layer_types: + if layer_type == 'a': + layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) + elif layer_type == 'c': + layer = Attention(dim, heads=heads, **attn_kwargs) + elif layer_type == 'f': + layer = FeedForward(dim, **ff_kwargs) + layer = layer if not macaron else Scale(0.5, layer) + else: + raise Exception(f'invalid layer type {layer_type}') + + if isinstance(layer, Attention) and exists(branch_fn): + layer = branch_fn(layer) + + if gate_residual: + residual_fn = GRUGating(dim) + else: + residual_fn = Residual() + + self.layers.append(nn.ModuleList([ + norm_fn(), + layer, + residual_fn + ])) + + def forward( + self, + x, + context=None, + mask=None, + context_mask=None, + mems=None, + return_hiddens=False + ): + hiddens = [] + intermediates = [] + prev_attn = None + prev_cross_attn = None + + mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers + + for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)): + is_last = ind == (len(self.layers) - 1) + + if layer_type == 'a': + hiddens.append(x) + layer_mem = mems.pop(0) + + residual = x + + if self.pre_norm: + x = norm(x) + + if layer_type == 'a': + out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos, + prev_attn=prev_attn, mem=layer_mem) + elif layer_type == 'c': + out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn) + elif layer_type == 'f': + out = block(x) + + x = residual_fn(out, residual) + + if layer_type in ('a', 'c'): + intermediates.append(inter) + + if layer_type == 'a' and self.residual_attn: + prev_attn = inter.pre_softmax_attn + elif layer_type == 'c' and self.cross_residual_attn: + prev_cross_attn = inter.pre_softmax_attn + + if not self.pre_norm and not is_last: + x = norm(x) + + if return_hiddens: + intermediates = LayerIntermediates( + hiddens=hiddens, + attn_intermediates=intermediates + ) + + return x, intermediates + + return x + + +class Encoder(AttentionLayers): + def __init__(self, **kwargs): + assert 'causal' not in kwargs, 'cannot set causality on encoder' + super().__init__(causal=False, **kwargs) + + + +class TransformerWrapper(nn.Module): + def __init__( + self, + *, + num_tokens, + max_seq_len, + attn_layers, + emb_dim=None, + max_mem_len=0., + emb_dropout=0., + num_memory_tokens=None, + tie_embedding=False, + use_pos_emb=True + ): + super().__init__() + assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' + + dim = attn_layers.dim + emb_dim = default(emb_dim, dim) + + self.max_seq_len = max_seq_len + self.max_mem_len = max_mem_len + self.num_tokens = num_tokens + + self.token_emb = nn.Embedding(num_tokens, emb_dim) + self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if ( + use_pos_emb and not attn_layers.has_pos_emb) else always(0) + self.emb_dropout = nn.Dropout(emb_dropout) + + self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() + self.attn_layers = attn_layers + self.norm = nn.LayerNorm(dim) + + self.init_() + + self.to_logits 
= nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t() + + # memory tokens (like [cls]) from Memory Transformers paper + num_memory_tokens = default(num_memory_tokens, 0) + self.num_memory_tokens = num_memory_tokens + if num_memory_tokens > 0: + self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) + + # let funnel encoder know number of memory tokens, if specified + if hasattr(attn_layers, 'num_memory_tokens'): + attn_layers.num_memory_tokens = num_memory_tokens + + def init_(self): + nn.init.normal_(self.token_emb.weight, std=0.02) + + def forward( + self, + x, + return_embeddings=False, + mask=None, + return_mems=False, + return_attn=False, + mems=None, + **kwargs + ): + b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens + x = self.token_emb(x) + x += self.pos_emb(x) + x = self.emb_dropout(x) + + x = self.project_emb(x) + + if num_mem > 0: + mem = repeat(self.memory_tokens, 'n d -> b n d', b=b) + x = torch.cat((mem, x), dim=1) + + # auto-handle masking after appending memory tokens + if exists(mask): + mask = F.pad(mask, (num_mem, 0), value=True) + + x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) + x = self.norm(x) + + mem, x = x[:, :num_mem], x[:, num_mem:] + + out = self.to_logits(x) if not return_embeddings else x + + if return_mems: + hiddens = intermediates.hiddens + new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens + new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) + return out, new_mems + + if return_attn: + attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) + return out, attn_maps + + return out + diff --git a/ldm/util.py b/ldm/util.py new file mode 100644 index 000000000..8ba38853e --- /dev/null +++ b/ldm/util.py @@ -0,0 +1,203 @@ +import importlib + +import torch +import numpy as np +from collections import abc +from einops import rearrange +from functools import partial + +import multiprocessing as mp +from threading import Thread +from queue import Queue + +from inspect import isfunction +from PIL import Image, ImageDraw, ImageFont + + +def log_txt_as_img(wh, xc, size=10): + # wh a tuple of (width, height) + # xc a list of captions to plot + b = len(xc) + txts = list() + for bi in range(b): + txt = Image.new("RGB", wh, color="white") + draw = ImageDraw.Draw(txt) + font = ImageFont.truetype('data/DejaVuSans.ttf', size=size) + nc = int(40 * (wh[0] / 256)) + lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc)) + + try: + draw.text((0, 0), lines, fill="black", font=font) + except UnicodeEncodeError: + print("Cant encode string for logging. Skipping.") + + txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0 + txts.append(txt) + txts = np.stack(txts) + txts = torch.tensor(txts) + return txts + + +def ismap(x): + if not isinstance(x, torch.Tensor): + return False + return (len(x.shape) == 4) and (x.shape[1] > 3) + + +def isimage(x): + if not isinstance(x, torch.Tensor): + return False + return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1) + + +def exists(x): + return x is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def mean_flat(tensor): + """ + https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 + Take the mean over all non-batch dimensions. 
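+    Example:
+        >>> mean_flat(torch.ones(2, 3, 4)).shape
+        torch.Size([2])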
+ """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def count_params(model, verbose=False): + total_params = sum(p.numel() for p in model.parameters()) + if verbose: + print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.") + return total_params + + +def instantiate_from_config(config): + if not "target" in config: + if config == '__is_first_stage__': + return None + elif config == "__is_unconditional__": + return None + raise KeyError("Expected key `target` to instantiate.") + return get_obj_from_str(config["target"])(**config.get("params", dict())) + + +def get_obj_from_str(string, reload=False): + module, cls = string.rsplit(".", 1) + if reload: + module_imp = importlib.import_module(module) + importlib.reload(module_imp) + return getattr(importlib.import_module(module, package=None), cls) + + +def _do_parallel_data_prefetch(func, Q, data, idx, idx_to_fn=False): + # create dummy dataset instance + + # run prefetching + if idx_to_fn: + res = func(data, worker_id=idx) + else: + res = func(data) + Q.put([idx, res]) + Q.put("Done") + + +def parallel_data_prefetch( + func: callable, data, n_proc, target_data_type="ndarray", cpu_intensive=True, use_worker_id=False +): + # if target_data_type not in ["ndarray", "list"]: + # raise ValueError( + # "Data, which is passed to parallel_data_prefetch has to be either of type list or ndarray." + # ) + if isinstance(data, np.ndarray) and target_data_type == "list": + raise ValueError("list expected but function got ndarray.") + elif isinstance(data, abc.Iterable): + if isinstance(data, dict): + print( + f'WARNING:"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.' + ) + data = list(data.values()) + if target_data_type == "ndarray": + data = np.asarray(data) + else: + data = list(data) + else: + raise TypeError( + f"The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}." + ) + + if cpu_intensive: + Q = mp.Queue(1000) + proc = mp.Process + else: + Q = Queue(1000) + proc = Thread + # spawn processes + if target_data_type == "ndarray": + arguments = [ + [func, Q, part, i, use_worker_id] + for i, part in enumerate(np.array_split(data, n_proc)) + ] + else: + step = ( + int(len(data) / n_proc + 1) + if len(data) % n_proc != 0 + else int(len(data) / n_proc) + ) + arguments = [ + [func, Q, part, i, use_worker_id] + for i, part in enumerate( + [data[i: i + step] for i in range(0, len(data), step)] + ) + ] + processes = [] + for i in range(n_proc): + p = proc(target=_do_parallel_data_prefetch, args=arguments[i]) + processes += [p] + + # start processes + print(f"Start prefetching...") + import time + + start = time.time() + gather_res = [[] for _ in range(n_proc)] + try: + for p in processes: + p.start() + + k = 0 + while k < n_proc: + # get result + res = Q.get() + if res == "Done": + k += 1 + else: + gather_res[res[0]] = res[1] + + except Exception as e: + print("Exception: ", e) + for p in processes: + p.terminate() + + raise e + finally: + for p in processes: + p.join() + print(f"Prefetching complete. 
[{time.time() - start} sec.]") + + if target_data_type == 'ndarray': + if not isinstance(gather_res[0], np.ndarray): + return np.concatenate([np.asarray(r) for r in gather_res], axis=0) + + # order outputs + return np.concatenate(gather_res, axis=0) + elif target_data_type == 'list': + out = [] + for r in gather_res: + out.extend(r) + return out + else: + return gather_res diff --git a/modules/devices.py b/modules/devices.py index 67165bf66..f30b6ebce 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -36,8 +36,8 @@ def get_optimal_device(): else: return torch.device("cuda") - if has_mps(): - return torch.device("mps") + # if has_mps(): + # return torch.device("mps") return cpu diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index eaedac13e..26280fe4f 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -70,14 +70,19 @@ class StableDiffusionModelHijack: embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase(cmd_opts.embeddings_dir) def hijack(self, m): - model_embeddings = m.cond_stage_model.transformer.text_model.embeddings + + if shared.text_model_name == "XLMR-Large": + model_embeddings = m.cond_stage_model.roberta.embeddings + model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self) + else : + model_embeddings = m.cond_stage_model.transformer.text_model.embeddings + model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embeddings, self) - model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self) m.cond_stage_model = FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) self.clip = m.cond_stage_model - apply_optimizations() + # apply_optimizations() def flatten(el): flattened = [flatten(children) for children in el.children()] @@ -125,8 +130,11 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): self.tokenizer = wrapped.tokenizer self.token_mults = {} - self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0] - + try: + self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0] + except: + self.comma_token = None + tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k] for text, ident in tokens_with_parens: mult = 1.0 @@ -298,6 +306,9 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count def forward(self, text): + if shared.text_model_name == "XLMR-Large": + return self.wrapped.encode(text) + use_old = opts.use_old_emphasis_implementation if use_old: batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text) @@ -359,7 +370,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): z = self.wrapped.transformer.text_model.final_layer_norm(z) else: z = outputs.last_hidden_state - + # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers] batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(device) diff --git a/modules/shared.py b/modules/shared.py index c93ae2a31..9941d2f4d 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -21,7 +21,7 @@ from modules.paths import models_path, script_path, sd_path sd_model_file = os.path.join(script_path, 
'model.ckpt') default_sd_model_file = sd_model_file parser = argparse.ArgumentParser() -parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",) +parser.add_argument("--config", type=str, default="configs/altdiffusion/ad-inference.yaml", help="path to config which constructs model",) parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",) parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints") parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) @@ -106,6 +106,10 @@ restricted_opts = { "outdir_txt2img_grids", "outdir_save", } +from omegaconf import OmegaConf +config = OmegaConf.load(f"{cmd_opts.config}") +# XLMR-Large +text_model_name = config.model.params.cond_stage_config.params.name cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access From ee3f5ea3eeb31f1ed72e2f0cbed2c00a782497d8 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Tue, 29 Nov 2022 10:30:19 +0800 Subject: [PATCH 02/53] delete old config file Signed-off-by: zhaohu xing <920232796@qq.com> --- configs/stable-diffusion/v1-inference.yaml | 71 ---------------------- 1 file changed, 71 deletions(-) delete mode 100644 configs/stable-diffusion/v1-inference.yaml diff --git a/configs/stable-diffusion/v1-inference.yaml b/configs/stable-diffusion/v1-inference.yaml deleted file mode 100644 index 2e6ef0f2c..000000000 --- a/configs/stable-diffusion/v1-inference.yaml +++ /dev/null @@ -1,71 +0,0 @@ -model: - base_learning_rate: 1.0e-04 - target: ldm.models.diffusion.ddpm.LatentDiffusion - params: - linear_start: 0.00085 - linear_end: 0.0120 - num_timesteps_cond: 1 - log_every_t: 200 - timesteps: 1000 - first_stage_key: "jpg" - cond_stage_key: "txt" - image_size: 64 - channels: 4 - cond_stage_trainable: false # Note: different from the one we trained before - conditioning_key: crossattn - monitor: val/loss_simple_ema - scale_factor: 0.18215 - use_ema: False - - scheduler_config: # 10000 warmup steps - target: ldm.lr_scheduler.LambdaLinearScheduler - params: - warm_up_steps: [ 10000 ] - cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases - f_start: [ 1.e-6 ] - f_max: [ 1. ] - f_min: [ 1. 
] - - unet_config: - target: ldm.modules.diffusionmodules.openaimodel.UNetModel - params: - image_size: 32 # unused - in_channels: 4 - out_channels: 4 - model_channels: 320 - attention_resolutions: [ 4, 2, 1 ] - num_res_blocks: 2 - channel_mult: [ 1, 2, 4, 4 ] - num_heads: 8 - use_spatial_transformer: True - transformer_depth: 1 - context_dim: 768 - use_checkpoint: True - legacy: False - - first_stage_config: - target: ldm.models.autoencoder.AutoencoderKL - params: - embed_dim: 4 - monitor: val/rec_loss - ddconfig: - double_z: true - z_channels: 4 - resolution: 256 - in_channels: 3 - out_ch: 3 - ch: 128 - ch_mult: - - 1 - - 2 - - 4 - - 4 - num_res_blocks: 2 - attn_resolutions: [] - dropout: 0.0 - lossconfig: - target: torch.nn.Identity - - cond_stage_config: - # target: ldm.modules.encoders.modules.FrozenCLIPEmbedder - target: altclip.model.AltCLIPEmbedder \ No newline at end of file From 52cc83d36b7663a77b79fd2258d2ca871af73e55 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Wed, 30 Nov 2022 14:56:12 +0800 Subject: [PATCH 03/53] fix bugs Signed-off-by: zhaohu xing <920232796@qq.com> --- configs/altdiffusion/ad-inference.yaml | 2 +- launch.py | 10 +- ldm/data/__init__.py | 0 ldm/data/base.py | 23 - ldm/data/imagenet.py | 394 ----- ldm/data/lsun.py | 92 -- ldm/lr_scheduler.py | 98 -- ldm/models/autoencoder.py | 443 ----- ldm/models/diffusion/__init__.py | 0 ldm/models/diffusion/classifier.py | 267 --- ldm/models/diffusion/ddim.py | 241 --- ldm/models/diffusion/ddpm.py | 1445 ----------------- ldm/models/diffusion/dpm_solver/__init__.py | 1 - ldm/models/diffusion/dpm_solver/dpm_solver.py | 1184 -------------- ldm/models/diffusion/dpm_solver/sampler.py | 82 - ldm/models/diffusion/plms.py | 236 --- ldm/modules/attention.py | 261 --- ldm/modules/diffusionmodules/__init__.py | 0 ldm/modules/diffusionmodules/model.py | 835 ---------- ldm/modules/diffusionmodules/openaimodel.py | 961 ----------- ldm/modules/diffusionmodules/util.py | 267 --- ldm/modules/distributions/__init__.py | 0 ldm/modules/distributions/distributions.py | 92 -- ldm/modules/ema.py | 76 - ldm/modules/encoders/__init__.py | 0 ldm/modules/encoders/modules.py | 234 --- ldm/modules/image_degradation/__init__.py | 2 - ldm/modules/image_degradation/bsrgan.py | 730 --------- ldm/modules/image_degradation/bsrgan_light.py | 650 -------- ldm/modules/image_degradation/utils/test.png | Bin 441072 -> 0 bytes ldm/modules/image_degradation/utils_image.py | 916 ----------- ldm/modules/losses/__init__.py | 1 - ldm/modules/losses/contperceptual.py | 111 -- ldm/modules/losses/vqperceptual.py | 167 -- ldm/modules/x_transformer.py | 641 -------- ldm/util.py | 203 --- modules/sd_hijack.py | 15 +- modules/sd_hijack_clip.py | 10 +- {ldm/modules/encoders => modules}/xlmr.py | 0 39 files changed, 22 insertions(+), 10668 deletions(-) delete mode 100644 ldm/data/__init__.py delete mode 100644 ldm/data/base.py delete mode 100644 ldm/data/imagenet.py delete mode 100644 ldm/data/lsun.py delete mode 100644 ldm/lr_scheduler.py delete mode 100644 ldm/models/autoencoder.py delete mode 100644 ldm/models/diffusion/__init__.py delete mode 100644 ldm/models/diffusion/classifier.py delete mode 100644 ldm/models/diffusion/ddim.py delete mode 100644 ldm/models/diffusion/ddpm.py delete mode 100644 ldm/models/diffusion/dpm_solver/__init__.py delete mode 100644 ldm/models/diffusion/dpm_solver/dpm_solver.py delete mode 100644 ldm/models/diffusion/dpm_solver/sampler.py delete mode 100644 ldm/models/diffusion/plms.py delete mode 100644 
ldm/modules/attention.py delete mode 100644 ldm/modules/diffusionmodules/__init__.py delete mode 100644 ldm/modules/diffusionmodules/model.py delete mode 100644 ldm/modules/diffusionmodules/openaimodel.py delete mode 100644 ldm/modules/diffusionmodules/util.py delete mode 100644 ldm/modules/distributions/__init__.py delete mode 100644 ldm/modules/distributions/distributions.py delete mode 100644 ldm/modules/ema.py delete mode 100644 ldm/modules/encoders/__init__.py delete mode 100644 ldm/modules/encoders/modules.py delete mode 100644 ldm/modules/image_degradation/__init__.py delete mode 100644 ldm/modules/image_degradation/bsrgan.py delete mode 100644 ldm/modules/image_degradation/bsrgan_light.py delete mode 100644 ldm/modules/image_degradation/utils/test.png delete mode 100644 ldm/modules/image_degradation/utils_image.py delete mode 100644 ldm/modules/losses/__init__.py delete mode 100644 ldm/modules/losses/contperceptual.py delete mode 100644 ldm/modules/losses/vqperceptual.py delete mode 100644 ldm/modules/x_transformer.py delete mode 100644 ldm/util.py rename {ldm/modules/encoders => modules}/xlmr.py (100%) diff --git a/configs/altdiffusion/ad-inference.yaml b/configs/altdiffusion/ad-inference.yaml index 1b11b63ea..cfbee72d7 100644 --- a/configs/altdiffusion/ad-inference.yaml +++ b/configs/altdiffusion/ad-inference.yaml @@ -67,6 +67,6 @@ model: target: torch.nn.Identity cond_stage_config: - target: ldm.modules.encoders.xlmr.BertSeriesModelWithTransformation + target: modules.xlmr.BertSeriesModelWithTransformation params: name: "XLMR-Large" \ No newline at end of file diff --git a/launch.py b/launch.py index ad9ddd5a9..3f4dc870e 100644 --- a/launch.py +++ b/launch.py @@ -233,11 +233,11 @@ def prepare_enviroment(): os.makedirs(dir_repos, exist_ok=True) - git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash) - git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash) - git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash) - git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash) - git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash) + git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", ) + git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", ) + git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", ) + git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", ) + git_clone(blip_repo, repo_dir('BLIP'), "BLIP", ) if not is_installed("lpips"): run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer") diff --git a/ldm/data/__init__.py b/ldm/data/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/ldm/data/base.py b/ldm/data/base.py deleted file mode 100644 index b196c2f7a..000000000 --- a/ldm/data/base.py +++ /dev/null @@ -1,23 +0,0 @@ -from abc import abstractmethod -from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset - - -class Txt2ImgIterableBaseDataset(IterableDataset): - ''' - Define an interface to make the IterableDatasets for text2img data chainable - ''' - def __init__(self, num_records=0, valid_ids=None, size=256): - super().__init__() - self.num_records = num_records - self.valid_ids = valid_ids - self.sample_ids = valid_ids - self.size = size - - 
print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.') - - def __len__(self): - return self.num_records - - @abstractmethod - def __iter__(self): - pass \ No newline at end of file diff --git a/ldm/data/imagenet.py b/ldm/data/imagenet.py deleted file mode 100644 index 1c473f9c6..000000000 --- a/ldm/data/imagenet.py +++ /dev/null @@ -1,394 +0,0 @@ -import os, yaml, pickle, shutil, tarfile, glob -import cv2 -import albumentations -import PIL -import numpy as np -import torchvision.transforms.functional as TF -from omegaconf import OmegaConf -from functools import partial -from PIL import Image -from tqdm import tqdm -from torch.utils.data import Dataset, Subset - -import taming.data.utils as tdu -from taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve -from taming.data.imagenet import ImagePaths - -from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light - - -def synset2idx(path_to_yaml="data/index_synset.yaml"): - with open(path_to_yaml) as f: - di2s = yaml.load(f) - return dict((v,k) for k,v in di2s.items()) - - -class ImageNetBase(Dataset): - def __init__(self, config=None): - self.config = config or OmegaConf.create() - if not type(self.config)==dict: - self.config = OmegaConf.to_container(self.config) - self.keep_orig_class_label = self.config.get("keep_orig_class_label", False) - self.process_images = True # if False we skip loading & processing images and self.data contains filepaths - self._prepare() - self._prepare_synset_to_human() - self._prepare_idx_to_synset() - self._prepare_human_to_integer_label() - self._load() - - def __len__(self): - return len(self.data) - - def __getitem__(self, i): - return self.data[i] - - def _prepare(self): - raise NotImplementedError() - - def _filter_relpaths(self, relpaths): - ignore = set([ - "n06596364_9591.JPEG", - ]) - relpaths = [rpath for rpath in relpaths if not rpath.split("/")[-1] in ignore] - if "sub_indices" in self.config: - indices = str_to_indices(self.config["sub_indices"]) - synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn) # returns a list of strings - self.synset2idx = synset2idx(path_to_yaml=self.idx2syn) - files = [] - for rpath in relpaths: - syn = rpath.split("/")[0] - if syn in synsets: - files.append(rpath) - return files - else: - return relpaths - - def _prepare_synset_to_human(self): - SIZE = 2655750 - URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1" - self.human_dict = os.path.join(self.root, "synset_human.txt") - if (not os.path.exists(self.human_dict) or - not os.path.getsize(self.human_dict)==SIZE): - download(URL, self.human_dict) - - def _prepare_idx_to_synset(self): - URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1" - self.idx2syn = os.path.join(self.root, "index_synset.yaml") - if (not os.path.exists(self.idx2syn)): - download(URL, self.idx2syn) - - def _prepare_human_to_integer_label(self): - URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1" - self.human2integer = os.path.join(self.root, "imagenet1000_clsidx_to_labels.txt") - if (not os.path.exists(self.human2integer)): - download(URL, self.human2integer) - with open(self.human2integer, "r") as f: - lines = f.read().splitlines() - assert len(lines) == 1000 - self.human2integer_dict = dict() - for line in lines: - value, key = line.split(":") - self.human2integer_dict[key] = int(value) - - def _load(self): - with open(self.txt_filelist, "r") as f: - self.relpaths = f.read().splitlines() - 
l1 = len(self.relpaths) - self.relpaths = self._filter_relpaths(self.relpaths) - print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths))) - - self.synsets = [p.split("/")[0] for p in self.relpaths] - self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths] - - unique_synsets = np.unique(self.synsets) - class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets)) - if not self.keep_orig_class_label: - self.class_labels = [class_dict[s] for s in self.synsets] - else: - self.class_labels = [self.synset2idx[s] for s in self.synsets] - - with open(self.human_dict, "r") as f: - human_dict = f.read().splitlines() - human_dict = dict(line.split(maxsplit=1) for line in human_dict) - - self.human_labels = [human_dict[s] for s in self.synsets] - - labels = { - "relpath": np.array(self.relpaths), - "synsets": np.array(self.synsets), - "class_label": np.array(self.class_labels), - "human_label": np.array(self.human_labels), - } - - if self.process_images: - self.size = retrieve(self.config, "size", default=256) - self.data = ImagePaths(self.abspaths, - labels=labels, - size=self.size, - random_crop=self.random_crop, - ) - else: - self.data = self.abspaths - - -class ImageNetTrain(ImageNetBase): - NAME = "ILSVRC2012_train" - URL = "http://www.image-net.org/challenges/LSVRC/2012/" - AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2" - FILES = [ - "ILSVRC2012_img_train.tar", - ] - SIZES = [ - 147897477120, - ] - - def __init__(self, process_images=True, data_root=None, **kwargs): - self.process_images = process_images - self.data_root = data_root - super().__init__(**kwargs) - - def _prepare(self): - if self.data_root: - self.root = os.path.join(self.data_root, self.NAME) - else: - cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) - self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) - - self.datadir = os.path.join(self.root, "data") - self.txt_filelist = os.path.join(self.root, "filelist.txt") - self.expected_length = 1281167 - self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop", - default=True) - if not tdu.is_prepared(self.root): - # prep - print("Preparing dataset {} in {}".format(self.NAME, self.root)) - - datadir = self.datadir - if not os.path.exists(datadir): - path = os.path.join(self.root, self.FILES[0]) - if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]: - import academictorrents as at - atpath = at.get(self.AT_HASH, datastore=self.root) - assert atpath == path - - print("Extracting {} to {}".format(path, datadir)) - os.makedirs(datadir, exist_ok=True) - with tarfile.open(path, "r:") as tar: - tar.extractall(path=datadir) - - print("Extracting sub-tars.") - subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar"))) - for subpath in tqdm(subpaths): - subdir = subpath[:-len(".tar")] - os.makedirs(subdir, exist_ok=True) - with tarfile.open(subpath, "r:") as tar: - tar.extractall(path=subdir) - - filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) - filelist = [os.path.relpath(p, start=datadir) for p in filelist] - filelist = sorted(filelist) - filelist = "\n".join(filelist)+"\n" - with open(self.txt_filelist, "w") as f: - f.write(filelist) - - tdu.mark_prepared(self.root) - - -class ImageNetValidation(ImageNetBase): - NAME = "ILSVRC2012_validation" - URL = "http://www.image-net.org/challenges/LSVRC/2012/" - AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5" - VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1" - FILES = [ - 
"ILSVRC2012_img_val.tar", - "validation_synset.txt", - ] - SIZES = [ - 6744924160, - 1950000, - ] - - def __init__(self, process_images=True, data_root=None, **kwargs): - self.data_root = data_root - self.process_images = process_images - super().__init__(**kwargs) - - def _prepare(self): - if self.data_root: - self.root = os.path.join(self.data_root, self.NAME) - else: - cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) - self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) - self.datadir = os.path.join(self.root, "data") - self.txt_filelist = os.path.join(self.root, "filelist.txt") - self.expected_length = 50000 - self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop", - default=False) - if not tdu.is_prepared(self.root): - # prep - print("Preparing dataset {} in {}".format(self.NAME, self.root)) - - datadir = self.datadir - if not os.path.exists(datadir): - path = os.path.join(self.root, self.FILES[0]) - if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]: - import academictorrents as at - atpath = at.get(self.AT_HASH, datastore=self.root) - assert atpath == path - - print("Extracting {} to {}".format(path, datadir)) - os.makedirs(datadir, exist_ok=True) - with tarfile.open(path, "r:") as tar: - tar.extractall(path=datadir) - - vspath = os.path.join(self.root, self.FILES[1]) - if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]: - download(self.VS_URL, vspath) - - with open(vspath, "r") as f: - synset_dict = f.read().splitlines() - synset_dict = dict(line.split() for line in synset_dict) - - print("Reorganizing into synset folders") - synsets = np.unique(list(synset_dict.values())) - for s in synsets: - os.makedirs(os.path.join(datadir, s), exist_ok=True) - for k, v in synset_dict.items(): - src = os.path.join(datadir, k) - dst = os.path.join(datadir, v) - shutil.move(src, dst) - - filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) - filelist = [os.path.relpath(p, start=datadir) for p in filelist] - filelist = sorted(filelist) - filelist = "\n".join(filelist)+"\n" - with open(self.txt_filelist, "w") as f: - f.write(filelist) - - tdu.mark_prepared(self.root) - - - -class ImageNetSR(Dataset): - def __init__(self, size=None, - degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1., - random_crop=True): - """ - Imagenet Superresolution Dataloader - Performs following ops in order: - 1. crops a crop of size s from image either as random or center crop - 2. resizes crop to size with cv2.area_interpolation - 3. degrades resized crop with degradation_fn - - :param size: resizing to size after cropping - :param degradation: degradation_fn, e.g. cv_bicubic or bsrgan_light - :param downscale_f: Low Resolution Downsample factor - :param min_crop_f: determines crop size s, - where s = c * min_img_side_len with c sampled from interval (min_crop_f, max_crop_f) - :param max_crop_f: "" - :param data_root: - :param random_crop: - """ - self.base = self.get_base() - assert size - assert (size / downscale_f).is_integer() - self.size = size - self.LR_size = int(size / downscale_f) - self.min_crop_f = min_crop_f - self.max_crop_f = max_crop_f - assert(max_crop_f <= 1.) 
- self.center_crop = not random_crop - - self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA) - - self.pil_interpolation = False # gets reset later if incase interp_op is from pillow - - if degradation == "bsrgan": - self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f) - - elif degradation == "bsrgan_light": - self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f) - - else: - interpolation_fn = { - "cv_nearest": cv2.INTER_NEAREST, - "cv_bilinear": cv2.INTER_LINEAR, - "cv_bicubic": cv2.INTER_CUBIC, - "cv_area": cv2.INTER_AREA, - "cv_lanczos": cv2.INTER_LANCZOS4, - "pil_nearest": PIL.Image.NEAREST, - "pil_bilinear": PIL.Image.BILINEAR, - "pil_bicubic": PIL.Image.BICUBIC, - "pil_box": PIL.Image.BOX, - "pil_hamming": PIL.Image.HAMMING, - "pil_lanczos": PIL.Image.LANCZOS, - }[degradation] - - self.pil_interpolation = degradation.startswith("pil_") - - if self.pil_interpolation: - self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn) - - else: - self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size, - interpolation=interpolation_fn) - - def __len__(self): - return len(self.base) - - def __getitem__(self, i): - example = self.base[i] - image = Image.open(example["file_path_"]) - - if not image.mode == "RGB": - image = image.convert("RGB") - - image = np.array(image).astype(np.uint8) - - min_side_len = min(image.shape[:2]) - crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None) - crop_side_len = int(crop_side_len) - - if self.center_crop: - self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len) - - else: - self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len) - - image = self.cropper(image=image)["image"] - image = self.image_rescaler(image=image)["image"] - - if self.pil_interpolation: - image_pil = PIL.Image.fromarray(image) - LR_image = self.degradation_process(image_pil) - LR_image = np.array(LR_image).astype(np.uint8) - - else: - LR_image = self.degradation_process(image=image)["image"] - - example["image"] = (image/127.5 - 1.0).astype(np.float32) - example["LR_image"] = (LR_image/127.5 - 1.0).astype(np.float32) - - return example - - -class ImageNetSRTrain(ImageNetSR): - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def get_base(self): - with open("data/imagenet_train_hr_indices.p", "rb") as f: - indices = pickle.load(f) - dset = ImageNetTrain(process_images=False,) - return Subset(dset, indices) - - -class ImageNetSRValidation(ImageNetSR): - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def get_base(self): - with open("data/imagenet_val_hr_indices.p", "rb") as f: - indices = pickle.load(f) - dset = ImageNetValidation(process_images=False,) - return Subset(dset, indices) diff --git a/ldm/data/lsun.py b/ldm/data/lsun.py deleted file mode 100644 index 6256e4571..000000000 --- a/ldm/data/lsun.py +++ /dev/null @@ -1,92 +0,0 @@ -import os -import numpy as np -import PIL -from PIL import Image -from torch.utils.data import Dataset -from torchvision import transforms - - -class LSUNBase(Dataset): - def __init__(self, - txt_file, - data_root, - size=None, - interpolation="bicubic", - flip_p=0.5 - ): - self.data_paths = txt_file - self.data_root = data_root - with open(self.data_paths, "r") as f: - self.image_paths = f.read().splitlines() - self._length = len(self.image_paths) - self.labels = { - "relative_file_path_": 
[l for l in self.image_paths], - "file_path_": [os.path.join(self.data_root, l) - for l in self.image_paths], - } - - self.size = size - self.interpolation = {"linear": PIL.Image.LINEAR, - "bilinear": PIL.Image.BILINEAR, - "bicubic": PIL.Image.BICUBIC, - "lanczos": PIL.Image.LANCZOS, - }[interpolation] - self.flip = transforms.RandomHorizontalFlip(p=flip_p) - - def __len__(self): - return self._length - - def __getitem__(self, i): - example = dict((k, self.labels[k][i]) for k in self.labels) - image = Image.open(example["file_path_"]) - if not image.mode == "RGB": - image = image.convert("RGB") - - # default to score-sde preprocessing - img = np.array(image).astype(np.uint8) - crop = min(img.shape[0], img.shape[1]) - h, w, = img.shape[0], img.shape[1] - img = img[(h - crop) // 2:(h + crop) // 2, - (w - crop) // 2:(w + crop) // 2] - - image = Image.fromarray(img) - if self.size is not None: - image = image.resize((self.size, self.size), resample=self.interpolation) - - image = self.flip(image) - image = np.array(image).astype(np.uint8) - example["image"] = (image / 127.5 - 1.0).astype(np.float32) - return example - - -class LSUNChurchesTrain(LSUNBase): - def __init__(self, **kwargs): - super().__init__(txt_file="data/lsun/church_outdoor_train.txt", data_root="data/lsun/churches", **kwargs) - - -class LSUNChurchesValidation(LSUNBase): - def __init__(self, flip_p=0., **kwargs): - super().__init__(txt_file="data/lsun/church_outdoor_val.txt", data_root="data/lsun/churches", - flip_p=flip_p, **kwargs) - - -class LSUNBedroomsTrain(LSUNBase): - def __init__(self, **kwargs): - super().__init__(txt_file="data/lsun/bedrooms_train.txt", data_root="data/lsun/bedrooms", **kwargs) - - -class LSUNBedroomsValidation(LSUNBase): - def __init__(self, flip_p=0.0, **kwargs): - super().__init__(txt_file="data/lsun/bedrooms_val.txt", data_root="data/lsun/bedrooms", - flip_p=flip_p, **kwargs) - - -class LSUNCatsTrain(LSUNBase): - def __init__(self, **kwargs): - super().__init__(txt_file="data/lsun/cat_train.txt", data_root="data/lsun/cats", **kwargs) - - -class LSUNCatsValidation(LSUNBase): - def __init__(self, flip_p=0., **kwargs): - super().__init__(txt_file="data/lsun/cat_val.txt", data_root="data/lsun/cats", - flip_p=flip_p, **kwargs) diff --git a/ldm/lr_scheduler.py b/ldm/lr_scheduler.py deleted file mode 100644 index be39da9ca..000000000 --- a/ldm/lr_scheduler.py +++ /dev/null @@ -1,98 +0,0 @@ -import numpy as np - - -class LambdaWarmUpCosineScheduler: - """ - note: use with a base_lr of 1.0 - """ - def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0): - self.lr_warm_up_steps = warm_up_steps - self.lr_start = lr_start - self.lr_min = lr_min - self.lr_max = lr_max - self.lr_max_decay_steps = max_decay_steps - self.last_lr = 0. 
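- # schedule(n) returns an lr *multiplier* (hence the base_lr of 1.0): linear warm-up - # from lr_start to lr_max over warm_up_steps, then cosine decay towards lr_min, - # lr(n) = lr_min + 0.5 * (lr_max - lr_min) * (1 + cos(pi * t)) - # with t = (n - warm_up_steps) / (max_decay_steps - warm_up_steps), clamped to 1.0.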
- self.verbosity_interval = verbosity_interval - - def schedule(self, n, **kwargs): - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}") - if n < self.lr_warm_up_steps: - lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start - self.last_lr = lr - return lr - else: - t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps) - t = min(t, 1.0) - lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( - 1 + np.cos(t * np.pi)) - self.last_lr = lr - return lr - - def __call__(self, n, **kwargs): - return self.schedule(n,**kwargs) - - -class LambdaWarmUpCosineScheduler2: - """ - supports repeated iterations, configurable via lists - note: use with a base_lr of 1.0. - """ - def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0): - assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths) - self.lr_warm_up_steps = warm_up_steps - self.f_start = f_start - self.f_min = f_min - self.f_max = f_max - self.cycle_lengths = cycle_lengths - self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths)) - self.last_f = 0. - self.verbosity_interval = verbosity_interval - - def find_in_interval(self, n): - interval = 0 - for cl in self.cum_cycles[1:]: - if n <= cl: - return interval - interval += 1 - - def schedule(self, n, **kwargs): - cycle = self.find_in_interval(n) - n = n - self.cum_cycles[cycle] - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " - f"current cycle {cycle}") - if n < self.lr_warm_up_steps[cycle]: - f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] - self.last_f = f - return f - else: - t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle]) - t = min(t, 1.0) - f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * ( - 1 + np.cos(t * np.pi)) - self.last_f = f - return f - - def __call__(self, n, **kwargs): - return self.schedule(n, **kwargs) - - -class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2): - - def schedule(self, n, **kwargs): - cycle = self.find_in_interval(n) - n = n - self.cum_cycles[cycle] - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " - f"current cycle {cycle}") - - if n < self.lr_warm_up_steps[cycle]: - f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] - self.last_f = f - return f - else: - f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle]) - self.last_f = f - return f - diff --git a/ldm/models/autoencoder.py b/ldm/models/autoencoder.py deleted file mode 100644 index 6a9c4f454..000000000 --- a/ldm/models/autoencoder.py +++ /dev/null @@ -1,443 +0,0 @@ -import torch -import numpy as np -import pytorch_lightning as pl -import torch.nn.functional as F -from contextlib import contextmanager -from packaging import version -from torch.optim.lr_scheduler import LambdaLR - -from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer - -from ldm.modules.diffusionmodules.model import Encoder, Decoder -from ldm.modules.distributions.distributions import DiagonalGaussianDistribution -from ldm.modules.ema import LitEma - -from ldm.util import instantiate_from_config - - -class VQModel(pl.LightningModule): - def __init__(self, - ddconfig, - lossconfig, - n_embed, - embed_dim, -
ckpt_path=None, - ignore_keys=[], - image_key="image", - colorize_nlabels=None, - monitor=None, - batch_resize_range=None, - scheduler_config=None, - lr_g_factor=1.0, - remap=None, - sane_index_shape=False, # tell vector quantizer to return indices as bhw - use_ema=False - ): - super().__init__() - self.embed_dim = embed_dim - self.n_embed = n_embed - self.image_key = image_key - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - self.loss = instantiate_from_config(lossconfig) - self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, - remap=remap, - sane_index_shape=sane_index_shape) - self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - if colorize_nlabels is not None: - assert type(colorize_nlabels)==int - self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) - if monitor is not None: - self.monitor = monitor - self.batch_resize_range = batch_resize_range - if self.batch_resize_range is not None: - print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.") - - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) - self.scheduler_config = scheduler_config - self.lr_g_factor = lr_g_factor - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.parameters()) - self.model_ema.copy_to(self) - if context is not None: - print(f"{context}: Switched to EMA weights") - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.parameters()) - if context is not None: - print(f"{context}: Restored training weights") - - def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location="cpu")["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - print(f"Unexpected Keys: {unexpected}") - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self) - - def encode(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - quant, emb_loss, info = self.quantize(h) - return quant, emb_loss, info - - def encode_to_prequant(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - return h - - def decode(self, quant): - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - - def decode_code(self, code_b): - quant_b = self.quantize.embed_code(code_b) - dec = self.decode(quant_b) - return dec - - def forward(self, input, return_pred_indices=False): - quant, diff, (_,_,ind) = self.encode(input) - dec = self.decode(quant) - if return_pred_indices: - return dec, diff, ind - return dec, diff - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() - if self.batch_resize_range is not None: - lower_size = self.batch_resize_range[0] - upper_size = self.batch_resize_range[1] - if self.global_step <= 4: - # do the first few batches with max size to avoid later oom - new_resize = 
upper_size - else: - new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16)) - if new_resize != x.shape[2]: - x = F.interpolate(x, size=new_resize, mode="bicubic") - x = x.detach() - return x - - def training_step(self, batch, batch_idx, optimizer_idx): - # https://github.com/pytorch/pytorch/issues/37142 - # try not to fool the heuristics - x = self.get_input(batch, self.image_key) - xrec, qloss, ind = self(x, return_pred_indices=True) - - if optimizer_idx == 0: - # autoencode - aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train", - predicted_indices=ind) - - self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) - return aeloss - - if optimizer_idx == 1: - # discriminator - discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) - return discloss - - def validation_step(self, batch, batch_idx): - log_dict = self._validation_step(batch, batch_idx) - with self.ema_scope(): - log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema") - return log_dict - - def _validation_step(self, batch, batch_idx, suffix=""): - x = self.get_input(batch, self.image_key) - xrec, qloss, ind = self(x, return_pred_indices=True) - aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, - self.global_step, - last_layer=self.get_last_layer(), - split="val"+suffix, - predicted_indices=ind - ) - - discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, - self.global_step, - last_layer=self.get_last_layer(), - split="val"+suffix, - predicted_indices=ind - ) - rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] - self.log(f"val{suffix}/rec_loss", rec_loss, - prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) - self.log(f"val{suffix}/aeloss", aeloss, - prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) - if version.parse(pl.__version__) >= version.parse('1.4.0'): - del log_dict_ae[f"val{suffix}/rec_loss"] - self.log_dict(log_dict_ae) - self.log_dict(log_dict_disc) - return self.log_dict - - def configure_optimizers(self): - lr_d = self.learning_rate - lr_g = self.lr_g_factor*self.learning_rate - print("lr_d", lr_d) - print("lr_g", lr_g) - opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ - list(self.decoder.parameters())+ - list(self.quantize.parameters())+ - list(self.quant_conv.parameters())+ - list(self.post_quant_conv.parameters()), - lr=lr_g, betas=(0.5, 0.9)) - opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), - lr=lr_d, betas=(0.5, 0.9)) - - if self.scheduler_config is not None: - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }, - { - 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }, - ] - return [opt_ae, opt_disc], scheduler - return [opt_ae, opt_disc], [] - - def get_last_layer(self): - return self.decoder.conv_out.weight - - def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): - log = dict() - x = self.get_input(batch, self.image_key) - x = x.to(self.device) - if only_inputs: - log["inputs"] = x - return log - xrec, _ = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert 
xrec.shape[1] > 3 - x = self.to_rgb(x) - xrec = self.to_rgb(xrec) - log["inputs"] = x - log["reconstructions"] = xrec - if plot_ema: - with self.ema_scope(): - xrec_ema, _ = self(x) - if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) - log["reconstructions_ema"] = xrec_ema - return log - - def to_rgb(self, x): - assert self.image_key == "segmentation" - if not hasattr(self, "colorize"): - self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) - x = F.conv2d(x, weight=self.colorize) - x = 2.*(x-x.min())/(x.max()-x.min()) - 1. - return x - - -class VQModelInterface(VQModel): - def __init__(self, embed_dim, *args, **kwargs): - super().__init__(embed_dim=embed_dim, *args, **kwargs) - self.embed_dim = embed_dim - - def encode(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - return h - - def decode(self, h, force_not_quantize=False): - # also go through quantization layer - if not force_not_quantize: - quant, emb_loss, info = self.quantize(h) - else: - quant = h - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - - -class AutoencoderKL(pl.LightningModule): - def __init__(self, - ddconfig, - lossconfig, - embed_dim, - ckpt_path=None, - ignore_keys=[], - image_key="image", - colorize_nlabels=None, - monitor=None, - ): - super().__init__() - self.image_key = image_key - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - self.loss = instantiate_from_config(lossconfig) - assert ddconfig["double_z"] - self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - self.embed_dim = embed_dim - if colorize_nlabels is not None: - assert type(colorize_nlabels)==int - self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) - if monitor is not None: - self.monitor = monitor - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) - - def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location="cpu")["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - self.load_state_dict(sd, strict=False) - print(f"Restored from {path}") - - def encode(self, x): - h = self.encoder(x) - moments = self.quant_conv(h) - posterior = DiagonalGaussianDistribution(moments) - return posterior - - def decode(self, z): - z = self.post_quant_conv(z) - dec = self.decoder(z) - return dec - - def forward(self, input, sample_posterior=True): - posterior = self.encode(input) - if sample_posterior: - z = posterior.sample() - else: - z = posterior.mode() - dec = self.decode(z) - return dec, posterior - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() - return x - - def training_step(self, batch, batch_idx, optimizer_idx): - inputs = self.get_input(batch, self.image_key) - reconstructions, posterior = self(inputs) - - if optimizer_idx == 0: - # train encoder+decoder+logvar - aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) - self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False) - return aeloss - - if optimizer_idx == 1: - # train the 
discriminator - discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - - self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) - self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) - return discloss - - def validation_step(self, batch, batch_idx): - inputs = self.get_input(batch, self.image_key) - reconstructions, posterior = self(inputs) - aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step, - last_layer=self.get_last_layer(), split="val") - - discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, - last_layer=self.get_last_layer(), split="val") - - self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) - self.log_dict(log_dict_ae) - self.log_dict(log_dict_disc) - return self.log_dict - - def configure_optimizers(self): - lr = self.learning_rate - opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ - list(self.decoder.parameters())+ - list(self.quant_conv.parameters())+ - list(self.post_quant_conv.parameters()), - lr=lr, betas=(0.5, 0.9)) - opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), - lr=lr, betas=(0.5, 0.9)) - return [opt_ae, opt_disc], [] - - def get_last_layer(self): - return self.decoder.conv_out.weight - - @torch.no_grad() - def log_images(self, batch, only_inputs=False, **kwargs): - log = dict() - x = self.get_input(batch, self.image_key) - x = x.to(self.device) - if not only_inputs: - xrec, posterior = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert xrec.shape[1] > 3 - x = self.to_rgb(x) - xrec = self.to_rgb(xrec) - log["samples"] = self.decode(torch.randn_like(posterior.sample())) - log["reconstructions"] = xrec - log["inputs"] = x - return log - - def to_rgb(self, x): - assert self.image_key == "segmentation" - if not hasattr(self, "colorize"): - self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) - x = F.conv2d(x, weight=self.colorize) - x = 2.*(x-x.min())/(x.max()-x.min()) - 1. 
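- # x has now been projected from its segmentation channels down to 3 channels by a fixed - # random 1x1 convolution (the "colorize" buffer) and min-max rescaled to [-1, 1] for logging.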
- return x - - -class IdentityFirstStage(torch.nn.Module): - def __init__(self, *args, vq_interface=False, **kwargs): - self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff - super().__init__() - - def encode(self, x, *args, **kwargs): - return x - - def decode(self, x, *args, **kwargs): - return x - - def quantize(self, x, *args, **kwargs): - if self.vq_interface: - return x, None, [None, None, None] - return x - - def forward(self, x, *args, **kwargs): - return x diff --git a/ldm/models/diffusion/__init__.py b/ldm/models/diffusion/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/ldm/models/diffusion/classifier.py b/ldm/models/diffusion/classifier.py deleted file mode 100644 index 67e98b9d8..000000000 --- a/ldm/models/diffusion/classifier.py +++ /dev/null @@ -1,267 +0,0 @@ -import os -import torch -import pytorch_lightning as pl -from omegaconf import OmegaConf -from torch.nn import functional as F -from torch.optim import AdamW -from torch.optim.lr_scheduler import LambdaLR -from copy import deepcopy -from einops import rearrange -from glob import glob -from natsort import natsorted - -from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel -from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config - -__models__ = { - 'class_label': EncoderUNetModel, - 'segmentation': UNetModel -} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -class NoisyLatentImageClassifier(pl.LightningModule): - - def __init__(self, - diffusion_path, - num_classes, - ckpt_path=None, - pool='attention', - label_key=None, - diffusion_ckpt_path=None, - scheduler_config=None, - weight_decay=1.e-2, - log_steps=10, - monitor='val/loss', - *args, - **kwargs): - super().__init__(*args, **kwargs) - self.num_classes = num_classes - # get latest config of diffusion model - diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1] - self.diffusion_config = OmegaConf.load(diffusion_config).model - self.diffusion_config.params.ckpt_path = diffusion_ckpt_path - self.load_diffusion() - - self.monitor = monitor - self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 - self.log_time_interval = self.diffusion_model.num_timesteps // log_steps - self.log_steps = log_steps - - self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \ - else self.diffusion_model.cond_stage_key - - assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params' - - if self.label_key not in __models__: - raise NotImplementedError() - - self.load_classifier(ckpt_path, pool) - - self.scheduler_config = scheduler_config - self.use_scheduler = self.scheduler_config is not None - self.weight_decay = weight_decay - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( - sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing 
Keys: {missing}") - if len(unexpected) > 0: - print(f"Unexpected Keys: {unexpected}") - - def load_diffusion(self): - model = instantiate_from_config(self.diffusion_config) - self.diffusion_model = model.eval() - self.diffusion_model.train = disabled_train - for param in self.diffusion_model.parameters(): - param.requires_grad = False - - def load_classifier(self, ckpt_path, pool): - model_config = deepcopy(self.diffusion_config.params.unet_config.params) - model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels - model_config.out_channels = self.num_classes - if self.label_key == 'class_label': - model_config.pool = pool - - self.model = __models__[self.label_key](**model_config) - if ckpt_path is not None: - print('#####################################################################') - print(f'load from ckpt "{ckpt_path}"') - print('#####################################################################') - self.init_from_ckpt(ckpt_path) - - @torch.no_grad() - def get_x_noisy(self, x, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x)) - continuous_sqrt_alpha_cumprod = None - if self.diffusion_model.use_continuous_noise: - continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1) - # todo: make sure t+1 is correct here - - return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise, - continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod) - - def forward(self, x_noisy, t, *args, **kwargs): - return self.model(x_noisy, t) - - @torch.no_grad() - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') - x = x.to(memory_format=torch.contiguous_format).float() - return x - - @torch.no_grad() - def get_conditioning(self, batch, k=None): - if k is None: - k = self.label_key - assert k is not None, 'Needs to provide label key' - - targets = batch[k].to(self.device) - - if self.label_key == 'segmentation': - targets = rearrange(targets, 'b h w c -> b c h w') - for down in range(self.numd): - h, w = targets.shape[-2:] - targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest') - - # targets = rearrange(targets,'b c h w -> b h w c') - - return targets - - def compute_top_k(self, logits, labels, k, reduction="mean"): - _, top_ks = torch.topk(logits, k, dim=1) - if reduction == "mean": - return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() - elif reduction == "none": - return (top_ks == labels[:, None]).float().sum(dim=-1) - - def on_train_epoch_start(self): - # save some memory - self.diffusion_model.model.to('cpu') - - @torch.no_grad() - def write_logs(self, loss, logits, targets): - log_prefix = 'train' if self.training else 'val' - log = {} - log[f"{log_prefix}/loss"] = loss.mean() - log[f"{log_prefix}/acc@1"] = self.compute_top_k( - logits, targets, k=1, reduction="mean" - ) - log[f"{log_prefix}/acc@5"] = self.compute_top_k( - logits, targets, k=5, reduction="mean" - ) - - self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True) - self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False) - self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True) - lr = self.optimizers().param_groups[0]['lr'] - self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True) - - def shared_step(self, batch, t=None): - x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key) - targets = 
self.get_conditioning(batch) - if targets.dim() == 4: - targets = targets.argmax(dim=1) - if t is None: - t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long() - else: - t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long() - x_noisy = self.get_x_noisy(x, t) - logits = self(x_noisy, t) - - loss = F.cross_entropy(logits, targets, reduction='none') - - self.write_logs(loss.detach(), logits.detach(), targets.detach()) - - loss = loss.mean() - return loss, logits, x_noisy, targets - - def training_step(self, batch, batch_idx): - loss, *_ = self.shared_step(batch) - return loss - - def reset_noise_accs(self): - self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in - range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)} - - def on_validation_start(self): - self.reset_noise_accs() - - @torch.no_grad() - def validation_step(self, batch, batch_idx): - loss, *_ = self.shared_step(batch) - - for t in self.noisy_acc: - _, logits, _, targets = self.shared_step(batch, t) - self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean')) - self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean')) - - return loss - - def configure_optimizers(self): - optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) - - if self.use_scheduler: - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }] - return [optimizer], scheduler - - return optimizer - - @torch.no_grad() - def log_images(self, batch, N=8, *args, **kwargs): - log = dict() - x = self.get_input(batch, self.diffusion_model.first_stage_key) - log['inputs'] = x - - y = self.get_conditioning(batch) - - if self.label_key == 'class_label': - y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) - log['labels'] = y - - if ismap(y): - log['labels'] = self.diffusion_model.to_rgb(y) - - for step in range(self.log_steps): - current_time = step * self.log_time_interval - - _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) - - log[f'inputs@t{current_time}'] = x_noisy - - pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes) - pred = rearrange(pred, 'b h w c -> b c h w') - - log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred) - - for key in log: - log[key] = log[key][:N] - - return log diff --git a/ldm/models/diffusion/ddim.py b/ldm/models/diffusion/ddim.py deleted file mode 100644 index fb31215db..000000000 --- a/ldm/models/diffusion/ddim.py +++ /dev/null @@ -1,241 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -import numpy as np -from tqdm import tqdm -from functools import partial - -from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, \ - extract_into_tensor - - -class DDIMSampler(object): - def __init__(self, model, schedule="linear", **kwargs): - super().__init__() - self.model = model - self.ddpm_num_timesteps = model.num_timesteps - self.schedule = schedule - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): - self.ddim_timesteps = 
make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) - alphas_cumprod = self.model.alphas_cumprod - assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' - to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) - - self.register_buffer('betas', to_torch(self.model.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=verbose) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
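- # unconditional_guidance_scale w > 1. enables classifier-free guidance in p_sample_ddim: - # e_t = e_t_uncond + w * (e_t_cond - e_t_uncond)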
- **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for DDIM sampling is {size}, eta {eta}') - - samples, intermediates = self.ddim_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - ) - return samples, intermediates - - @torch.no_grad() - def ddim_sampling(self, cond, shape, - x_T=None, ddim_use_original_steps=False, - callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, log_every_t=100, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None,): - device = self.model.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - if timesteps is None: - timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps - elif timesteps is not None and not ddim_use_original_steps: - subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 - timesteps = self.ddim_timesteps[:subset_end] - - intermediates = {'x_inter': [img], 'pred_x0': [img]} - time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) - total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] - print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((b,), step, device=device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? - img = img_orig * mask + (1. 
- mask) * img - - outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, temperature=temperature, - noise_dropout=noise_dropout, score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning) - img, pred_x0 = outs - if callback: callback(i) - if img_callback: img_callback(pred_x0, i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - return img, intermediates - - @torch.no_grad() - def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None): - b, *_, device = *x.shape, x.device - - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - e_t = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - c_in = torch.cat([unconditional_conditioning, c]) - e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) - - if score_corrector is not None: - assert self.model.parameterization == "eps" - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - # direction pointing to x_t - dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - @torch.no_grad() - def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): - # fast, but does not allow for exact reconstruction - # t serves as an index to gather the correct alphas - if use_original_steps: - sqrt_alphas_cumprod = self.sqrt_alphas_cumprod - sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod - else: - sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) - sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas - - if noise is None: - noise = torch.randn_like(x0) - return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + - extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise) - - @torch.no_grad() - def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, - use_original_steps=False): - - timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps - timesteps = timesteps[:t_start] - - time_range = np.flip(timesteps) - total_steps = timesteps.shape[0] - print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='Decoding image', total=total_steps) - x_dec = x_latent - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) - x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning) - return x_dec \ No newline at end of file diff --git a/ldm/models/diffusion/ddpm.py b/ldm/models/diffusion/ddpm.py deleted file mode 100644 index bbedd04cf..000000000 --- a/ldm/models/diffusion/ddpm.py +++ /dev/null @@ -1,1445 +0,0 @@ -""" -wild mixture of -https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py -https://github.com/CompVis/taming-transformers --- merci -""" - -import torch -import torch.nn as nn -import numpy as np -import pytorch_lightning as pl -from torch.optim.lr_scheduler import LambdaLR -from einops import rearrange, repeat -from contextlib import contextmanager -from functools import partial -from tqdm import tqdm -from torchvision.utils import make_grid -from pytorch_lightning.utilities.distributed import rank_zero_only - -from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config -from ldm.modules.ema import LitEma -from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution -from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL -from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like -from ldm.models.diffusion.ddim import DDIMSampler - - -__conditioning_keys__ = {'concat': 'c_concat', - 'crossattn': 'c_crossattn', - 'adm': 'y'} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not 
change anymore.""" - return self - - -def uniform_on_device(r1, r2, shape, device): - return (r1 - r2) * torch.rand(*shape, device=device) + r2 - - -class DDPM(pl.LightningModule): - # classic DDPM with Gaussian diffusion, in image space - def __init__(self, - unet_config, - timesteps=1000, - beta_schedule="linear", - loss_type="l2", - ckpt_path=None, - ignore_keys=[], - load_only_unet=False, - monitor="val/loss", - use_ema=True, - first_stage_key="image", - image_size=256, - channels=3, - log_every_t=100, - clip_denoised=True, - linear_start=1e-4, - linear_end=2e-2, - cosine_s=8e-3, - given_betas=None, - original_elbo_weight=0., - v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta - l_simple_weight=1., - conditioning_key=None, - parameterization="eps", # all assuming fixed variance schedules - scheduler_config=None, - use_positional_encodings=False, - learn_logvar=False, - logvar_init=0., - ): - super().__init__() - assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' - self.parameterization = parameterization - print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") - self.cond_stage_model = None - self.clip_denoised = clip_denoised - self.log_every_t = log_every_t - self.first_stage_key = first_stage_key - self.image_size = image_size # try conv? - self.channels = channels - self.use_positional_encodings = use_positional_encodings - self.model = DiffusionWrapper(unet_config, conditioning_key) - count_params(self.model, verbose=True) - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self.model) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - self.use_scheduler = scheduler_config is not None - if self.use_scheduler: - self.scheduler_config = scheduler_config - - self.v_posterior = v_posterior - self.original_elbo_weight = original_elbo_weight - self.l_simple_weight = l_simple_weight - - if monitor is not None: - self.monitor = monitor - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) - - self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, - linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) - - self.loss_type = loss_type - - self.learn_logvar = learn_logvar - self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) - if self.learn_logvar: - self.logvar = nn.Parameter(self.logvar, requires_grad=True) - - - def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if exists(given_betas): - betas = given_betas - else: - betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, - cosine_s=cosine_s) - alphas = 1. 
- betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.linear_start = linear_start - self.linear_end = linear_end - assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' - - to_torch = partial(torch.tensor, dtype=torch.float32) - - self.register_buffer('betas', to_torch(betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) - - # calculations for posterior q(x_{t-1} | x_t, x_0) - posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( - 1. - alphas_cumprod) + self.v_posterior * betas - # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) - self.register_buffer('posterior_variance', to_torch(posterior_variance)) - # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain - self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) - self.register_buffer('posterior_mean_coef1', to_torch( - betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) - self.register_buffer('posterior_mean_coef2', to_torch( - (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) - - if self.parameterization == "eps": - lvlb_weights = self.betas ** 2 / ( - 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) - elif self.parameterization == "x0": - lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) - else: - raise NotImplementedError("mu not supported") - # TODO how to choose this term - lvlb_weights[0] = lvlb_weights[1] - self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) - assert not torch.isnan(self.lvlb_weights).all() - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.model.parameters()) - self.model_ema.copy_to(self.model) - if context is not None: - print(f"{context}: Switched to EMA weights") - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.model.parameters()) - if context is not None: - print(f"{context}: Restored training weights") - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( - sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - if len(unexpected) > 0: - print(f"Unexpected Keys: {unexpected}") - - def q_mean_variance(self, x_start, t): - """ - Get the distribution q(x_t | x_0). - :param x_start: the [N x C x ...] tensor of noiseless inputs. - :param t: the number of diffusion steps (minus 1). Here, 0 means one step. - :return: A tuple (mean, variance, log_variance), all of x_start's shape. - """ - mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) - variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) - log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) - return mean, variance, log_variance - - def predict_start_from_noise(self, x_t, t, noise): - return ( - extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise - ) - - def q_posterior(self, x_start, x_t, t): - posterior_mean = ( - extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + - extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t - ) - posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) - posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) - return posterior_mean, posterior_variance, posterior_log_variance_clipped - - def p_mean_variance(self, x, t, clip_denoised: bool): - model_out = self.model(x, t) - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - if clip_denoised: - x_recon.clamp_(-1., 1.) 
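- # x_recon is the model's estimate of x_0; for the "eps" parameterization it is recovered as - # x_0 = (x_t - sqrt(1 - alphas_cumprod_t) * eps_pred) / sqrt(alphas_cumprod_t) - # (predict_start_from_noise); q_posterior below then gives q(x_{t-1} | x_t, x_0).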
- - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): - b, *_, device = *x.shape, x.device - model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) - noise = noise_like(x.shape, device, repeat_noise) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def p_sample_loop(self, shape, return_intermediates=False): - device = self.betas.device - b = shape[0] - img = torch.randn(shape, device=device) - intermediates = [img] - for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): - img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), - clip_denoised=self.clip_denoised) - if i % self.log_every_t == 0 or i == self.num_timesteps - 1: - intermediates.append(img) - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, batch_size=16, return_intermediates=False): - image_size = self.image_size - channels = self.channels - return self.p_sample_loop((batch_size, channels, image_size, image_size), - return_intermediates=return_intermediates) - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) - - def get_loss(self, pred, target, mean=True): - if self.loss_type == 'l1': - loss = (target - pred).abs() - if mean: - loss = loss.mean() - elif self.loss_type == 'l2': - if mean: - loss = torch.nn.functional.mse_loss(target, pred) - else: - loss = torch.nn.functional.mse_loss(target, pred, reduction='none') - else: - raise NotImplementedError("unknown loss type '{loss_type}'") - - return loss - - def p_losses(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_out = self.model(x_noisy, t) - - loss_dict = {} - if self.parameterization == "eps": - target = noise - elif self.parameterization == "x0": - target = x_start - else: - raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") - - loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) - - log_prefix = 'train' if self.training else 'val' - - loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) - loss_simple = loss.mean() * self.l_simple_weight - - loss_vlb = (self.lvlb_weights[t] * loss).mean() - loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) - - loss = loss_simple + self.original_elbo_weight * loss_vlb - - loss_dict.update({f'{log_prefix}/loss': loss}) - - return loss, loss_dict - - def forward(self, x, *args, **kwargs): - # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size - # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' - t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - return self.p_losses(x, t, *args, **kwargs) - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') - x = 
x.to(memory_format=torch.contiguous_format).float() - return x - - def shared_step(self, batch): - x = self.get_input(batch, self.first_stage_key) - loss, loss_dict = self(x) - return loss, loss_dict - - def training_step(self, batch, batch_idx): - loss, loss_dict = self.shared_step(batch) - - self.log_dict(loss_dict, prog_bar=True, - logger=True, on_step=True, on_epoch=True) - - self.log("global_step", self.global_step, - prog_bar=True, logger=True, on_step=True, on_epoch=False) - - if self.use_scheduler: - lr = self.optimizers().param_groups[0]['lr'] - self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) - - return loss - - @torch.no_grad() - def validation_step(self, batch, batch_idx): - _, loss_dict_no_ema = self.shared_step(batch) - with self.ema_scope(): - _, loss_dict_ema = self.shared_step(batch) - loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} - self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) - self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self.model) - - def _get_rows_from_list(self, samples): - n_imgs_per_row = len(samples) - denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): - log = dict() - x = self.get_input(batch, self.first_stage_key) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - x = x.to(self.device)[:N] - log["inputs"] = x - - # get diffusion row - diffusion_row = list() - x_start = x[:n_row] - - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(x_start) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - diffusion_row.append(x_noisy) - - log["diffusion_row"] = self._get_rows_from_list(diffusion_row) - - if sample: - # get denoise row - with self.ema_scope("Plotting"): - samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) - - log["samples"] = samples - log["denoise_row"] = self._get_rows_from_list(denoise_row) - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.learn_logvar: - params = params + [self.logvar] - opt = torch.optim.AdamW(params, lr=lr) - return opt - - -class LatentDiffusion(DDPM): - """main class""" - def __init__(self, - first_stage_config, - cond_stage_config, - num_timesteps_cond=None, - cond_stage_key="image", - cond_stage_trainable=False, - concat_mode=True, - cond_stage_forward=None, - conditioning_key=None, - scale_factor=1.0, - scale_by_std=False, - *args, **kwargs): - self.num_timesteps_cond = default(num_timesteps_cond, 1) - self.scale_by_std = scale_by_std - assert self.num_timesteps_cond <= kwargs['timesteps'] - # for backwards compatibility after implementation of DiffusionWrapper - if conditioning_key is None: - conditioning_key = 'concat' if concat_mode else 'crossattn' - if cond_stage_config == '__is_unconditional__': - 
conditioning_key = None - ckpt_path = kwargs.pop("ckpt_path", None) - ignore_keys = kwargs.pop("ignore_keys", []) - super().__init__(conditioning_key=conditioning_key, *args, **kwargs) - self.concat_mode = concat_mode - self.cond_stage_trainable = cond_stage_trainable - self.cond_stage_key = cond_stage_key - try: - self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: - self.num_downs = 0 - if not scale_by_std: - self.scale_factor = scale_factor - else: - self.register_buffer('scale_factor', torch.tensor(scale_factor)) - self.instantiate_first_stage(first_stage_config) - self.instantiate_cond_stage(cond_stage_config) - self.cond_stage_forward = cond_stage_forward - self.clip_denoised = False - self.bbox_tokenizer = None - - self.restarted_from_ckpt = False - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys) - self.restarted_from_ckpt = True - - def make_cond_schedule(self, ): - self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) - ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() - self.cond_ids[:self.num_timesteps_cond] = ids - - @rank_zero_only - @torch.no_grad() - def on_train_batch_start(self, batch, batch_idx, dataloader_idx): - # only for very first batch - if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: - assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' - # set rescale weight to 1./std of encodings - print("### USING STD-RESCALING ###") - x = super().get_input(batch, self.first_stage_key) - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - del self.scale_factor - self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) - print(f"setting self.scale_factor to {self.scale_factor}") - print("### USING STD-RESCALING ###") - - def register_schedule(self, - given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) - - self.shorten_cond_schedule = self.num_timesteps_cond > 1 - if self.shorten_cond_schedule: - self.make_cond_schedule() - - def instantiate_first_stage(self, config): - model = instantiate_from_config(config) - self.first_stage_model = model.eval() - self.first_stage_model.train = disabled_train - for param in self.first_stage_model.parameters(): - param.requires_grad = False - - def instantiate_cond_stage(self, config): - if not self.cond_stage_trainable: - if config == "__is_first_stage__": - print("Using first stage also as cond stage.") - self.cond_stage_model = self.first_stage_model - elif config == "__is_unconditional__": - print(f"Training {self.__class__.__name__} as an unconditional model.") - self.cond_stage_model = None - # self.be_unconditional = True - else: - model = instantiate_from_config(config) - self.cond_stage_model = model.eval() - self.cond_stage_model.train = disabled_train - for param in self.cond_stage_model.parameters(): - param.requires_grad = False - else: - assert config != '__is_first_stage__' - assert config != '__is_unconditional__' - model = instantiate_from_config(config) - self.cond_stage_model = model - - def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): - denoise_row = [] - for zd in tqdm(samples, desc=desc): - denoise_row.append(self.decode_first_stage(zd.to(self.device), - force_not_quantize=force_no_decoder_quantization)) - n_imgs_per_row = len(denoise_row) - denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W - denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - def get_first_stage_encoding(self, encoder_posterior): - if isinstance(encoder_posterior, DiagonalGaussianDistribution): - z = encoder_posterior.sample() - elif isinstance(encoder_posterior, torch.Tensor): - z = encoder_posterior - else: - raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") - return self.scale_factor * z - - def get_learned_conditioning(self, c): - if self.cond_stage_forward is None: - if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): - c = self.cond_stage_model.encode(c) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - else: - c = self.cond_stage_model(c) - else: - assert hasattr(self.cond_stage_model, self.cond_stage_forward) - c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) - return c - - def meshgrid(self, h, w): - y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) - x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) - - arr = torch.cat([y, x], dim=-1) - return arr - - def delta_border(self, h, w): - """ - :param h: height - :param w: width - :return: normalized distance to image border, - wtith min distance = 0 at border and max dist = 0.5 at image center - """ - lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) - arr = self.meshgrid(h, w) / lower_right_corner - dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] - dist_right_down = 
torch.min(1 - arr, dim=-1, keepdims=True)[0] - edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] - return edge_dist - - def get_weighting(self, h, w, Ly, Lx, device): - weighting = self.delta_border(h, w) - weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], - self.split_input_params["clip_max_weight"], ) - weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) - - if self.split_input_params["tie_braker"]: - L_weighting = self.delta_border(Ly, Lx) - L_weighting = torch.clip(L_weighting, - self.split_input_params["clip_min_tie_weight"], - self.split_input_params["clip_max_tie_weight"]) - - L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) - weighting = weighting * L_weighting - return weighting - - def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code - """ - :param x: img of size (bs, c, h, w) - :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) - """ - bs, nc, h, w = x.shape - - # number of crops in image - Ly = (h - kernel_size[0]) // stride[0] + 1 - Lx = (w - kernel_size[1]) // stride[1] + 1 - - if uf == 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) - - weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) - - elif uf > 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), - dilation=1, padding=0, - stride=(stride[0] * uf, stride[1] * uf)) - fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) - - elif df > 1 and uf == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), - dilation=1, padding=0, - stride=(stride[0] // df, stride[1] // df)) - fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) - - else: - raise NotImplementedError - - return fold, unfold, normalization, weighting - - @torch.no_grad() - def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, - cond_key=None, return_original_cond=False, bs=None): - x = super().get_input(batch, k) - if bs is not None: - x = x[:bs] - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - - if self.model.conditioning_key is not None: - if cond_key is None: - cond_key = 
self.cond_stage_key - if cond_key != self.first_stage_key: - if cond_key in ['caption', 'coordinates_bbox']: - xc = batch[cond_key] - elif cond_key == 'class_label': - xc = batch - else: - xc = super().get_input(batch, cond_key).to(self.device) - else: - xc = x - if not self.cond_stage_trainable or force_c_encode: - if isinstance(xc, dict) or isinstance(xc, list): - # import pudb; pudb.set_trace() - c = self.get_learned_conditioning(xc) - else: - c = self.get_learned_conditioning(xc.to(self.device)) - else: - c = xc - if bs is not None: - c = c[:bs] - - if self.use_positional_encodings: - pos_x, pos_y = self.compute_latent_shifts(batch) - ckey = __conditioning_keys__[self.model.conditioning_key] - c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} - - else: - c = None - xc = None - if self.use_positional_encodings: - pos_x, pos_y = self.compute_latent_shifts(batch) - c = {'pos_x': pos_x, 'pos_y': pos_y} - out = [z, c] - if return_first_stage_outputs: - xrec = self.decode_first_stage(z) - out.extend([x, xrec]) - if return_original_cond: - out.append(xc) - return out - - @torch.no_grad() - def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1. / self.scale_factor * z - - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - uf = self.split_input_params["vqf"] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [self.first_stage_model.decode(z[:, :, :, :, i], - force_not_quantize=predict_cids or force_not_quantize) - for i in range(z.shape[-1])] - else: - - output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. 
reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - # same as above but without decorator - def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1. / self.scale_factor * z - - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - uf = self.split_input_params["vqf"] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [self.first_stage_model.decode(z[:, :, :, :, i], - force_not_quantize=predict_cids or force_not_quantize) - for i in range(z.shape[-1])] - else: - - output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - @torch.no_grad() - def encode_first_stage(self, x): - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. 
(64, 64) - df = self.split_input_params["vqf"] - self.split_input_params['original_image_size'] = x.shape[-2:] - bs, nc, h, w = x.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) - z = unfold(x) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) - o = o * weighting - - # Reverse reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization - return decoded - - else: - return self.first_stage_model.encode(x) - else: - return self.first_stage_model.encode(x) - - def shared_step(self, batch, **kwargs): - x, c = self.get_input(batch, self.first_stage_key) - loss = self(x, c) - return loss - - def forward(self, x, c, *args, **kwargs): - t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - if self.model.conditioning_key is not None: - assert c is not None - if self.cond_stage_trainable: - c = self.get_learned_conditioning(c) - if self.shorten_cond_schedule: # TODO: drop this option - tc = self.cond_ids[t].to(self.device) - c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) - return self.p_losses(x, c, t, *args, **kwargs) - - def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset - def rescale_bbox(bbox): - x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) - y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) - w = min(bbox[2] / crop_coordinates[2], 1 - x0) - h = min(bbox[3] / crop_coordinates[3], 1 - y0) - return x0, y0, w, h - - return [rescale_bbox(b) for b in bboxes] - - def apply_model(self, x_noisy, t, cond, return_ids=False): - - if isinstance(cond, dict): - # hybrid case, cond is exptected to be a dict - pass - else: - if not isinstance(cond, list): - cond = [cond] - key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' - cond = {key: cond} - - if hasattr(self, "split_input_params"): - assert len(cond) == 1 # todo can only deal with one conditioning atm - assert not return_ids - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. 
(64, 64) - - h, w = x_noisy.shape[-2:] - - fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) - - z = unfold(x_noisy) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] - - if self.cond_stage_key in ["image", "LR_image", "segmentation", - 'bbox_img'] and self.model.conditioning_key: # todo check for completeness - c_key = next(iter(cond.keys())) # get key - c = next(iter(cond.values())) # get value - assert (len(c) == 1) # todo extend to list with more than one elem - c = c[0] # get element - - c = unfold(c) - c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] - - elif self.cond_stage_key == 'coordinates_bbox': - assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' - - # assuming padding of unfold is always 0 and its dilation is always 1 - n_patches_per_row = int((w - ks[0]) / stride[0] + 1) - full_img_h, full_img_w = self.split_input_params['original_image_size'] - # as we are operating on latents, we need the factor from the original image size to the - # spatial latent size to properly rescale the crops for regenerating the bbox annotations - num_downs = self.first_stage_model.encoder.num_resolutions - 1 - rescale_latent = 2 ** (num_downs) - - # get top left postions of patches as conforming for the bbbox tokenizer, therefore we - # need to rescale the tl patch coordinates to be in between (0,1) - tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, - rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) - for patch_nr in range(z.shape[-1])] - - # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) - patch_limits = [(x_tl, y_tl, - rescale_latent * ks[0] / full_img_w, - rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] - # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] - - # tokenize crop coordinates for the bounding boxes of the respective patches - patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) - for bbox in patch_limits] # list of length l with tensors of shape (1, 2) - print(patch_limits_tknzd[0].shape) - # cut tknzd crop position from conditioning - assert isinstance(cond, dict), 'cond must be dict to be fed into model' - cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) - print(cut_cond.shape) - - adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) - adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') - print(adapted_cond.shape) - adapted_cond = self.get_learned_conditioning(adapted_cond) - print(adapted_cond.shape) - adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) - print(adapted_cond.shape) - - cond_list = [{'c_crossattn': [e]} for e in adapted_cond] - - else: - cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient - - # apply model by loop over crops - output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] - assert not isinstance(output_list[0], - tuple) # todo cant deal with multiple model outputs check this never happens - - o = torch.stack(output_list, 
axis=-1) - o = o * weighting - # Reverse reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - x_recon = fold(o) / normalization - - else: - x_recon = self.model(x_noisy, t, **cond) - - if isinstance(x_recon, tuple) and not return_ids: - return x_recon[0] - else: - return x_recon - - def _predict_eps_from_xstart(self, x_t, t, pred_xstart): - return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) - - def _prior_bpd(self, x_start): - """ - Get the prior KL term for the variational lower-bound, measured in - bits-per-dim. - This term can't be optimized, as it only depends on the encoder. - :param x_start: the [N x C x ...] tensor of inputs. - :return: a batch of [N] KL values (in bits), one per batch element. - """ - batch_size = x_start.shape[0] - t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) - qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) - kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) - return mean_flat(kl_prior) / np.log(2.0) - - def p_losses(self, x_start, cond, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_output = self.apply_model(x_noisy, t, cond) - - loss_dict = {} - prefix = 'train' if self.training else 'val' - - if self.parameterization == "x0": - target = x_start - elif self.parameterization == "eps": - target = noise - else: - raise NotImplementedError() - - loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) - loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) - - logvar_t = self.logvar[t].to(self.device) - loss = loss_simple / torch.exp(logvar_t) + logvar_t - # loss = loss_simple / torch.exp(self.logvar) + self.logvar - if self.learn_logvar: - loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) - loss_dict.update({'logvar': self.logvar.data.mean()}) - - loss = self.l_simple_weight * loss.mean() - - loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) - loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() - loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) - loss += (self.original_elbo_weight * loss_vlb) - loss_dict.update({f'{prefix}/loss': loss}) - - return loss, loss_dict - - def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, - return_x0=False, score_corrector=None, corrector_kwargs=None): - t_in = t - model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) - - if score_corrector is not None: - assert self.parameterization == "eps" - model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) - - if return_codebook_ids: - model_out, logits = model_out - - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - else: - raise NotImplementedError() - - if clip_denoised: - x_recon.clamp_(-1., 1.) 
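# --- Editor's note (illustrative sketch, not part of the patch): the
# split_input_params path above (get_fold_unfold / apply_model) uses Unfold to cut
# overlapping crops and Fold to stitch the per-crop outputs back together; dividing
# the folded result by fold(weighting) normalizes the overlap, so a passthrough
# round-trip is exact. A minimal self-contained check with uniform weights:
import torch

x = torch.randn(1, 4, 16, 16)
ks, stride = (8, 8), (4, 4)
unfold = torch.nn.Unfold(kernel_size=ks, stride=stride)
fold = torch.nn.Fold(output_size=x.shape[2:], kernel_size=ks, stride=stride)
patches = unfold(x)                                  # (1, 4 * 8 * 8, L)
weighting = torch.ones_like(patches)                 # uniform weights for the sketch
normalization = fold(weighting)                      # overlap count per output pixel
out = fold(patches * weighting) / normalization
assert torch.allclose(out, x, atol=1e-6)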
- if quantize_denoised: - x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - if return_codebook_ids: - return model_mean, posterior_variance, posterior_log_variance, logits - elif return_x0: - return model_mean, posterior_variance, posterior_log_variance, x_recon - else: - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, - return_codebook_ids=False, quantize_denoised=False, return_x0=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): - b, *_, device = *x.shape, x.device - outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, - return_codebook_ids=return_codebook_ids, - quantize_denoised=quantize_denoised, - return_x0=return_x0, - score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if return_codebook_ids: - raise DeprecationWarning("Support dropped.") - model_mean, _, model_log_variance, logits = outputs - elif return_x0: - model_mean, _, model_log_variance, x0 = outputs - else: - model_mean, _, model_log_variance = outputs - - noise = noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - - if return_codebook_ids: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) - if return_x0: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 - else: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, - img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., - score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, - log_every_t=None): - if not log_every_t: - log_every_t = self.log_every_t - timesteps = self.num_timesteps - if batch_size is not None: - b = batch_size if batch_size is not None else shape[0] - shape = [batch_size] + list(shape) - else: - b = batch_size = shape[0] - if x_T is None: - img = torch.randn(shape, device=self.device) - else: - img = x_T - intermediates = [] - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', - total=timesteps) if verbose else reversed( - range(0, timesteps)) - if type(temperature) == float: - temperature = [temperature] * timesteps - - for i in iterator: - ts = torch.full((b,), i, device=self.device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img, x0_partial = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised, return_x0=True, - temperature=temperature[i], noise_dropout=noise_dropout, - 
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if mask is not None: - assert x0 is not None - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(x0_partial) - if callback: callback(i) - if img_callback: img_callback(img, i) - return img, intermediates - - @torch.no_grad() - def p_sample_loop(self, cond, shape, return_intermediates=False, - x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, start_T=None, - log_every_t=None): - - if not log_every_t: - log_every_t = self.log_every_t - device = self.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - intermediates = [img] - if timesteps is None: - timesteps = self.num_timesteps - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( - range(0, timesteps)) - - if mask is not None: - assert x0 is not None - assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match - - for i in iterator: - ts = torch.full((b,), i, device=device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised) - if mask is not None: - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) - - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, - verbose=True, timesteps=None, quantize_denoised=False, - mask=None, x0=None, shape=None,**kwargs): - if shape is None: - shape = (batch_size, self.channels, self.image_size, self.image_size) - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - return self.p_sample_loop(cond, - shape, - return_intermediates=return_intermediates, x_T=x_T, - verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, - mask=mask, x0=x0) - - @torch.no_grad() - def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): - - if ddim: - ddim_sampler = DDIMSampler(self) - shape = (self.channels, self.image_size, self.image_size) - samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, - shape,cond,verbose=False,**kwargs) - - else: - samples, intermediates = self.sample(cond=cond, batch_size=batch_size, - return_intermediates=True,**kwargs) - - return samples, intermediates - - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, - quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, - plot_diffusion_rows=True, **kwargs): - - use_ddim = ddim_steps is not None - - log = dict() - z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, - 
return_first_stage_outputs=True, - force_c_encode=True, - return_original_cond=True, - bs=N) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log["inputs"] = x - log["reconstruction"] = xrec - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, "decode"): - xc = self.cond_stage_model.decode(c) - log["conditioning"] = xc - elif self.cond_stage_key in ["caption"]: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) - log["conditioning"] = xc - elif self.cond_stage_key == 'class_label': - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) - log['conditioning'] = xc - elif isimage(xc): - log["conditioning"] = xc - if ismap(xc): - log["original_conditioning"] = self.to_rgb(xc) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if sample: - # get denoise row - with self.ema_scope("Plotting"): - samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, - ddim_steps=ddim_steps,eta=ddim_eta) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( - self.first_stage_model, IdentityFirstStage): - # also display when quantizing x0 while sampling - with self.ema_scope("Plotting Quantized Denoised"): - samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, - ddim_steps=ddim_steps,eta=ddim_eta, - quantize_denoised=True) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, - # quantize_denoised=True) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_x0_quantized"] = x_samples - - if inpaint: - # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] - mask = torch.ones(N, h, w).to(self.device) - # zeros will be filled in - mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. - mask = mask[:, None, ...] 
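# --- Editor's note (illustrative, not part of the patch): in the mask built above,
# 1 marks latent regions to keep, and the zeroed center square is re-generated;
# p_sample_loop then composites img = img_orig * mask + (1. - mask) * img at every
# step, which is what produces the "samples_inpainting" / "samples_outpainting" logs.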
- with self.ema_scope("Plotting Inpaint"): - - samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_inpainting"] = x_samples - log["mask"] = mask - - # outpaint - with self.ema_scope("Plotting Outpaint"): - samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_outpainting"] = x_samples - - if plot_progressive_rows: - with self.ema_scope("Plotting Progressives"): - img, progressives = self.progressive_denoising(c, - shape=(self.channels, self.image_size, self.image_size), - batch_size=N) - prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") - log["progressive_row"] = prog_row - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.cond_stage_trainable: - print(f"{self.__class__.__name__}: Also optimizing conditioner params!") - params = params + list(self.cond_stage_model.parameters()) - if self.learn_logvar: - print('Diffusion model optimizing logvar') - params.append(self.logvar) - opt = torch.optim.AdamW(params, lr=lr) - if self.use_scheduler: - assert 'target' in self.scheduler_config - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }] - return [opt], scheduler - return opt - - @torch.no_grad() - def to_rgb(self, x): - x = x.float() - if not hasattr(self, "colorize"): - self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) - x = nn.functional.conv2d(x, weight=self.colorize) - x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. 
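# --- Editor's note (illustrative, not part of the patch): to_rgb projects an
# arbitrary channel count down to 3 with a fixed random 1x1 convolution (the
# `colorize` weight created above), and the final line rescales the result to
# [-1, 1] for logging.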
- return x - - -class DiffusionWrapper(pl.LightningModule): - def __init__(self, diff_model_config, conditioning_key): - super().__init__() - self.diffusion_model = instantiate_from_config(diff_model_config) - self.conditioning_key = conditioning_key - assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm'] - - def forward(self, x, t, c_concat: list = None, c_crossattn: list = None): - if self.conditioning_key is None: - out = self.diffusion_model(x, t) - elif self.conditioning_key == 'concat': - xc = torch.cat([x] + c_concat, dim=1) - out = self.diffusion_model(xc, t) - elif self.conditioning_key == 'crossattn': - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(x, t, context=cc) - elif self.conditioning_key == 'hybrid': - xc = torch.cat([x] + c_concat, dim=1) - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(xc, t, context=cc) - elif self.conditioning_key == 'adm': - cc = c_crossattn[0] - out = self.diffusion_model(x, t, y=cc) - else: - raise NotImplementedError() - - return out - - -class Layout2ImgDiffusion(LatentDiffusion): - # TODO: move all layout-specific hacks to this class - def __init__(self, cond_stage_key, *args, **kwargs): - assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' - super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) - - def log_images(self, batch, N=8, *args, **kwargs): - logs = super().log_images(batch=batch, N=N, *args, **kwargs) - - key = 'train' if self.training else 'validation' - dset = self.trainer.datamodule.datasets[key] - mapper = dset.conditional_builders[self.cond_stage_key] - - bbox_imgs = [] - map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno)) - for tknzd_bbox in batch[self.cond_stage_key][:N]: - bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256)) - bbox_imgs.append(bboximg) - - cond_img = torch.stack(bbox_imgs, dim=0) - logs['bbox_image'] = cond_img - return logs diff --git a/ldm/models/diffusion/dpm_solver/__init__.py b/ldm/models/diffusion/dpm_solver/__init__.py deleted file mode 100644 index 7427f38c0..000000000 --- a/ldm/models/diffusion/dpm_solver/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .sampler import DPMSolverSampler \ No newline at end of file diff --git a/ldm/models/diffusion/dpm_solver/dpm_solver.py b/ldm/models/diffusion/dpm_solver/dpm_solver.py deleted file mode 100644 index bdb64e0c7..000000000 --- a/ldm/models/diffusion/dpm_solver/dpm_solver.py +++ /dev/null @@ -1,1184 +0,0 @@ -import torch -import torch.nn.functional as F -import math - - -class NoiseScheduleVP: - def __init__( - self, - schedule='discrete', - betas=None, - alphas_cumprod=None, - continuous_beta_0=0.1, - continuous_beta_1=20., - ): - """Create a wrapper class for the forward SDE (VP type). - - *** - Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t. - We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images. - *** - - The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ). - We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper). - Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. 
For t in [0, T], we have: - - log_alpha_t = self.marginal_log_mean_coeff(t) - sigma_t = self.marginal_std(t) - lambda_t = self.marginal_lambda(t) - - Moreover, as lambda(t) is an invertible function, we also support its inverse function: - - t = self.inverse_lambda(lambda_t) - - =============================================================== - - We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]). - - 1. For discrete-time DPMs: - - For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by: - t_i = (i + 1) / N - e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1. - We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3. - - Args: - betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details) - alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details) - - Note that we always have alphas_cumprod = cumprod(betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`. - - **Important**: Please pay special attention for the args for `alphas_cumprod`: - The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that - q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ). - Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have - alpha_{t_n} = \sqrt{\hat{alpha_n}}, - and - log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}). - - - 2. For continuous-time DPMs: - - We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise - schedule are the default settings in DDPM and improved-DDPM: - - Args: - beta_min: A `float` number. The smallest beta for the linear schedule. - beta_max: A `float` number. The largest beta for the linear schedule. - cosine_s: A `float` number. The hyperparameter in the cosine schedule. - cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule. - T: A `float` number. The ending time of the forward process. - - =============================================================== - - Args: - schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs, - 'linear' or 'cosine' for continuous-time DPMs. - Returns: - A wrapper object of the forward SDE (VP type). - - =============================================================== - - Example: - - # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1): - >>> ns = NoiseScheduleVP('discrete', betas=betas) - - # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1): - >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod) - - # For continuous-time DPMs (VPSDE), linear schedule: - >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.) - - """ - - if schedule not in ['discrete', 'linear', 'cosine']: - raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule)) - - self.schedule = schedule - if schedule == 'discrete': - if betas is not None: - log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0) - else: - assert alphas_cumprod is not None - log_alphas = 0.5 * torch.log(alphas_cumprod) - self.total_N = len(log_alphas) - self.T = 1. 
- self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)) - self.log_alpha_array = log_alphas.reshape((1, -1,)) - else: - self.total_N = 1000 - self.beta_0 = continuous_beta_0 - self.beta_1 = continuous_beta_1 - self.cosine_s = 0.008 - self.cosine_beta_max = 999. - self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s - self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.)) - self.schedule = schedule - if schedule == 'cosine': - # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T. - # Note that T = 0.9946 may be not the optimal setting. However, we find it works well. - self.T = 0.9946 - else: - self.T = 1. - - def marginal_log_mean_coeff(self, t): - """ - Compute log(alpha_t) of a given continuous-time label t in [0, T]. - """ - if self.schedule == 'discrete': - return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1)) - elif self.schedule == 'linear': - return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0 - elif self.schedule == 'cosine': - log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.)) - log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0 - return log_alpha_t - - def marginal_alpha(self, t): - """ - Compute alpha_t of a given continuous-time label t in [0, T]. - """ - return torch.exp(self.marginal_log_mean_coeff(t)) - - def marginal_std(self, t): - """ - Compute sigma_t of a given continuous-time label t in [0, T]. - """ - return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) - - def marginal_lambda(self, t): - """ - Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. - """ - log_mean_coeff = self.marginal_log_mean_coeff(t) - log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) - return log_mean_coeff - log_std - - def inverse_lambda(self, lamb): - """ - Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t. - """ - if self.schedule == 'linear': - tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) - Delta = self.beta_0**2 + tmp - return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0) - elif self.schedule == 'discrete': - log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb) - t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1])) - return t.reshape((-1,)) - else: - log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) - t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s - t = t_fn(log_alpha) - return t - - -def model_wrapper( - model, - noise_schedule, - model_type="noise", - model_kwargs={}, - guidance_type="uncond", - condition=None, - unconditional_condition=None, - guidance_scale=1., - classifier_fn=None, - classifier_kwargs={}, -): - """Create a wrapper function for the noise prediction model. - - DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to - firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. 
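    (Editor's note -- an illustrative usage sketch, not part of the original
    docstring; `model`, `ns`, `c`, `uc`, `x_t` and `t_continuous` are assumed to
    already exist with the formats described below:)
    ``
    model_fn = model_wrapper(
        model, ns,
        model_type="noise",
        guidance_type="classifier-free",
        condition=c, unconditional_condition=uc,
        guidance_scale=7.5,
    )
    noise = model_fn(x_t, t_continuous)
    ``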
- - We support four types of the diffusion model by setting `model_type`: - - 1. "noise": noise prediction model. (Trained by predicting noise). - - 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). - - 3. "v": velocity prediction model. (Trained by predicting the velocity). - The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2]. - - [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." - arXiv preprint arXiv:2202.00512 (2022). - [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." - arXiv preprint arXiv:2210.02303 (2022). - - 4. "score": marginal score function. (Trained by denoising score matching). - Note that the score function and the noise prediction model follows a simple relationship: - ``` - noise(x_t, t) = -sigma_t * score(x_t, t) - ``` - - We support three types of guided sampling by DPMs by setting `guidance_type`: - 1. "uncond": unconditional sampling by DPMs. - The input `model` has the following format: - `` - model(x, t_input, **model_kwargs) -> noise | x_start | v | score - `` - - 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. - The input `model` has the following format: - `` - model(x, t_input, **model_kwargs) -> noise | x_start | v | score - `` - - The input `classifier_fn` has the following format: - `` - classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) - `` - - [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," - in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. - - 3. "classifier-free": classifier-free guidance sampling by conditional DPMs. - The input `model` has the following format: - `` - model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score - `` - And if cond == `unconditional_condition`, the model output is the unconditional DPM output. - - [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." - arXiv preprint arXiv:2207.12598 (2022). - - - The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) - or continuous-time labels (i.e. epsilon to T). - - We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: - `` - def model_fn(x, t_continuous) -> noise: - t_input = get_model_input_time(t_continuous) - return noise_pred(model, x, t_input, **model_kwargs) - `` - where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. - - =============================================================== - - Args: - model: A diffusion model with the corresponding format described above. - noise_schedule: A noise schedule object, such as NoiseScheduleVP. - model_type: A `str`. The parameterization type of the diffusion model. - "noise" or "x_start" or "v" or "score". - model_kwargs: A `dict`. A dict for the other inputs of the model function. - guidance_type: A `str`. The type of the guidance for sampling. - "uncond" or "classifier" or "classifier-free". - condition: A pytorch tensor. The condition for the guided sampling. - Only used for "classifier" or "classifier-free" guidance type. - unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. - Only used for "classifier-free" guidance type. - guidance_scale: A `float`. The scale for the guided sampling. 
- classifier_fn: A classifier function. Only used for the classifier guidance. - classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. - Returns: - A noise prediction model that accepts the noised data and the continuous time as the inputs. - """ - - def get_model_input_time(t_continuous): - """ - Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time. - For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N]. - For continuous-time DPMs, we just use `t_continuous`. - """ - if noise_schedule.schedule == 'discrete': - return (t_continuous - 1. / noise_schedule.total_N) * 1000. - else: - return t_continuous - - def noise_pred_fn(x, t_continuous, cond=None): - if t_continuous.reshape((-1,)).shape[0] == 1: - t_continuous = t_continuous.expand((x.shape[0])) - t_input = get_model_input_time(t_continuous) - if cond is None: - output = model(x, t_input, **model_kwargs) - else: - output = model(x, t_input, cond, **model_kwargs) - if model_type == "noise": - return output - elif model_type == "x_start": - alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) - dims = x.dim() - return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims) - elif model_type == "v": - alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) - dims = x.dim() - return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x - elif model_type == "score": - sigma_t = noise_schedule.marginal_std(t_continuous) - dims = x.dim() - return -expand_dims(sigma_t, dims) * output - - def cond_grad_fn(x, t_input): - """ - Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t). - """ - with torch.enable_grad(): - x_in = x.detach().requires_grad_(True) - log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs) - return torch.autograd.grad(log_prob.sum(), x_in)[0] - - def model_fn(x, t_continuous): - """ - The noise prediction model function that is used for DPM-Solver. - """ - if t_continuous.reshape((-1,)).shape[0] == 1: - t_continuous = t_continuous.expand((x.shape[0])) - if guidance_type == "uncond": - return noise_pred_fn(x, t_continuous) - elif guidance_type == "classifier": - assert classifier_fn is not None - t_input = get_model_input_time(t_continuous) - cond_grad = cond_grad_fn(x, t_input) - sigma_t = noise_schedule.marginal_std(t_continuous) - noise = noise_pred_fn(x, t_continuous) - return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad - elif guidance_type == "classifier-free": - if guidance_scale == 1. or unconditional_condition is None: - return noise_pred_fn(x, t_continuous, cond=condition) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t_continuous] * 2) - c_in = torch.cat([unconditional_condition, condition]) - noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2) - return noise_uncond + guidance_scale * (noise - noise_uncond) - - assert model_type in ["noise", "x_start", "v", "score"] - assert guidance_type in ["uncond", "classifier", "classifier-free"] - return model_fn - - -class DPM_Solver: - def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.): - """Construct a DPM-Solver. - - We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
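(For reference, the four parameterizations accepted by `model_wrapper` above all reduce to the same noise estimate; a minimal sketch of the conversions performed in `noise_pred_fn`, with `alpha_t`/`sigma_t` taken from the noise schedule and the function name purely illustrative:)

    def to_noise(out, x, alpha_t, sigma_t, model_type):
        # Sketch of the per-parameterization conversions used by noise_pred_fn above.
        if model_type == "noise":    # the model already predicts epsilon
            return out
        if model_type == "x_start":  # eps = (x - alpha_t * x0) / sigma_t
            return (x - alpha_t * out) / sigma_t
        if model_type == "v":        # eps = alpha_t * v + sigma_t * x
            return alpha_t * out + sigma_t * x
        if model_type == "score":    # eps = -sigma_t * score
            return -sigma_t * out
        raise ValueError(f"unknown model_type {model_type}")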
- If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver). - If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++). - In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True. - The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales. - - Args: - model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]): - `` - def model_fn(x, t_continuous): - return noise - `` - noise_schedule: A noise schedule object, such as NoiseScheduleVP. - predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model. - thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1]. - max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding. - - [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b. - """ - self.model = model_fn - self.noise_schedule = noise_schedule - self.predict_x0 = predict_x0 - self.thresholding = thresholding - self.max_val = max_val - - def noise_prediction_fn(self, x, t): - """ - Return the noise prediction model. - """ - return self.model(x, t) - - def data_prediction_fn(self, x, t): - """ - Return the data prediction model (with thresholding). - """ - noise = self.noise_prediction_fn(x, t) - dims = x.dim() - alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t) - x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims) - if self.thresholding: - p = 0.995 # A hyperparameter in the paper of "Imagen" [1]. - s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1) - s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims) - x0 = torch.clamp(x0, -s, s) / s - return x0 - - def model_fn(self, x, t): - """ - Convert the model to the noise prediction model or the data prediction model. - """ - if self.predict_x0: - return self.data_prediction_fn(x, t) - else: - return self.noise_prediction_fn(x, t) - - def get_time_steps(self, skip_type, t_T, t_0, N, device): - """Compute the intermediate time steps for sampling. - - Args: - skip_type: A `str`. The type for the spacing of the time steps. We support three types: - - 'logSNR': uniform logSNR for the time steps. - - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) - - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) - t_T: A `float`. The starting time of the sampling (default is T). - t_0: A `float`. The ending time of the sampling (default is epsilon). - N: A `int`. The total number of the spacing of the time steps. - device: A torch device. - Returns: - A pytorch tensor of the time steps, with the shape (N + 1,). 
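(A quick illustration of the two time-based spacings — a sketch with arbitrary example values; 'logSNR' additionally needs `marginal_lambda`/`inverse_lambda` and is omitted here:)

    import torch

    t_T, t_0, N = 1.0, 1e-3, 5
    uniform = torch.linspace(t_T, t_0, N + 1)                     # 'time_uniform'
    quadratic = torch.linspace(t_T**0.5, t_0**0.5, N + 1).pow(2)  # 'time_quadratic' (t_order = 2)
    # 'time_quadratic' clusters the steps near t_0, i.e. spends more of the budget at low noise.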
- """ - if skip_type == 'logSNR': - lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device)) - lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device)) - logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device) - return self.noise_schedule.inverse_lambda(logSNR_steps) - elif skip_type == 'time_uniform': - return torch.linspace(t_T, t_0, N + 1).to(device) - elif skip_type == 'time_quadratic': - t_order = 2 - t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device) - return t - else: - raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type)) - - def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): - """ - Get the order of each step for sampling by the singlestep DPM-Solver. - - We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast". - Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is: - - If order == 1: - We take `steps` of DPM-Solver-1 (i.e. DDIM). - - If order == 2: - - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling. - - If steps % 2 == 0, we use K steps of DPM-Solver-2. - - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1. - - If order == 3: - - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. - - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1. - - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1. - - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2. - - ============================================ - Args: - order: A `int`. The max order for the solver (2 or 3). - steps: A `int`. The total number of function evaluations (NFE). - skip_type: A `str`. The type for the spacing of the time steps. We support three types: - - 'logSNR': uniform logSNR for the time steps. - - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) - - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) - t_T: A `float`. The starting time of the sampling (default is T). - t_0: A `float`. The ending time of the sampling (default is epsilon). - device: A torch device. - Returns: - orders: A list of the solver order of each step. - """ - if order == 3: - K = steps // 3 + 1 - if steps % 3 == 0: - orders = [3,] * (K - 2) + [2, 1] - elif steps % 3 == 1: - orders = [3,] * (K - 1) + [1] - else: - orders = [3,] * (K - 1) + [2] - elif order == 2: - if steps % 2 == 0: - K = steps // 2 - orders = [2,] * K - else: - K = steps // 2 + 1 - orders = [2,] * (K - 1) + [1] - elif order == 1: - K = 1 - orders = [1,] * steps - else: - raise ValueError("'order' must be '1' or '2' or '3'.") - if skip_type == 'logSNR': - # To reproduce the results in DPM-Solver paper - timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) - else: - timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders)).to(device)] - return timesteps_outer, orders - - def denoise_to_zero_fn(self, x, s): - """ - Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization. 
- """ - return self.data_prediction_fn(x, s) - - def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False): - """ - DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`. - - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - model_s: A pytorch tensor. The model function evaluated at time `s`. - If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. - return_intermediate: A `bool`. If true, also return the model value at time `s`. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - ns = self.noise_schedule - dims = x.dim() - lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) - h = lambda_t - lambda_s - log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t) - sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t) - alpha_t = torch.exp(log_alpha_t) - - if self.predict_x0: - phi_1 = torch.expm1(-h) - if model_s is None: - model_s = self.model_fn(x, s) - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - ) - if return_intermediate: - return x_t, {'model_s': model_s} - else: - return x_t - else: - phi_1 = torch.expm1(h) - if model_s is None: - model_s = self.model_fn(x, s) - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - ) - if return_intermediate: - return x_t, {'model_s': model_s} - else: - return x_t - - def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpm_solver'): - """ - Singlestep solver DPM-Solver-2 from time `s` to time `t`. - - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - r1: A `float`. The hyperparameter of the second-order solver. - model_s: A pytorch tensor. The model function evaluated at time `s`. - If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. - return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. 
- """ - if solver_type not in ['dpm_solver', 'taylor']: - raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) - if r1 is None: - r1 = 0.5 - ns = self.noise_schedule - dims = x.dim() - lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) - h = lambda_t - lambda_s - lambda_s1 = lambda_s + r1 * h - s1 = ns.inverse_lambda(lambda_s1) - log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t) - sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t) - alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t) - - if self.predict_x0: - phi_11 = torch.expm1(-r1 * h) - phi_1 = torch.expm1(-h) - - if model_s is None: - model_s = self.model_fn(x, s) - x_s1 = ( - expand_dims(sigma_s1 / sigma_s, dims) * x - - expand_dims(alpha_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s) - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (model_s1 - model_s) - ) - else: - phi_11 = torch.expm1(r1 * h) - phi_1 = torch.expm1(h) - - if model_s is None: - model_s = self.model_fn(x, s) - x_s1 = ( - expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x - - expand_dims(sigma_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s) - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s) - ) - if return_intermediate: - return x_t, {'model_s': model_s, 'model_s1': model_s1} - else: - return x_t - - def singlestep_dpm_solver_third_update(self, x, s, t, r1=1./3., r2=2./3., model_s=None, model_s1=None, return_intermediate=False, solver_type='dpm_solver'): - """ - Singlestep solver DPM-Solver-3 from time `s` to time `t`. - - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - r1: A `float`. The hyperparameter of the third-order solver. - r2: A `float`. The hyperparameter of the third-order solver. - model_s: A pytorch tensor. The model function evaluated at time `s`. - If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. - model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`). - If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it. - return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. 
- Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - if solver_type not in ['dpm_solver', 'taylor']: - raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) - if r1 is None: - r1 = 1. / 3. - if r2 is None: - r2 = 2. / 3. - ns = self.noise_schedule - dims = x.dim() - lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) - h = lambda_t - lambda_s - lambda_s1 = lambda_s + r1 * h - lambda_s2 = lambda_s + r2 * h - s1 = ns.inverse_lambda(lambda_s1) - s2 = ns.inverse_lambda(lambda_s2) - log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t) - sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t) - alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t) - - if self.predict_x0: - phi_11 = torch.expm1(-r1 * h) - phi_12 = torch.expm1(-r2 * h) - phi_1 = torch.expm1(-h) - phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1. - phi_2 = phi_1 / h + 1. - phi_3 = phi_2 / h - 0.5 - - if model_s is None: - model_s = self.model_fn(x, s) - if model_s1 is None: - x_s1 = ( - expand_dims(sigma_s1 / sigma_s, dims) * x - - expand_dims(alpha_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - x_s2 = ( - expand_dims(sigma_s2 / sigma_s, dims) * x - - expand_dims(alpha_s2 * phi_12, dims) * model_s - + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s) - ) - model_s2 = self.model_fn(x_s2, s2) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s) - ) - elif solver_type == 'taylor': - D1_0 = (1. / r1) * (model_s1 - model_s) - D1_1 = (1. / r2) * (model_s2 - model_s) - D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) - D2 = 2. * (D1_1 - D1_0) / (r2 - r1) - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - + expand_dims(alpha_t * phi_2, dims) * D1 - - expand_dims(alpha_t * phi_3, dims) * D2 - ) - else: - phi_11 = torch.expm1(r1 * h) - phi_12 = torch.expm1(r2 * h) - phi_1 = torch.expm1(h) - phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1. - phi_2 = phi_1 / h - 1. - phi_3 = phi_2 / h - 0.5 - - if model_s is None: - model_s = self.model_fn(x, s) - if model_s1 is None: - x_s1 = ( - expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x - - expand_dims(sigma_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - x_s2 = ( - expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x - - expand_dims(sigma_s2 * phi_12, dims) * model_s - - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s) - ) - model_s2 = self.model_fn(x_s2, s2) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s) - ) - elif solver_type == 'taylor': - D1_0 = (1. / r1) * (model_s1 - model_s) - D1_1 = (1. / r2) * (model_s2 - model_s) - D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) - D2 = 2. 
* (D1_1 - D1_0) / (r2 - r1) - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - expand_dims(sigma_t * phi_2, dims) * D1 - - expand_dims(sigma_t * phi_3, dims) * D2 - ) - - if return_intermediate: - return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2} - else: - return x_t - - def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): - """ - Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. - - Args: - x: A pytorch tensor. The initial value at time `s`. - model_prev_list: A list of pytorch tensor. The previous computed model values. - t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - if solver_type not in ['dpm_solver', 'taylor']: - raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) - ns = self.noise_schedule - dims = x.dim() - model_prev_1, model_prev_0 = model_prev_list - t_prev_1, t_prev_0 = t_prev_list - lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) - log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) - sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) - alpha_t = torch.exp(log_alpha_t) - - h_0 = lambda_prev_0 - lambda_prev_1 - h = lambda_t - lambda_prev_0 - r0 = h_0 / h - D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) - if self.predict_x0: - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(sigma_t / sigma_prev_0, dims) * x - - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(sigma_t / sigma_prev_0, dims) * x - - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 - ) - else: - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 - ) - return x_t - - def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'): - """ - Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`. - - Args: - x: A pytorch tensor. The initial value at time `s`. - model_prev_list: A list of pytorch tensor. The previous computed model values. - t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. 
We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - ns = self.noise_schedule - dims = x.dim() - model_prev_2, model_prev_1, model_prev_0 = model_prev_list - t_prev_2, t_prev_1, t_prev_0 = t_prev_list - lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) - log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) - sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) - alpha_t = torch.exp(log_alpha_t) - - h_1 = lambda_prev_1 - lambda_prev_2 - h_0 = lambda_prev_0 - lambda_prev_1 - h = lambda_t - lambda_prev_0 - r0, r1 = h_0 / h, h_1 / h - D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) - D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2) - D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1) - D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1) - if self.predict_x0: - x_t = ( - expand_dims(sigma_t / sigma_prev_0, dims) * x - - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1 - - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h**2 - 0.5), dims) * D2 - ) - else: - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1 - - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h**2 - 0.5), dims) * D2 - ) - return x_t - - def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, r2=None): - """ - Singlestep DPM-Solver with the order `order` from time `s` to time `t`. - - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. - return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - r1: A `float`. The hyperparameter of the second-order or third-order solver. - r2: A `float`. The hyperparameter of the third-order solver. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - if order == 1: - return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate) - elif order == 2: - return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1) - elif order == 3: - return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2) - else: - raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) - - def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'): - """ - Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`. - - Args: - x: A pytorch tensor. The initial value at time `s`. - model_prev_list: A list of pytorch tensor. The previous computed model values. 
- t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - if order == 1: - return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1]) - elif order == 2: - return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) - elif order == 3: - return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) - else: - raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) - - def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, solver_type='dpm_solver'): - """ - The adaptive step size solver based on singlestep DPM-Solver. - - Args: - x: A pytorch tensor. The initial value at time `t_T`. - order: A `int`. The (higher) order of the solver. We only support order == 2 or 3. - t_T: A `float`. The starting time of the sampling (default is T). - t_0: A `float`. The ending time of the sampling (default is epsilon). - h_init: A `float`. The initial step size (for logSNR). - atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1]. - rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05. - theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1]. - t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the - current time and `t_0` is less than `t_err`. The default setting is 1e-5. - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_0: A pytorch tensor. The approximated solution at time `t_0`. - - [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021. - """ - ns = self.noise_schedule - s = t_T * torch.ones((x.shape[0],)).to(x) - lambda_s = ns.marginal_lambda(s) - lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x)) - h = h_init * torch.ones_like(s).to(x) - x_prev = x - nfe = 0 - if order == 2: - r1 = 0.5 - lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True) - higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs) - elif order == 3: - r1, r2 = 1. / 3., 2. / 3. 
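        # (Editorial sketch of the adaptive controller below, not part of the original code:
        #  each iteration compares a cheap lower-order step `x_lower` against a higher-order
        #  step `x_higher` over the same interval, using the mixed tolerance
        #      delta = max(atol, rtol * max(|x_lower|, |x_prev|)).
        #  The step is accepted iff the scaled error E = ||(x_higher - x_lower) / delta|| <= 1,
        #  and the logSNR step size is rescaled as h <- min(theta * h * E**(-1/order), lambda_0 - lambda_s),
        #  the standard step-size rule for embedded ODE solver pairs.)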
- lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type) - higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs) - else: - raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order)) - while torch.abs((s - t_0)).mean() > t_err: - t = ns.inverse_lambda(lambda_s + h) - x_lower, lower_noise_kwargs = lower_update(x, s, t) - x_higher = higher_update(x, s, t, **lower_noise_kwargs) - delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev))) - norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True)) - E = norm_fn((x_higher - x_lower) / delta).max() - if torch.all(E <= 1.): - x = x_higher - s = t - x_prev = x_lower - lambda_s = ns.marginal_lambda(s) - h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s) - nfe += order - print('adaptive solver nfe', nfe) - return x - - def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform', - method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', - atol=0.0078, rtol=0.05, - ): - """ - Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`. - - ===================================================== - - We support the following algorithms for both noise prediction model and data prediction model: - - 'singlestep': - Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver. - We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps). - The total number of function evaluations (NFE) == `steps`. - Given a fixed NFE == `steps`, the sampling procedure is: - - If `order` == 1: - - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM). - - If `order` == 2: - - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling. - - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2. - - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. - - If `order` == 3: - - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. - - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. - - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1. - - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2. - - 'multistep': - Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`. - We initialize the first `order` values by lower order multistep solvers. - Given a fixed NFE == `steps`, the sampling procedure is: - Denote K = steps. - - If `order` == 1: - - We use K steps of DPM-Solver-1 (i.e. DDIM). - - If `order` == 2: - - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2. - - If `order` == 3: - - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3. - - 'singlestep_fixed': - Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3). 
- We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE. - - 'adaptive': - Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper). - We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`. - You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs - (NFE) and the sample quality. - - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2. - - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3. - - ===================================================== - - Some advice on choosing the algorithm: - - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs: - Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`. - e.g. - >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False) - >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3, - skip_type='time_uniform', method='singlestep') - - For **guided sampling with large guidance scale** by DPMs: - Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`. - e.g. - >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True) - >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2, - skip_type='time_uniform', method='multistep') - - We support three types of `skip_type`: - - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolution images** - - 'time_uniform': uniform time for the time steps. **Recommended for high-resolution images**. - - 'time_quadratic': quadratic time for the time steps. - - ===================================================== - Args: - x: A pytorch tensor. The initial value at time `t_start`. - e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution. - steps: A `int`. The total number of function evaluations (NFE). - t_start: A `float`. The starting time of the sampling. - If `t_start` is None, we use self.noise_schedule.T (default is 1.0). - t_end: A `float`. The ending time of the sampling. - If `t_end` is None, we use 1. / self.noise_schedule.total_N. - e.g. if total_N == 1000, we have `t_end` == 1e-3. - For discrete-time DPMs: - - We recommend `t_end` == 1. / self.noise_schedule.total_N. - For continuous-time DPMs: - - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15. - order: A `int`. The order of DPM-Solver. - skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'. - method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'. - denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step. - Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1). - - This trick was first proposed by DDPM (https://arxiv.org/abs/2006.11239) and - score_sde (https://arxiv.org/abs/2011.13456). This trick can improve the FID - when sampling by diffusion SDEs for low-resolution images - (such as CIFAR-10). However, we observed that it does not matter for - high-resolution images. As it needs an additional NFE, we do not recommend - it for high-resolution images. - lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
- Only valid for `method=multistep` and `steps < 15`. We empirically find that - this trick is a key to stabilizing the sampling by DPM-Solver with very few steps - (especially for steps <= 10). So we recommend to set it to be `True`. - solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`. - atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. - rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. - Returns: - x_end: A pytorch tensor. The approximated solution at time `t_end`. - - """ - t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end - t_T = self.noise_schedule.T if t_start is None else t_start - device = x.device - if method == 'adaptive': - with torch.no_grad(): - x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type) - elif method == 'multistep': - assert steps >= order - timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) - assert timesteps.shape[0] - 1 == steps - with torch.no_grad(): - vec_t = timesteps[0].expand((x.shape[0])) - model_prev_list = [self.model_fn(x, vec_t)] - t_prev_list = [vec_t] - # Init the first `order` values by lower order multistep DPM-Solver. - for init_order in range(1, order): - vec_t = timesteps[init_order].expand(x.shape[0]) - x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, solver_type=solver_type) - model_prev_list.append(self.model_fn(x, vec_t)) - t_prev_list.append(vec_t) - # Compute the remaining values by `order`-th order multistep DPM-Solver. - for step in range(order, steps + 1): - vec_t = timesteps[step].expand(x.shape[0]) - if lower_order_final and steps < 15: - step_order = min(order, steps + 1 - step) - else: - step_order = order - x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, solver_type=solver_type) - for i in range(order - 1): - t_prev_list[i] = t_prev_list[i + 1] - model_prev_list[i] = model_prev_list[i + 1] - t_prev_list[-1] = vec_t - # We do not need to evaluate the final model value. 
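                    # (Editorial note, not part of the original code: `t_prev_list` / `model_prev_list`
                    #  act as a fixed-size FIFO of the last `order` evaluations. The shift above drops
                    #  the oldest entry; the newest slot is only refilled below when another step
                    #  remains, which saves one model evaluation on the final iteration.)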
- if step < steps: - model_prev_list[-1] = self.model_fn(x, vec_t) - elif method in ['singlestep', 'singlestep_fixed']: - if method == 'singlestep': - timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device) - elif method == 'singlestep_fixed': - K = steps // order - orders = [order,] * K - timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device) - for i, order in enumerate(orders): - t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1] - timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), N=order, device=device) - lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner) - vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0]) - h = lambda_inner[-1] - lambda_inner[0] - r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h - r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h - x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2) - if denoise_to_zero: - x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) - return x - - - -############################################################# -# other utility functions -############################################################# - -def interpolate_fn(x, xp, yp): - """ - A piecewise linear function y = f(x), using xp and yp as keypoints. - We implement f(x) in a differentiable way (i.e. applicable for autograd). - The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.) - - Args: - x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver). - xp: PyTorch tensor with shape [C, K], where K is the number of keypoints. - yp: PyTorch tensor with shape [C, K]. - Returns: - The function values f(x), with shape [N, C]. - """ - N, K = x.shape[0], xp.shape[1] - all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2) - sorted_all_x, x_indices = torch.sort(all_x, dim=2) - x_idx = torch.argmin(x_indices, dim=2) - cand_start_idx = x_idx - 1 - start_idx = torch.where( - torch.eq(x_idx, 0), - torch.tensor(1, device=x.device), - torch.where( - torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, - ), - ) - end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1) - start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2) - end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2) - start_idx2 = torch.where( - torch.eq(x_idx, 0), - torch.tensor(0, device=x.device), - torch.where( - torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, - ), - ) - y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1) - start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2) - end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2) - cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x) - return cand - - -def expand_dims(v, dims): - """ - Expand the tensor `v` to the dim `dims`. - - Args: - `v`: a PyTorch tensor with shape [N]. - `dim`: a `int`. - Returns: - a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`. 
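(For example — an illustrative check:)

    import torch

    v = torch.randn(4)
    assert expand_dims(v, 4).shape == (4, 1, 1, 1)  # broadcastable against an NCHW batch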
- """ - return v[(...,) + (None,)*(dims - 1)] \ No newline at end of file diff --git a/ldm/models/diffusion/dpm_solver/sampler.py b/ldm/models/diffusion/dpm_solver/sampler.py deleted file mode 100644 index 2c42d6f96..000000000 --- a/ldm/models/diffusion/dpm_solver/sampler.py +++ /dev/null @@ -1,82 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch - -from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver - - -class DPMSolverSampler(object): - def __init__(self, model, **kwargs): - super().__init__() - self.model = model - to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) - self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - - # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') - - device = self.model.betas.device - if x_T is None: - img = torch.randn(size, device=device) - else: - img = x_T - - ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) - - model_fn = model_wrapper( - lambda x, t, c: self.model.apply_model(x, t, c), - ns, - model_type="noise", - guidance_type="classifier-free", - condition=conditioning, - unconditional_condition=unconditional_conditioning, - guidance_scale=unconditional_guidance_scale, - ) - - dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False) - x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True) - - return x.to(device), None diff --git a/ldm/models/diffusion/plms.py b/ldm/models/diffusion/plms.py deleted file mode 100644 index 78eeb1003..000000000 --- a/ldm/models/diffusion/plms.py +++ /dev/null @@ -1,236 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -import numpy as np -from tqdm import tqdm -from functools import partial - -from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like - - -class PLMSSampler(object): - def __init__(self, model, schedule="linear", **kwargs): - super().__init__() - self.model = model - self.ddpm_num_timesteps = model.num_timesteps - self.schedule = schedule - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): - if ddim_eta != 0: - raise ValueError('ddim_eta must 
be 0 for PLMS') - self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) - alphas_cumprod = self.model.alphas_cumprod - assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' - to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) - - self.register_buffer('betas', to_torch(self.model.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=verbose) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
- **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for PLMS sampling is {size}') - - samples, intermediates = self.plms_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - ) - return samples, intermediates - - @torch.no_grad() - def plms_sampling(self, cond, shape, - x_T=None, ddim_use_original_steps=False, - callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, log_every_t=100, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None,): - device = self.model.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - if timesteps is None: - timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps - elif timesteps is not None and not ddim_use_original_steps: - subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 - timesteps = self.ddim_timesteps[:subset_end] - - intermediates = {'x_inter': [img], 'pred_x0': [img]} - time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) - total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] - print(f"Running PLMS Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) - old_eps = [] - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((b,), step, device=device, dtype=torch.long) - ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? - img = img_orig * mask + (1. 
- mask) * img - - outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, temperature=temperature, - noise_dropout=noise_dropout, score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - old_eps=old_eps, t_next=ts_next) - img, pred_x0, e_t = outs - old_eps.append(e_t) - if len(old_eps) >= 4: - old_eps.pop(0) - if callback: callback(i) - if img_callback: img_callback(pred_x0, i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - return img, intermediates - - @torch.no_grad() - def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None): - b, *_, device = *x.shape, x.device - - def get_model_output(x, t): - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - e_t = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - c_in = torch.cat([unconditional_conditioning, c]) - e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) - - if score_corrector is not None: - assert self.model.parameterization == "eps" - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - return e_t - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - - def get_x_prev_and_pred_x0(e_t, index): - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - # direction pointing to x_t - dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - e_t = get_model_output(x, t) - if len(old_eps) == 0: - # Pseudo Improved Euler (2nd order) - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) - e_t_next = get_model_output(x_prev, t_next) - e_t_prime = (e_t + e_t_next) / 2 - elif len(old_eps) == 1: - # 2nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (3 * e_t - old_eps[-1]) / 2 - elif len(old_eps) == 2: - # 3rd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 - elif len(old_eps) >= 3: - # 4th order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 - - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) - - return x_prev, pred_x0, e_t diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py deleted file mode 100644 index f4eff39cc..000000000 --- a/ldm/modules/attention.py +++ /dev/null @@ -1,261 +0,0 @@ -from inspect import isfunction -import math -import torch -import torch.nn.functional as F -from torch import nn, einsum -from einops import rearrange, repeat - -from ldm.modules.diffusionmodules.util import checkpoint - - -def exists(val): - return val is not None - - -def uniq(arr): - return {el: True for el in arr}.keys() - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def max_neg_value(t): - return -torch.finfo(t.dtype).max - - -def init_(tensor): - dim = tensor.shape[-1] - std = 1 / math.sqrt(dim) - tensor.uniform_(-std, std) - return tensor - - -# feedforward -class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * F.gelu(gate) - - -class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - nn.GELU() - ) if not glu else GEGLU(dim, inner_dim) - - self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - def forward(self, x): - return self.net(x) - - -def zero_module(module): - """ - Zero out the parameters of a module and return it.
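(Typically used to zero-initialize the final projection of a residual branch so the block starts as an identity map — see `proj_out` in SpatialTransformer below. An illustrative sketch, with hypothetical channel sizes:)

    import torch
    import torch.nn as nn

    proj = zero_module(nn.Conv2d(320, 320, kernel_size=1))
    x = torch.randn(1, 320, 8, 8)
    assert torch.equal(x + proj(x), x)  # all parameters start at zero, so the branch is a no-op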
- """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def Normalize(in_channels): - return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) - - -class LinearAttention(nn.Module): - def __init__(self, dim, heads=4, dim_head=32): - super().__init__() - self.heads = heads - hidden_dim = dim_head * heads - self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) - self.to_out = nn.Conv2d(hidden_dim, dim, 1) - - def forward(self, x): - b, c, h, w = x.shape - qkv = self.to_qkv(x) - q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) - k = k.softmax(dim=-1) - context = torch.einsum('bhdn,bhen->bhde', k, v) - out = torch.einsum('bhde,bhdn->bhen', context, q) - out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) - return self.to_out(out) - - -class SpatialSelfAttention(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = rearrange(q, 'b c h w -> b (h w) c') - k = rearrange(k, 'b c h w -> b c (h w)') - w_ = torch.einsum('bij,bjk->bik', q, k) - - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = rearrange(v, 'b c h w -> b c (h w)') - w_ = rearrange(w_, 'b i j -> b j i') - h_ = torch.einsum('bij,bjk->bik', v, w_) - h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) - h_ = self.proj_out(h_) - - return x+h_ - - -class CrossAttention(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): - super().__init__() - inner_dim = dim_head * heads - context_dim = default(context_dim, query_dim) - - self.scale = dim_head ** -0.5 - self.heads = heads - - self.to_q = nn.Linear(query_dim, inner_dim, bias=False) - self.to_k = nn.Linear(context_dim, inner_dim, bias=False) - self.to_v = nn.Linear(context_dim, inner_dim, bias=False) - - self.to_out = nn.Sequential( - nn.Linear(inner_dim, query_dim), - nn.Dropout(dropout) - ) - - def forward(self, x, context=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - sim = einsum('b i d, b j d -> b i j', q, k) * self.scale - - if exists(mask): - mask = rearrange(mask, 'b ... 
-> b (...)') - max_neg_value = -torch.finfo(sim.dtype).max - mask = repeat(mask, 'b j -> (b h) () j', h=h) - sim.masked_fill_(~mask, max_neg_value) - - # attention, what we cannot get enough of - attn = sim.softmax(dim=-1) - - out = einsum('b i j, b j d -> b i d', attn, v) - out = rearrange(out, '(b h) n d -> b n (h d)', h=h) - return self.to_out(out) - - -class BasicTransformerBlock(nn.Module): - def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True): - super().__init__() - self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is a self-attention - self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) - self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, - heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none - self.norm1 = nn.LayerNorm(dim) - self.norm2 = nn.LayerNorm(dim) - self.norm3 = nn.LayerNorm(dim) - self.checkpoint = checkpoint - - def forward(self, x, context=None): - return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) - - def _forward(self, x, context=None): - x = self.attn1(self.norm1(x)) + x - x = self.attn2(self.norm2(x), context=context) + x - x = self.ff(self.norm3(x)) + x - return x - - -class SpatialTransformer(nn.Module): - """ - Transformer block for image-like data. - First, project the input (aka embedding) - and reshape to b, t, d. - Then apply standard transformer action. - Finally, reshape to image - """ - def __init__(self, in_channels, n_heads, d_head, - depth=1, dropout=0., context_dim=None): - super().__init__() - self.in_channels = in_channels - inner_dim = n_heads * d_head - self.norm = Normalize(in_channels) - - self.proj_in = nn.Conv2d(in_channels, - inner_dim, - kernel_size=1, - stride=1, - padding=0) - - self.transformer_blocks = nn.ModuleList( - [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) - for d in range(depth)] - ) - - self.proj_out = zero_module(nn.Conv2d(inner_dim, - in_channels, - kernel_size=1, - stride=1, - padding=0)) - - def forward(self, x, context=None): - # note: if no context is given, cross-attention defaults to self-attention - b, c, h, w = x.shape - x_in = x - x = self.norm(x) - x = self.proj_in(x) - x = rearrange(x, 'b c h w -> b (h w) c') - for block in self.transformer_blocks: - x = block(x, context=context) - x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w) - x = self.proj_out(x) - return x + x_in \ No newline at end of file diff --git a/ldm/modules/diffusionmodules/__init__.py b/ldm/modules/diffusionmodules/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/ldm/modules/diffusionmodules/model.py b/ldm/modules/diffusionmodules/model.py deleted file mode 100644 index 533e589a2..000000000 --- a/ldm/modules/diffusionmodules/model.py +++ /dev/null @@ -1,835 +0,0 @@ -# pytorch_diffusion + derived encoder decoder -import math -import torch -import torch.nn as nn -import numpy as np -from einops import rearrange - -from ldm.util import instantiate_from_config -from ldm.modules.attention import LinearAttention - - -def get_timestep_embedding(timesteps, embedding_dim): - """ - This matches the implementation in Denoising Diffusion Probabilistic Models: - From Fairseq. - Build sinusoidal embeddings. - This matches the implementation in tensor2tensor, but differs slightly - from the description in Section 3.5 of "Attention Is All You Need". 
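    Roughly, for embedding dimension d the code below computes (a sketch of
    the computation that follows, not a verbatim excerpt):

        freqs = exp(-log(10000) * arange(d // 2) / (d // 2 - 1))
        emb_i = concat(sin(t_i * freqs), cos(t_i * freqs))   # [N, d], zero-padded if d is odd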
- """ - assert len(timesteps.shape) == 1 - - half_dim = embedding_dim // 2 - emb = math.log(10000) / (half_dim - 1) - emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) - emb = emb.to(device=timesteps.device) - emb = timesteps.float()[:, None] * emb[None, :] - emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) - if embedding_dim % 2 == 1: # zero pad - emb = torch.nn.functional.pad(emb, (0,1,0,0)) - return emb - - -def nonlinearity(x): - # swish - return x*torch.sigmoid(x) - - -def Normalize(in_channels, num_groups=32): - return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) - - -class Upsample(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") - if self.with_conv: - x = self.conv(x) - return x - - -class Downsample(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=2, - padding=0) - - def forward(self, x): - if self.with_conv: - pad = (0,1,0,1) - x = torch.nn.functional.pad(x, pad, mode="constant", value=0) - x = self.conv(x) - else: - x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) - return x - - -class ResnetBlock(nn.Module): - def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, - dropout, temb_channels=512): - super().__init__() - self.in_channels = in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - self.use_conv_shortcut = conv_shortcut - - self.norm1 = Normalize(in_channels) - self.conv1 = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - if temb_channels > 0: - self.temb_proj = torch.nn.Linear(temb_channels, - out_channels) - self.norm2 = Normalize(out_channels) - self.dropout = torch.nn.Dropout(dropout) - self.conv2 = torch.nn.Conv2d(out_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - self.conv_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - else: - self.nin_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x, temb): - h = x - h = self.norm1(h) - h = nonlinearity(h) - h = self.conv1(h) - - if temb is not None: - h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] - - h = self.norm2(h) - h = nonlinearity(h) - h = self.dropout(h) - h = self.conv2(h) - - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - x = self.conv_shortcut(x) - else: - x = self.nin_shortcut(x) - - return x+h - - -class LinAttnBlock(LinearAttention): - """to match AttnBlock usage""" - def __init__(self, in_channels): - super().__init__(dim=in_channels, heads=1, dim_head=in_channels) - - -class AttnBlock(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = 
torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = q.reshape(b,c,h*w) - q = q.permute(0,2,1) # b,hw,c - k = k.reshape(b,c,h*w) # b,c,hw - w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = v.reshape(b,c,h*w) - w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) - h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] - h_ = h_.reshape(b,c,h,w) - - h_ = self.proj_out(h_) - - return x+h_ - - -def make_attn(in_channels, attn_type="vanilla"): - assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown' - print(f"making attention of type '{attn_type}' with {in_channels} in_channels") - if attn_type == "vanilla": - return AttnBlock(in_channels) - elif attn_type == "none": - return nn.Identity(in_channels) - else: - return LinAttnBlock(in_channels) - - -class Model(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = ch - self.temb_ch = self.ch*4 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - - self.use_timestep = use_timestep - if self.use_timestep: - # timestep embedding - self.temb = nn.Module() - self.temb.dense = nn.ModuleList([ - torch.nn.Linear(self.ch, - self.temb_ch), - torch.nn.Linear(self.temb_ch, - self.temb_ch), - ]) - - # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) - - curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - down = nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions-1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - skip_in = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - 
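                # num_res_blocks+1 blocks per level on the way up: each block
                # consumes one encoder skip, and the extra iteration pairs with
                # the activation stored by the downsample (the stem conv at the
                # top level), which is why skip_in switches width below.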
if i_block == self.num_res_blocks: - skip_in = ch*in_ch_mult[i_level] - block.append(ResnetBlock(in_channels=block_in+skip_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x, t=None, context=None): - #assert x.shape[2] == x.shape[3] == self.resolution - if context is not None: - # assume aligned context, cat along channel axis - x = torch.cat((x, context), dim=1) - if self.use_timestep: - # timestep embedding - assert t is not None - temb = get_timestep_embedding(t, self.ch) - temb = self.temb.dense[0](temb) - temb = nonlinearity(temb) - temb = self.temb.dense[1](temb) - else: - temb = None - - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions-1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block]( - torch.cat([h, hs.pop()], dim=1), temb) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - def get_last_layer(self): - return self.conv_out.weight - - -class Encoder(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla", - **ignore_kwargs): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - - # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) - - curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) - self.in_ch_mult = in_ch_mult - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - down = nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions-1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - 
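        # Bottleneck at the lowest resolution: after num_resolutions-1
        # downsamples, an input of size `resolution` reaches
        # resolution // 2**(num_resolutions-1) (e.g. 256 -> 32 with four
        # levels), where a ResNet -> attention -> ResNet stack is applied.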
self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - 2*z_channels if double_z else z_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - # timestep embedding - temb = None - - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions-1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class Decoder(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, - attn_type="vanilla", **ignorekwargs): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - self.give_pre_end = give_pre_end - self.tanh_out = tanh_out - - # compute in_ch_mult, block_in and curr_res at lowest res - in_ch_mult = (1,)+tuple(ch_mult) - block_in = ch*ch_mult[self.num_resolutions-1] - curr_res = resolution // 2**(self.num_resolutions-1) - self.z_shape = (1,z_channels,curr_res,curr_res) - print("Working with z of shape {} = {} dimensions.".format( - self.z_shape, np.prod(self.z_shape))) - - # z to block_in - self.conv_in = torch.nn.Conv2d(z_channels, - block_in, - kernel_size=3, - stride=1, - padding=1) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, z): - #assert z.shape[1:] == self.z_shape[1:] - self.last_z_shape = z.shape - - # timestep embedding - temb = None - - # z to block_in - h = 
self.conv_in(z) - - # middle - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block](h, temb) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - if self.give_pre_end: - return h - - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - if self.tanh_out: - h = torch.tanh(h) - return h - - -class SimpleDecoder(nn.Module): - def __init__(self, in_channels, out_channels, *args, **kwargs): - super().__init__() - self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), - ResnetBlock(in_channels=in_channels, - out_channels=2 * in_channels, - temb_channels=0, dropout=0.0), - ResnetBlock(in_channels=2 * in_channels, - out_channels=4 * in_channels, - temb_channels=0, dropout=0.0), - ResnetBlock(in_channels=4 * in_channels, - out_channels=2 * in_channels, - temb_channels=0, dropout=0.0), - nn.Conv2d(2*in_channels, in_channels, 1), - Upsample(in_channels, with_conv=True)]) - # end - self.norm_out = Normalize(in_channels) - self.conv_out = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - for i, layer in enumerate(self.model): - if i in [1,2,3]: - x = layer(x, None) - else: - x = layer(x) - - h = self.norm_out(x) - h = nonlinearity(h) - x = self.conv_out(h) - return x - - -class UpsampleDecoder(nn.Module): - def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, - ch_mult=(2,2), dropout=0.0): - super().__init__() - # upsampling - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - block_in = in_channels - curr_res = resolution // 2 ** (self.num_resolutions - 1) - self.res_blocks = nn.ModuleList() - self.upsample_blocks = nn.ModuleList() - for i_level in range(self.num_resolutions): - res_block = [] - block_out = ch * ch_mult[i_level] - for i_block in range(self.num_res_blocks + 1): - res_block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - self.res_blocks.append(nn.ModuleList(res_block)) - if i_level != self.num_resolutions - 1: - self.upsample_blocks.append(Upsample(block_in, True)) - curr_res = curr_res * 2 - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - # upsampling - h = x - for k, i_level in enumerate(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks + 1): - h = self.res_blocks[i_level][i_block](h, None) - if i_level != self.num_resolutions - 1: - h = self.upsample_blocks[k](h) - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class LatentRescaler(nn.Module): - def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): - super().__init__() - # residual block, interpolate, residual block - self.factor = factor - self.conv_in = nn.Conv2d(in_channels, - mid_channels, - kernel_size=3, - stride=1, - padding=1) - self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, - out_channels=mid_channels, - temb_channels=0, - dropout=0.0) for _ in range(depth)]) - self.attn = AttnBlock(mid_channels) - self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, 
-                                                     out_channels=mid_channels,
-                                                     temb_channels=0,
-                                                     dropout=0.0) for _ in range(depth)])
-
-        self.conv_out = nn.Conv2d(mid_channels,
-                                  out_channels,
-                                  kernel_size=1,
-                                  )
-
-    def forward(self, x):
-        x = self.conv_in(x)
-        for block in self.res_block1:
-            x = block(x, None)
-        x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor))))
-        x = self.attn(x)
-        for block in self.res_block2:
-            x = block(x, None)
-        x = self.conv_out(x)
-        return x
-
-
-class MergedRescaleEncoder(nn.Module):
-    def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
-                 attn_resolutions, dropout=0.0, resamp_with_conv=True,
-                 ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
-        super().__init__()
-        intermediate_chn = ch * ch_mult[-1]
-        self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult,
-                               z_channels=intermediate_chn, double_z=False, resolution=resolution,
-                               attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
-                               out_ch=None)
-        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
-                                       mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)
-
-    def forward(self, x):
-        x = self.encoder(x)
-        x = self.rescaler(x)
-        return x
-
-
-class MergedRescaleDecoder(nn.Module):
-    def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
-                 dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
-        super().__init__()
-        tmp_chn = z_channels*ch_mult[-1]
-        self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
-                               resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
-                               ch_mult=ch_mult, resolution=resolution, ch=ch)
-        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
-                                       out_channels=tmp_chn, depth=rescale_module_depth)
-
-    def forward(self, x):
-        x = self.rescaler(x)
-        x = self.decoder(x)
-        return x
-
-
-class Upsampler(nn.Module):
-    def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
-        super().__init__()
-        assert out_size >= in_size
-        num_blocks = int(np.log2(out_size//in_size))+1
-        factor_up = 1.+ (out_size % in_size)
-        print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
-        self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels,
-                                       out_channels=in_channels)
-        self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
-                               attn_resolutions=[], in_channels=None, ch=in_channels,
-                               ch_mult=[ch_mult for _ in range(num_blocks)])
-
-    def forward(self, x):
-        x = self.rescaler(x)
-        x = self.decoder(x)
-        return x
-
-
-class Resize(nn.Module):
-    def __init__(self, in_channels=None, learned=False, mode="bilinear"):
-        super().__init__()
-        self.with_conv = learned
-        self.mode = mode
-        if self.with_conv:
-            print(f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode")
-            raise NotImplementedError()
-            assert in_channels is not None
-            # no asymmetric padding in torch conv, must do it ourselves
-            self.conv = torch.nn.Conv2d(in_channels,
-                                        in_channels,
-                                        kernel_size=4,
-                                        stride=2,
-                                        padding=1)
-
-    def forward(self, x, scale_factor=1.0):
-        if scale_factor==1.0:
-            return x
-        else:
-            x = torch.nn.functional.interpolate(x,
mode=self.mode, align_corners=False, scale_factor=scale_factor) - return x - -class FirstStagePostProcessor(nn.Module): - - def __init__(self, ch_mult:list, in_channels, - pretrained_model:nn.Module=None, - reshape=False, - n_channels=None, - dropout=0., - pretrained_config=None): - super().__init__() - if pretrained_config is None: - assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' - self.pretrained_model = pretrained_model - else: - assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' - self.instantiate_pretrained(pretrained_config) - - self.do_reshape = reshape - - if n_channels is None: - n_channels = self.pretrained_model.encoder.ch - - self.proj_norm = Normalize(in_channels,num_groups=in_channels//2) - self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3, - stride=1,padding=1) - - blocks = [] - downs = [] - ch_in = n_channels - for m in ch_mult: - blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout)) - ch_in = m * n_channels - downs.append(Downsample(ch_in, with_conv=False)) - - self.model = nn.ModuleList(blocks) - self.downsampler = nn.ModuleList(downs) - - - def instantiate_pretrained(self, config): - model = instantiate_from_config(config) - self.pretrained_model = model.eval() - # self.pretrained_model.train = False - for param in self.pretrained_model.parameters(): - param.requires_grad = False - - - @torch.no_grad() - def encode_with_pretrained(self,x): - c = self.pretrained_model.encode(x) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - return c - - def forward(self,x): - z_fs = self.encode_with_pretrained(x) - z = self.proj_norm(z_fs) - z = self.proj(z) - z = nonlinearity(z) - - for submodel, downmodel in zip(self.model,self.downsampler): - z = submodel(z,temb=None) - z = downmodel(z) - - if self.do_reshape: - z = rearrange(z,'b c h w -> b (h w) c') - return z - diff --git a/ldm/modules/diffusionmodules/openaimodel.py b/ldm/modules/diffusionmodules/openaimodel.py deleted file mode 100644 index fcf95d1ea..000000000 --- a/ldm/modules/diffusionmodules/openaimodel.py +++ /dev/null @@ -1,961 +0,0 @@ -from abc import abstractmethod -from functools import partial -import math -from typing import Iterable - -import numpy as np -import torch as th -import torch.nn as nn -import torch.nn.functional as F - -from ldm.modules.diffusionmodules.util import ( - checkpoint, - conv_nd, - linear, - avg_pool_nd, - zero_module, - normalization, - timestep_embedding, -) -from ldm.modules.attention import SpatialTransformer - - -# dummy replace -def convert_module_to_f16(x): - pass - -def convert_module_to_f32(x): - pass - - -## go -class AttentionPool2d(nn.Module): - """ - Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py - """ - - def __init__( - self, - spacial_dim: int, - embed_dim: int, - num_heads_channels: int, - output_dim: int = None, - ): - super().__init__() - self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) - self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) - self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) - self.num_heads = embed_dim // num_heads_channels - self.attention = QKVAttention(self.num_heads) - - def forward(self, x): - b, c, *_spatial = x.shape - x = x.reshape(b, c, -1) # NC(HW) - x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) - x = x + self.positional_embedding[None, :, :].to(x.dtype) # 
NC(HW+1) - x = self.qkv_proj(x) - x = self.attention(x) - x = self.c_proj(x) - return x[:, :, 0] - - -class TimestepBlock(nn.Module): - """ - Any module where forward() takes timestep embeddings as a second argument. - """ - - @abstractmethod - def forward(self, x, emb): - """ - Apply the module to `x` given `emb` timestep embeddings. - """ - - -class TimestepEmbedSequential(nn.Sequential, TimestepBlock): - """ - A sequential module that passes timestep embeddings to the children that - support it as an extra input. - """ - - def forward(self, x, emb, context=None): - for layer in self: - if isinstance(layer, TimestepBlock): - x = layer(x, emb) - elif isinstance(layer, SpatialTransformer): - x = layer(x, context) - else: - x = layer(x) - return x - - -class Upsample(nn.Module): - """ - An upsampling layer with an optional convolution. - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - upsampling occurs in the inner-two dimensions. - """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - if use_conv: - self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) - - def forward(self, x): - assert x.shape[1] == self.channels - if self.dims == 3: - x = F.interpolate( - x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" - ) - else: - x = F.interpolate(x, scale_factor=2, mode="nearest") - if self.use_conv: - x = self.conv(x) - return x - -class TransposedUpsample(nn.Module): - 'Learned 2x upsampling without padding' - def __init__(self, channels, out_channels=None, ks=5): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - - self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) - - def forward(self,x): - return self.up(x) - - -class Downsample(nn.Module): - """ - A downsampling layer with an optional convolution. - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - downsampling occurs in the inner-two dimensions. - """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - stride = 2 if dims != 3 else (1, 2, 2) - if use_conv: - self.op = conv_nd( - dims, self.channels, self.out_channels, 3, stride=stride, padding=padding - ) - else: - assert self.channels == self.out_channels - self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) - - def forward(self, x): - assert x.shape[1] == self.channels - return self.op(x) - - -class ResBlock(TimestepBlock): - """ - A residual block that can optionally change the number of channels. - :param channels: the number of input channels. - :param emb_channels: the number of timestep embedding channels. - :param dropout: the rate of dropout. - :param out_channels: if specified, the number of out channels. - :param use_conv: if True and out_channels is specified, use a spatial - convolution instead of a smaller 1x1 convolution to change the - channels in the skip connection. - :param dims: determines if the signal is 1D, 2D, or 3D. 
- :param use_checkpoint: if True, use gradient checkpointing on this module. - :param up: if True, use this block for upsampling. - :param down: if True, use this block for downsampling. - """ - - def __init__( - self, - channels, - emb_channels, - dropout, - out_channels=None, - use_conv=False, - use_scale_shift_norm=False, - dims=2, - use_checkpoint=False, - up=False, - down=False, - ): - super().__init__() - self.channels = channels - self.emb_channels = emb_channels - self.dropout = dropout - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_checkpoint = use_checkpoint - self.use_scale_shift_norm = use_scale_shift_norm - - self.in_layers = nn.Sequential( - normalization(channels), - nn.SiLU(), - conv_nd(dims, channels, self.out_channels, 3, padding=1), - ) - - self.updown = up or down - - if up: - self.h_upd = Upsample(channels, False, dims) - self.x_upd = Upsample(channels, False, dims) - elif down: - self.h_upd = Downsample(channels, False, dims) - self.x_upd = Downsample(channels, False, dims) - else: - self.h_upd = self.x_upd = nn.Identity() - - self.emb_layers = nn.Sequential( - nn.SiLU(), - linear( - emb_channels, - 2 * self.out_channels if use_scale_shift_norm else self.out_channels, - ), - ) - self.out_layers = nn.Sequential( - normalization(self.out_channels), - nn.SiLU(), - nn.Dropout(p=dropout), - zero_module( - conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) - ), - ) - - if self.out_channels == channels: - self.skip_connection = nn.Identity() - elif use_conv: - self.skip_connection = conv_nd( - dims, channels, self.out_channels, 3, padding=1 - ) - else: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) - - def forward(self, x, emb): - """ - Apply the block to a Tensor, conditioned on a timestep embedding. - :param x: an [N x C x ...] Tensor of features. - :param emb: an [N x emb_channels] Tensor of timestep embeddings. - :return: an [N x C x ...] Tensor of outputs. - """ - return checkpoint( - self._forward, (x, emb), self.parameters(), self.use_checkpoint - ) - - - def _forward(self, x, emb): - if self.updown: - in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] - h = in_rest(x) - h = self.h_upd(h) - x = self.x_upd(x) - h = in_conv(h) - else: - h = self.in_layers(x) - emb_out = self.emb_layers(emb).type(h.dtype) - while len(emb_out.shape) < len(h.shape): - emb_out = emb_out[..., None] - if self.use_scale_shift_norm: - out_norm, out_rest = self.out_layers[0], self.out_layers[1:] - scale, shift = th.chunk(emb_out, 2, dim=1) - h = out_norm(h) * (1 + scale) + shift - h = out_rest(h) - else: - h = h + emb_out - h = self.out_layers(h) - return self.skip_connection(x) + h - - -class AttentionBlock(nn.Module): - """ - An attention block that allows spatial positions to attend to each other. - Originally ported from here, but adapted to the N-d case. - https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. 
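    The ResBlock above applies its timestep conditioning FiLM-style when
    use_scale_shift_norm is set; schematically (a condensed sketch of the
    branch in _forward, not a verbatim excerpt):

        scale, shift = emb_out.chunk(2, dim=1)
        h = out_norm(h) * (1 + scale) + shift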
- """ - - def __init__( - self, - channels, - num_heads=1, - num_head_channels=-1, - use_checkpoint=False, - use_new_attention_order=False, - ): - super().__init__() - self.channels = channels - if num_head_channels == -1: - self.num_heads = num_heads - else: - assert ( - channels % num_head_channels == 0 - ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" - self.num_heads = channels // num_head_channels - self.use_checkpoint = use_checkpoint - self.norm = normalization(channels) - self.qkv = conv_nd(1, channels, channels * 3, 1) - if use_new_attention_order: - # split qkv before split heads - self.attention = QKVAttention(self.num_heads) - else: - # split heads before split qkv - self.attention = QKVAttentionLegacy(self.num_heads) - - self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) - - def forward(self, x): - return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! - #return pt_checkpoint(self._forward, x) # pytorch - - def _forward(self, x): - b, c, *spatial = x.shape - x = x.reshape(b, c, -1) - qkv = self.qkv(self.norm(x)) - h = self.attention(qkv) - h = self.proj_out(h) - return (x + h).reshape(b, c, *spatial) - - -def count_flops_attn(model, _x, y): - """ - A counter for the `thop` package to count the operations in an - attention operation. - Meant to be used like: - macs, params = thop.profile( - model, - inputs=(inputs, timestamps), - custom_ops={QKVAttention: QKVAttention.count_flops}, - ) - """ - b, c, *spatial = y[0].shape - num_spatial = int(np.prod(spatial)) - # We perform two matmuls with the same number of ops. - # The first computes the weight matrix, the second computes - # the combination of the value vectors. - matmul_ops = 2 * b * (num_spatial ** 2) * c - model.total_ops += th.DoubleTensor([matmul_ops]) - - -class QKVAttentionLegacy(nn.Module): - """ - A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv): - """ - Apply QKV attention. - :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. - """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = th.einsum( - "bct,bcs->bts", q * scale, k * scale - ) # More stable with f16 than dividing afterwards - weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum("bts,bcs->bct", weight, v) - return a.reshape(bs, -1, length) - - @staticmethod - def count_flops(model, _x, y): - return count_flops_attn(model, _x, y) - - -class QKVAttention(nn.Module): - """ - A module which performs QKV attention and splits in a different order. - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv): - """ - Apply QKV attention. - :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. 
- """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.chunk(3, dim=1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = th.einsum( - "bct,bcs->bts", - (q * scale).view(bs * self.n_heads, ch, length), - (k * scale).view(bs * self.n_heads, ch, length), - ) # More stable with f16 than dividing afterwards - weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) - return a.reshape(bs, -1, length) - - @staticmethod - def count_flops(model, _x, y): - return count_flops_attn(model, _x, y) - - -class UNetModel(nn.Module): - """ - The full UNet model with attention and timestep embedding. - :param in_channels: channels in the input Tensor. - :param model_channels: base channel count for the model. - :param out_channels: channels in the output Tensor. - :param num_res_blocks: number of residual blocks per downsample. - :param attention_resolutions: a collection of downsample rates at which - attention will take place. May be a set, list, or tuple. - For example, if this contains 4, then at 4x downsampling, attention - will be used. - :param dropout: the dropout probability. - :param channel_mult: channel multiplier for each level of the UNet. - :param conv_resample: if True, use learned convolutions for upsampling and - downsampling. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param num_classes: if specified (as an int), then this model will be - class-conditional with `num_classes` classes. - :param use_checkpoint: use gradient checkpointing to reduce memory usage. - :param num_heads: the number of attention heads in each attention layer. - :param num_heads_channels: if specified, ignore num_heads and instead use - a fixed channel width per attention head. - :param num_heads_upsample: works with num_heads to set a different number - of heads for upsampling. Deprecated. - :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. - :param resblock_updown: use residual blocks for up/downsampling. - :param use_new_attention_order: use a different attention pattern for potentially - increased efficiency. - """ - - def __init__( - self, - image_size, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - num_classes=None, - use_checkpoint=False, - use_fp16=False, - num_heads=-1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - use_new_attention_order=False, - use_spatial_transformer=False, # custom transformer support - transformer_depth=1, # custom transformer support - context_dim=None, # custom transformer support - n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model - legacy=True, - ): - super().__init__() - if use_spatial_transformer: - assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' - - if context_dim is not None: - assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
- from omegaconf.listconfig import ListConfig - if type(context_dim) == ListConfig: - context_dim = list(context_dim) - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - if num_heads == -1: - assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' - - if num_head_channels == -1: - assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' - - self.image_size = image_size - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - self.num_res_blocks = num_res_blocks - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.num_classes = num_classes - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - self.predict_codebook_ids = n_embed is not None - - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - if self.num_classes is not None: - self.label_emb = nn.Embedding(num_classes, time_embed_dim) - - self.input_blocks = nn.ModuleList( - [ - TimestepEmbedSequential( - conv_nd(dims, in_channels, model_channels, 3, padding=1) - ) - ] - ) - self._feature_size = model_channels - input_block_chans = [model_channels] - ch = model_channels - ds = 1 - for level, mult in enumerate(channel_mult): - for _ in range(num_res_blocks): - layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=mult * model_channels, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = mult * model_channels - if ds in attention_resolutions: - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - #num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch - ) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - self._feature_size += ch - - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - #num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - 
num_heads=num_heads, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim - ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self._feature_size += ch - - self.output_blocks = nn.ModuleList([]) - for level, mult in list(enumerate(channel_mult))[::-1]: - for i in range(num_res_blocks + 1): - ich = input_block_chans.pop() - layers = [ - ResBlock( - ch + ich, - time_embed_dim, - dropout, - out_channels=model_channels * mult, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = model_channels * mult - if ds in attention_resolutions: - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - #num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads_upsample, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim - ) - ) - if level and i == num_res_blocks: - out_ch = ch - layers.append( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - up=True, - ) - if resblock_updown - else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) - ) - ds //= 2 - self.output_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), - ) - if self.predict_codebook_ids: - self.id_predictor = nn.Sequential( - normalization(ch), - conv_nd(dims, model_channels, n_embed, 1), - #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits - ) - - def convert_to_fp16(self): - """ - Convert the torso of the model to float16. - """ - self.input_blocks.apply(convert_module_to_f16) - self.middle_block.apply(convert_module_to_f16) - self.output_blocks.apply(convert_module_to_f16) - - def convert_to_fp32(self): - """ - Convert the torso of the model to float32. - """ - self.input_blocks.apply(convert_module_to_f32) - self.middle_block.apply(convert_module_to_f32) - self.output_blocks.apply(convert_module_to_f32) - - def forward(self, x, timesteps=None, context=None, y=None,**kwargs): - """ - Apply the model to an input batch. - :param x: an [N x C x ...] Tensor of inputs. - :param timesteps: a 1-D batch of timesteps. - :param context: conditioning plugged in via crossattn - :param y: an [N] Tensor of labels, if class-conditional. - :return: an [N x C x ...] Tensor of outputs. 
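        For a text-conditioned latent model the call is typically (shapes are
        illustrative assumptions, not fixed by this file):

            eps = unet(x, timesteps=t, context=c)   # x: [N, C, H, W], t: [N], c: [N, seq_len, context_dim]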
- """ - assert (y is not None) == ( - self.num_classes is not None - ), "must specify y if and only if the model is class-conditional" - hs = [] - t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) - emb = self.time_embed(t_emb) - - if self.num_classes is not None: - assert y.shape == (x.shape[0],) - emb = emb + self.label_emb(y) - - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb, context) - hs.append(h) - h = self.middle_block(h, emb, context) - for module in self.output_blocks: - h = th.cat([h, hs.pop()], dim=1) - h = module(h, emb, context) - h = h.type(x.dtype) - if self.predict_codebook_ids: - return self.id_predictor(h) - else: - return self.out(h) - - -class EncoderUNetModel(nn.Module): - """ - The half UNet model with attention and timestep embedding. - For usage, see UNet. - """ - - def __init__( - self, - image_size, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - use_checkpoint=False, - use_fp16=False, - num_heads=1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - use_new_attention_order=False, - pool="adaptive", - *args, - **kwargs - ): - super().__init__() - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - self.num_res_blocks = num_res_blocks - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - self.input_blocks = nn.ModuleList( - [ - TimestepEmbedSequential( - conv_nd(dims, in_channels, model_channels, 3, padding=1) - ) - ] - ) - self._feature_size = model_channels - input_block_chans = [model_channels] - ch = model_channels - ds = 1 - for level, mult in enumerate(channel_mult): - for _ in range(num_res_blocks): - layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=mult * model_channels, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = mult * model_channels - if ds in attention_resolutions: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - use_new_attention_order=use_new_attention_order, - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch - ) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - self._feature_size += ch - - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - 
dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - use_new_attention_order=use_new_attention_order, - ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self._feature_size += ch - self.pool = pool - if pool == "adaptive": - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - nn.AdaptiveAvgPool2d((1, 1)), - zero_module(conv_nd(dims, ch, out_channels, 1)), - nn.Flatten(), - ) - elif pool == "attention": - assert num_head_channels != -1 - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - AttentionPool2d( - (image_size // ds), ch, num_head_channels, out_channels - ), - ) - elif pool == "spatial": - self.out = nn.Sequential( - nn.Linear(self._feature_size, 2048), - nn.ReLU(), - nn.Linear(2048, self.out_channels), - ) - elif pool == "spatial_v2": - self.out = nn.Sequential( - nn.Linear(self._feature_size, 2048), - normalization(2048), - nn.SiLU(), - nn.Linear(2048, self.out_channels), - ) - else: - raise NotImplementedError(f"Unexpected {pool} pooling") - - def convert_to_fp16(self): - """ - Convert the torso of the model to float16. - """ - self.input_blocks.apply(convert_module_to_f16) - self.middle_block.apply(convert_module_to_f16) - - def convert_to_fp32(self): - """ - Convert the torso of the model to float32. - """ - self.input_blocks.apply(convert_module_to_f32) - self.middle_block.apply(convert_module_to_f32) - - def forward(self, x, timesteps): - """ - Apply the model to an input batch. - :param x: an [N x C x ...] Tensor of inputs. - :param timesteps: a 1-D batch of timesteps. - :return: an [N x K] Tensor of outputs. - """ - emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) - - results = [] - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb) - if self.pool.startswith("spatial"): - results.append(h.type(x.dtype).mean(dim=(2, 3))) - h = self.middle_block(h, emb) - if self.pool.startswith("spatial"): - results.append(h.type(x.dtype).mean(dim=(2, 3))) - h = th.cat(results, axis=-1) - return self.out(h) - else: - h = h.type(x.dtype) - return self.out(h) - diff --git a/ldm/modules/diffusionmodules/util.py b/ldm/modules/diffusionmodules/util.py deleted file mode 100644 index a952e6c40..000000000 --- a/ldm/modules/diffusionmodules/util.py +++ /dev/null @@ -1,267 +0,0 @@ -# adopted from -# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py -# and -# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -# and -# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py -# -# thanks! 
-
-
-import os
-import math
-import torch
-import torch.nn as nn
-import numpy as np
-from einops import repeat
-
-from ldm.util import instantiate_from_config
-
-
-def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
-    if schedule == "linear":
-        betas = (
-                torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
-        )
-
-    elif schedule == "cosine":
-        timesteps = (
-                torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
-        )
-        alphas = timesteps / (1 + cosine_s) * np.pi / 2
-        alphas = torch.cos(alphas).pow(2)
-        alphas = alphas / alphas[0]
-        betas = 1 - alphas[1:] / alphas[:-1]
-        betas = np.clip(betas, a_min=0, a_max=0.999)
-
-    elif schedule == "sqrt_linear":
-        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
-    elif schedule == "sqrt":
-        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
-    else:
-        raise ValueError(f"schedule '{schedule}' unknown.")
-    return betas.numpy()
-
-
-def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
-    if ddim_discr_method == 'uniform':
-        c = num_ddpm_timesteps // num_ddim_timesteps
-        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
-    elif ddim_discr_method == 'quad':
-        ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
-    else:
-        raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
-
-    # assert ddim_timesteps.shape[0] == num_ddim_timesteps
-    # add one to get the final alpha values right (the ones from first scale to data during sampling)
-    steps_out = ddim_timesteps + 1
-    if verbose:
-        print(f'Selected timesteps for ddim sampler: {steps_out}')
-    return steps_out
-
-
-def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
-    # select alphas for computing the variance schedule
-    alphas = alphacums[ddim_timesteps]
-    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
-
-    # according to the formula provided in https://arxiv.org/abs/2010.02502
-    sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
-    if verbose:
-        print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
-        print(f'For the chosen value of eta, which is {eta}, '
-              f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
-    return sigmas, alphas, alphas_prev
-
-
-def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
-    """
-    Create a beta schedule that discretizes the given alpha_t_bar function,
-    which defines the cumulative product of (1-beta) over time from t = [0,1].
-    :param num_diffusion_timesteps: the number of betas to produce.
-    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
-                      produces the cumulative product of (1-beta) up to that
-                      part of the diffusion process.
-    :param max_beta: the maximum beta to use; use values lower than 1 to
-                     prevent singularities.
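    For example, the squared-cosine schedule from the guided-diffusion
    codebase can be recovered with (a usage sketch; the lambda is supplied
    by the caller, not defined here):

        betas = betas_for_alpha_bar(
            1000,
            lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
        )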
- """ - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return np.array(betas) - - -def extract_into_tensor(a, t, x_shape): - b, *_ = t.shape - out = a.gather(-1, t) - return out.reshape(b, *((1,) * (len(x_shape) - 1))) - - -def checkpoint(func, inputs, params, flag): - """ - Evaluate a function without caching intermediate activations, allowing for - reduced memory at the expense of extra compute in the backward pass. - :param func: the function to evaluate. - :param inputs: the argument sequence to pass to `func`. - :param params: a sequence of parameters `func` depends on but does not - explicitly take as arguments. - :param flag: if False, disable gradient checkpointing. - """ - if flag: - args = tuple(inputs) + tuple(params) - return CheckpointFunction.apply(func, len(inputs), *args) - else: - return func(*inputs) - - -class CheckpointFunction(torch.autograd.Function): - @staticmethod - def forward(ctx, run_function, length, *args): - ctx.run_function = run_function - ctx.input_tensors = list(args[:length]) - ctx.input_params = list(args[length:]) - - with torch.no_grad(): - output_tensors = ctx.run_function(*ctx.input_tensors) - return output_tensors - - @staticmethod - def backward(ctx, *output_grads): - ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] - with torch.enable_grad(): - # Fixes a bug where the first op in run_function modifies the - # Tensor storage in place, which is not allowed for detach()'d - # Tensors. - shallow_copies = [x.view_as(x) for x in ctx.input_tensors] - output_tensors = ctx.run_function(*shallow_copies) - input_grads = torch.autograd.grad( - output_tensors, - ctx.input_tensors + ctx.input_params, - output_grads, - allow_unused=True, - ) - del ctx.input_tensors - del ctx.input_params - del output_tensors - return (None, None) + input_grads - - -def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): - """ - Create sinusoidal timestep embeddings. - :param timesteps: a 1-D Tensor of N indices, one per batch element. - These may be fractional. - :param dim: the dimension of the output. - :param max_period: controls the minimum frequency of the embeddings. - :return: an [N x dim] Tensor of positional embeddings. - """ - if not repeat_only: - half = dim // 2 - freqs = torch.exp( - -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half - ).to(device=timesteps.device) - args = timesteps[:, None].float() * freqs[None] - embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) - if dim % 2: - embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) - else: - embedding = repeat(timesteps, 'b -> b d', d=dim) - return embedding - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def scale_module(module, scale): - """ - Scale the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().mul_(scale) - return module - - -def mean_flat(tensor): - """ - Take the mean over all non-batch dimensions. - """ - return tensor.mean(dim=list(range(1, len(tensor.shape)))) - - -def normalization(channels): - """ - Make a standard normalization layer. - :param channels: number of input channels. - :return: an nn.Module for normalization. 
- """ - return GroupNorm32(32, channels) - - -# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. -class SiLU(nn.Module): - def forward(self, x): - return x * torch.sigmoid(x) - - -class GroupNorm32(nn.GroupNorm): - def forward(self, x): - return super().forward(x.float()).type(x.dtype) - -def conv_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D convolution module. - """ - if dims == 1: - return nn.Conv1d(*args, **kwargs) - elif dims == 2: - return nn.Conv2d(*args, **kwargs) - elif dims == 3: - return nn.Conv3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -def linear(*args, **kwargs): - """ - Create a linear module. - """ - return nn.Linear(*args, **kwargs) - - -def avg_pool_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D average pooling module. - """ - if dims == 1: - return nn.AvgPool1d(*args, **kwargs) - elif dims == 2: - return nn.AvgPool2d(*args, **kwargs) - elif dims == 3: - return nn.AvgPool3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -class HybridConditioner(nn.Module): - - def __init__(self, c_concat_config, c_crossattn_config): - super().__init__() - self.concat_conditioner = instantiate_from_config(c_concat_config) - self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) - - def forward(self, c_concat, c_crossattn): - c_concat = self.concat_conditioner(c_concat) - c_crossattn = self.crossattn_conditioner(c_crossattn) - return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} - - -def noise_like(shape, device, repeat=False): - repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) - noise = lambda: torch.randn(shape, device=device) - return repeat_noise() if repeat else noise() \ No newline at end of file diff --git a/ldm/modules/distributions/__init__.py b/ldm/modules/distributions/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/ldm/modules/distributions/distributions.py b/ldm/modules/distributions/distributions.py deleted file mode 100644 index f2b8ef901..000000000 --- a/ldm/modules/distributions/distributions.py +++ /dev/null @@ -1,92 +0,0 @@ -import torch -import numpy as np - - -class AbstractDistribution: - def sample(self): - raise NotImplementedError() - - def mode(self): - raise NotImplementedError() - - -class DiracDistribution(AbstractDistribution): - def __init__(self, value): - self.value = value - - def sample(self): - return self.value - - def mode(self): - return self.value - - -class DiagonalGaussianDistribution(object): - def __init__(self, parameters, deterministic=False): - self.parameters = parameters - self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) - self.logvar = torch.clamp(self.logvar, -30.0, 20.0) - self.deterministic = deterministic - self.std = torch.exp(0.5 * self.logvar) - self.var = torch.exp(self.logvar) - if self.deterministic: - self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) - - def sample(self): - x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) - return x - - def kl(self, other=None): - if self.deterministic: - return torch.Tensor([0.]) - else: - if other is None: - return 0.5 * torch.sum(torch.pow(self.mean, 2) - + self.var - 1.0 - self.logvar, - dim=[1, 2, 3]) - else: - return 0.5 * torch.sum( - torch.pow(self.mean - other.mean, 2) / other.var - + self.var / other.var - 1.0 - self.logvar + other.logvar, - dim=[1, 2, 3]) - - def nll(self, sample, dims=[1,2,3]): - if 
self.deterministic: - return torch.Tensor([0.]) - logtwopi = np.log(2.0 * np.pi) - return 0.5 * torch.sum( - logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, - dim=dims) - - def mode(self): - return self.mean - - -def normal_kl(mean1, logvar1, mean2, logvar2): - """ - source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12 - Compute the KL divergence between two gaussians. - Shapes are automatically broadcasted, so batches can be compared to - scalars, among other use cases. - """ - tensor = None - for obj in (mean1, logvar1, mean2, logvar2): - if isinstance(obj, torch.Tensor): - tensor = obj - break - assert tensor is not None, "at least one argument must be a Tensor" - - # Force variances to be Tensors. Broadcasting helps convert scalars to - # Tensors, but it does not work for torch.exp(). - logvar1, logvar2 = [ - x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) - for x in (logvar1, logvar2) - ] - - return 0.5 * ( - -1.0 - + logvar2 - - logvar1 - + torch.exp(logvar1 - logvar2) - + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) - ) diff --git a/ldm/modules/ema.py b/ldm/modules/ema.py deleted file mode 100644 index c8c75af43..000000000 --- a/ldm/modules/ema.py +++ /dev/null @@ -1,76 +0,0 @@ -import torch -from torch import nn - - -class LitEma(nn.Module): - def __init__(self, model, decay=0.9999, use_num_upates=True): - super().__init__() - if decay < 0.0 or decay > 1.0: - raise ValueError('Decay must be between 0 and 1') - - self.m_name2s_name = {} - self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) - self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates - else torch.tensor(-1,dtype=torch.int)) - - for name, p in model.named_parameters(): - if p.requires_grad: - #remove as '.'-character is not allowed in buffers - s_name = name.replace('.','') - self.m_name2s_name.update({name:s_name}) - self.register_buffer(s_name,p.clone().detach().data) - - self.collected_params = [] - - def forward(self,model): - decay = self.decay - - if self.num_updates >= 0: - self.num_updates += 1 - decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates)) - - one_minus_decay = 1.0 - decay - - with torch.no_grad(): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - - for key in m_param: - if m_param[key].requires_grad: - sname = self.m_name2s_name[key] - shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) - shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) - else: - assert not key in self.m_name2s_name - - def copy_to(self, model): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - for key in m_param: - if m_param[key].requires_grad: - m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) - else: - assert not key in self.m_name2s_name - - def store(self, parameters): - """ - Save the current parameters for restoring later. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - temporarily stored. - """ - self.collected_params = [param.clone() for param in parameters] - - def restore(self, parameters): - """ - Restore the parameters stored with the `store` method. - Useful to validate the model with EMA parameters without affecting the - original optimization process. Store the parameters before the - `copy_to` method. 
 After validation (or model saving), use this to - restore the former parameters. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - updated with the stored parameters. - """ - for c_param, param in zip(self.collected_params, parameters): - param.data.copy_(c_param.data) diff --git a/ldm/modules/encoders/__init__.py b/ldm/modules/encoders/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/ldm/modules/encoders/modules.py b/ldm/modules/encoders/modules.py deleted file mode 100644 index ededbe43e..000000000 --- a/ldm/modules/encoders/modules.py +++ /dev/null @@ -1,234 +0,0 @@ -import torch -import torch.nn as nn -from functools import partial -import clip -from einops import rearrange, repeat -from transformers import CLIPTokenizer, CLIPTextModel -import kornia - -from ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a requirement? --> test - - -class AbstractEncoder(nn.Module): - def __init__(self): - super().__init__() - - def encode(self, *args, **kwargs): - raise NotImplementedError - - - -class ClassEmbedder(nn.Module): - def __init__(self, embed_dim, n_classes=1000, key='class'): - super().__init__() - self.key = key - self.embedding = nn.Embedding(n_classes, embed_dim) - - def forward(self, batch, key=None): - if key is None: - key = self.key - # this is for use in crossattn - c = batch[key][:, None] - c = self.embedding(c) - return c - - -class TransformerEmbedder(AbstractEncoder): - """Some transformer encoder layers""" - def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"): - super().__init__() - self.device = device - self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, - attn_layers=Encoder(dim=n_embed, depth=n_layer)) - - def forward(self, tokens): - tokens = tokens.to(self.device) # meh - z = self.transformer(tokens, return_embeddings=True) - return z - - def encode(self, x): - return self(x) - - -class BERTTokenizer(AbstractEncoder): - """ Uses a pretrained BERT tokenizer by huggingface.
Vocab size: 30522 (?)""" - def __init__(self, device="cuda", vq_interface=True, max_length=77): - super().__init__() - from transformers import BertTokenizerFast # TODO: add to requirements - self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") - self.device = device - self.vq_interface = vq_interface - self.max_length = max_length - - def forward(self, text): - batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"].to(self.device) - return tokens - - @torch.no_grad() - def encode(self, text): - tokens = self(text) - if not self.vq_interface: - return tokens - return None, None, [None, None, tokens] - - def decode(self, text): - return text - - -class BERTEmbedder(AbstractEncoder): - """Uses the BERT tokenizer and adds some transformer encoder layers""" - def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77, - device="cuda",use_tokenizer=True, embedding_dropout=0.0): - super().__init__() - self.use_tknz_fn = use_tokenizer - if self.use_tknz_fn: - self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len) - self.device = device - self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, - attn_layers=Encoder(dim=n_embed, depth=n_layer), - emb_dropout=embedding_dropout) - - def forward(self, text): - if self.use_tknz_fn: - tokens = self.tknz_fn(text)#.to(self.device) - else: - tokens = text - z = self.transformer(tokens, return_embeddings=True) - return z - - def encode(self, text): - # output of length 77 - return self(text) - - -class SpatialRescaler(nn.Module): - def __init__(self, - n_stages=1, - method='bilinear', - multiplier=0.5, - in_channels=3, - out_channels=None, - bias=False): - super().__init__() - self.n_stages = n_stages - assert self.n_stages >= 0 - assert method in ['nearest','linear','bilinear','trilinear','bicubic','area'] - self.multiplier = multiplier - self.interpolator = partial(torch.nn.functional.interpolate, mode=method) - self.remap_output = out_channels is not None - if self.remap_output: - print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.') - self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias) - - def forward(self,x): - for stage in range(self.n_stages): - x = self.interpolator(x, scale_factor=self.multiplier) - - - if self.remap_output: - x = self.channel_mapper(x) - return x - - def encode(self, x): - return self(x) - -class FrozenCLIPEmbedder(AbstractEncoder): - """Uses the CLIP transformer encoder for text (from Hugging Face)""" - def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77): - super().__init__() - self.tokenizer = CLIPTokenizer.from_pretrained(version) - self.transformer = CLIPTextModel.from_pretrained(version) - self.device = device - self.max_length = max_length - self.freeze() - - def freeze(self): - self.transformer = self.transformer.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"].to(self.device) - outputs = self.transformer(input_ids=tokens) - - z = outputs.last_hidden_state - return z - - def encode(self, text): - return self(text) - 
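A minimal usage sketch for the FrozenCLIPEmbedder deleted above, assuming the openai/clip-vit-large-patch14 weights can be downloaded and a CUDA device is present (the class only moves tokens to the device, so the transformer is moved explicitly here):

    import torch

    model = FrozenCLIPEmbedder(device="cuda").to("cuda")
    with torch.no_grad():
        z = model.encode(["a photograph of an astronaut riding a horse"])
    print(z.shape)  # torch.Size([1, 77, 768]): one hidden state per token position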
- -class FrozenCLIPTextEmbedder(nn.Module): - """ - Uses the CLIP transformer encoder for text. - """ - def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True): - super().__init__() - self.model, _ = clip.load(version, jit=False, device="cpu") - self.device = device - self.max_length = max_length - self.n_repeat = n_repeat - self.normalize = normalize - - def freeze(self): - self.model = self.model.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - tokens = clip.tokenize(text).to(self.device) - z = self.model.encode_text(tokens) - if self.normalize: - z = z / torch.linalg.norm(z, dim=1, keepdim=True) - return z - - def encode(self, text): - z = self(text) - if z.ndim==2: - z = z[:, None, :] - z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat) - return z - - -class FrozenClipImageEmbedder(nn.Module): - """ - Uses the CLIP image encoder. - """ - def __init__( - self, - model, - jit=False, - device='cuda' if torch.cuda.is_available() else 'cpu', - antialias=False, - ): - super().__init__() - self.model, _ = clip.load(name=model, device=device, jit=jit) - - self.antialias = antialias - - self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) - self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) - - def preprocess(self, x): - # normalize to [0,1] - x = kornia.geometry.resize(x, (224, 224), - interpolation='bicubic',align_corners=True, - antialias=self.antialias) - x = (x + 1.) / 2. - # renormalize according to clip - x = kornia.enhance.normalize(x, self.mean, self.std) - return x - - def forward(self, x): - # x is assumed to be in range [-1,1] - return self.model.encode_image(self.preprocess(x)) - - -if __name__ == "__main__": - from ldm.util import count_params - model = FrozenCLIPEmbedder() - count_params(model, verbose=True) \ No newline at end of file diff --git a/ldm/modules/image_degradation/__init__.py b/ldm/modules/image_degradation/__init__.py deleted file mode 100644 index 7836cada8..000000000 --- a/ldm/modules/image_degradation/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr -from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light diff --git a/ldm/modules/image_degradation/bsrgan.py b/ldm/modules/image_degradation/bsrgan.py deleted file mode 100644 index 32ef56169..000000000 --- a/ldm/modules/image_degradation/bsrgan.py +++ /dev/null @@ -1,730 +0,0 @@ -# -*- coding: utf-8 -*- -""" -# -------------------------------------------- -# Super-Resolution -# -------------------------------------------- -# -# Kai Zhang (cskaizhang@gmail.com) -# https://github.com/cszn -# From 2019/03--2021/08 -# -------------------------------------------- -""" - -import numpy as np -import cv2 -import torch - -from functools import partial -import random -from scipy import ndimage -import scipy -import scipy.stats as ss -from scipy.interpolate import interp2d -from scipy.linalg import orth -import albumentations - -import ldm.modules.image_degradation.utils_image as util - - -def modcrop_np(img, sf): - ''' - Args: - img: numpy image, WxH or WxHxC - sf: scale factor - Return: - cropped image - ''' - w, h = img.shape[:2] - im = np.copy(img) - return im[:w - w % sf, :h - h % sf, ...] 
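modcrop_np above trims the two leading axes so they become divisible by the scale factor; the degradation entry points below repeat the same mod-crop inline. A small worked example (plain numpy, using modcrop_np as defined above):

    import numpy as np

    img = np.zeros((257, 255, 3))  # leading dims not divisible by sf
    out = modcrop_np(img, sf=4)
    print(out.shape)               # (256, 252, 3): 257 -> 256, 255 -> 252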
- - -""" -# -------------------------------------------- -# anisotropic Gaussian kernels -# -------------------------------------------- -""" - - -def analytic_kernel(k): - """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" - k_size = k.shape[0] - # Calculate the big kernels size - big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) - # Loop over the small kernel to fill the big one - for r in range(k_size): - for c in range(k_size): - big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k - # Crop the edges of the big kernel to ignore very small values and increase run time of SR - crop = k_size // 2 - cropped_big_k = big_k[crop:-crop, crop:-crop] - # Normalize to 1 - return cropped_big_k / cropped_big_k.sum() - - -def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): - """ generate an anisotropic Gaussian kernel - Args: - ksize : e.g., 15, kernel size - theta : [0, pi], rotation angle range - l1 : [0.1,50], scaling of eigenvalues - l2 : [0.1,l1], scaling of eigenvalues - If l1 = l2, will get an isotropic Gaussian kernel. - Returns: - k : kernel - """ - - v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) - V = np.array([[v[0], v[1]], [v[1], -v[0]]]) - D = np.array([[l1, 0], [0, l2]]) - Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) - k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) - - return k - - -def gm_blur_kernel(mean, cov, size=15): - center = size / 2.0 + 0.5 - k = np.zeros([size, size]) - for y in range(size): - for x in range(size): - cy = y - center + 1 - cx = x - center + 1 - k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) - - k = k / np.sum(k) - return k - - -def shift_pixel(x, sf, upper_left=True): - """shift pixel for super-resolution with different scale factors - Args: - x: WxHxC or WxH - sf: scale factor - upper_left: shift direction - """ - h, w = x.shape[:2] - shift = (sf - 1) * 0.5 - xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) - if upper_left: - x1 = xv + shift - y1 = yv + shift - else: - x1 = xv - shift - y1 = yv - shift - - x1 = np.clip(x1, 0, w - 1) - y1 = np.clip(y1, 0, h - 1) - - if x.ndim == 2: - x = interp2d(xv, yv, x)(x1, y1) - if x.ndim == 3: - for i in range(x.shape[-1]): - x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) - - return x - - -def blur(x, k): - ''' - x: image, NxcxHxW - k: kernel, Nx1xhxw - ''' - n, c = x.shape[:2] - p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 - x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') - k = k.repeat(1, c, 1, 1) - k = k.view(-1, 1, k.shape[2], k.shape[3]) - x = x.view(1, -1, x.shape[2], x.shape[3]) - x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) - x = x.view(n, c, x.shape[2], x.shape[3]) - - return x - - -def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): - """" - # modified version of https://github.com/assafshocher/BlindSR_dataset_generator - # Kai Zhang - # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var - # max_var = 2.5 * sf - """ - # Set random eigen-vals (lambdas) and angle (theta) for COV matrix - lambda_1 = min_var + np.random.rand() * (max_var - min_var) - lambda_2 = min_var + np.random.rand() * (max_var - min_var) - theta = np.random.rand() * np.pi # random theta - noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 - - # Set COV matrix using Lambdas and Theta - LAMBDA = 
np.diag([lambda_1, lambda_2]) - Q = np.array([[np.cos(theta), -np.sin(theta)], - [np.sin(theta), np.cos(theta)]]) - SIGMA = Q @ LAMBDA @ Q.T - INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] - - # Set expectation position (shifting kernel for aligned image) - MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) - MU = MU[None, None, :, None] - - # Create meshgrid for Gaussian - [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) - Z = np.stack([X, Y], 2)[:, :, :, None] - - # Calculate Gaussian for every pixel of the kernel - ZZ = Z - MU - ZZ_t = ZZ.transpose(0, 1, 3, 2) - raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) - - # shift the kernel so it will be centered - # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) - - # Normalize the kernel and return - # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) - kernel = raw_kernel / np.sum(raw_kernel) - return kernel - - -def fspecial_gaussian(hsize, sigma): - hsize = [hsize, hsize] - siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] - std = sigma - [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) - arg = -(x * x + y * y) / (2 * std * std) - h = np.exp(arg) - h[h < scipy.finfo(float).eps * h.max()] = 0 - sumh = h.sum() - if sumh != 0: - h = h / sumh - return h - - -def fspecial_laplacian(alpha): - alpha = max([0, min([alpha, 1])]) - h1 = alpha / (alpha + 1) - h2 = (1 - alpha) / (alpha + 1) - h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] - h = np.array(h) - return h - - -def fspecial(filter_type, *args, **kwargs): - ''' - python code from: - https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py - ''' - if filter_type == 'gaussian': - return fspecial_gaussian(*args, **kwargs) - if filter_type == 'laplacian': - return fspecial_laplacian(*args, **kwargs) - - -""" -# -------------------------------------------- -# degradation models -# -------------------------------------------- -""" - - -def bicubic_degradation(x, sf=3): - ''' - Args: - x: HxWxC image, [0, 1] - sf: down-scale factor - Return: - bicubically downsampled LR image - ''' - x = util.imresize_np(x, scale=1 / sf) - return x - - -def srmd_degradation(x, k, sf=3): - ''' blur + bicubic downsampling - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2018learning, - title={Learning a single convolutional super-resolution network for multiple degradations}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={3262--3271}, - year={2018} - } - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' - x = bicubic_degradation(x, sf=sf) - return x - - -def dpsr_degradation(x, k, sf=3): - ''' bicubic downsampling + blur - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2019deep, - title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={1671--1681}, - year={2019} - } - ''' - x = bicubic_degradation(x, sf=sf) - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - return x - - -def 
classical_degradation(x, k, sf=3): - ''' blur + downsampling - Args: - x: HxWxC image, [0, 1]/[0, 255] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) - st = 0 - return x[st::sf, st::sf, ...] - - -def add_sharpening(img, weight=0.5, radius=50, threshold=10): - """USM sharpening, borrowed from Real-ESRGAN. - Input image: I; Blurry image: B. - 1. K = I + weight * (I - B) - 2. Mask = 1 if abs(I - B) > threshold, else: 0 - 3. Blur mask: - 4. Out = Mask * K + (1 - Mask) * I - Args: - img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. - weight (float): Sharp weight. Default: 0.5. - radius (float): Kernel size of Gaussian blur. Default: 50. - threshold (int): Threshold on abs(I - B) * 255 for the mask. Default: 10. - """ - if radius % 2 == 0: - radius += 1 - blur = cv2.GaussianBlur(img, (radius, radius), 0) - residual = img - blur - mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') - soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) - - K = img + weight * residual - K = np.clip(K, 0, 1) - return soft_mask * K + (1 - soft_mask) * img - - -def add_blur(img, sf=4): - wd2 = 4.0 + sf - wd = 2.0 + 0.2 * sf - if random.random() < 0.5: - l1 = wd2 * random.random() - l2 = wd2 * random.random() - k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) - else: - k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random()) - img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') - - return img - - -def add_resize(img, sf=4): - rnum = np.random.rand() - if rnum > 0.8: # up - sf1 = random.uniform(1, 2) - elif rnum < 0.7: # down - sf1 = random.uniform(0.5 / sf, 1) - else: - sf1 = 1.0 - img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - return img - - -# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): -# noise_level = random.randint(noise_level1, noise_level2) -# rnum = np.random.rand() -# if rnum > 0.6: # add color Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) -# elif rnum < 0.4: # add grayscale Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) -# else: # add noise -# L = noise_level2 / 255. -# D = np.diag(np.random.rand(3)) -# U = orth(np.random.rand(3, 3)) -# conv = np.dot(np.dot(np.transpose(U), D), U) -# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) -# img = np.clip(img, 0.0, 1.0) -# return img - -def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - rnum = np.random.rand() - if rnum > 0.6: # add color Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: # add grayscale Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: # add noise - L = noise_level2 / 255.
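    # The else-branch below draws channel-correlated Gaussian noise: D is a random
    # diagonal and U a random orthonormal basis (scipy.linalg.orth), so
    # conv = U^T @ D @ U is a valid 3x3 covariance matrix, scaled by
    # L ** 2 = (noise_level2 / 255) ** 2 before sampling.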
- D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_speckle_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - img = np.clip(img, 0.0, 1.0) - rnum = random.random() - if rnum > 0.6: - img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: - img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_Poisson_noise(img): - img = np.clip((img * 255.0).round(), 0, 255) / 255. - vals = 10 ** (2 * random.random() + 2.0) # [2, 4] - if random.random() < 0.5: - img = np.random.poisson(img * vals).astype(np.float32) / vals - else: - img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) - img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. - noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray - img += noise_gray[:, :, np.newaxis] - img = np.clip(img, 0.0, 1.0) - return img - - -def add_JPEG_noise(img): - quality_factor = random.randint(30, 95) - img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) - result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) - img = cv2.imdecode(encimg, 1) - img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) - return img - - -def random_crop(lq, hq, sf=4, lq_patchsize=64): - h, w = lq.shape[:2] - rnd_h = random.randint(0, h - lq_patchsize) - rnd_w = random.randint(0, w - lq_patchsize) - lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] - - rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) - hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] - return lq, hq - - -def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - img: HXWXC, [0, 1], its size should be larger than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]
# mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - hq = img.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - img = util.imresize_np(img, 1 / 2, True) - img = np.clip(img, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - img = add_blur(img, sf=sf) - - elif i == 1: - img = add_blur(img, sf=sf) - - elif i == 2: - a, b = img.shape[1], img.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') - img = img[0::sf, 0::sf, ...] # nearest downsampling - img = np.clip(img, 0.0, 1.0) - - elif i == 3: - # downsample3 - img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - img = add_JPEG_noise(img) - - elif i == 6: - # add processed camera sensor noise - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf_ori, lq_patchsize) - - return img, hq - - -# todo no isp_model? -def degradation_bsrgan_variant(image, sf=4, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - image = util.uint2single(image) - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = image.shape[:2] - image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] 
# mod crop - h, w = image.shape[:2] - - hq = image.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - image = util.imresize_np(image, 1 / 2, True) - image = np.clip(image, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - image = add_blur(image, sf=sf) - - elif i == 1: - image = add_blur(image, sf=sf) - - elif i == 2: - a, b = image.shape[1], image.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') - image = image[0::sf, 0::sf, ...] # nearest downsampling - image = np.clip(image, 0.0, 1.0) - - elif i == 3: - # downsample3 - image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - image = np.clip(image, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - image = add_JPEG_noise(image) - - # elif i == 6: - # # add processed camera sensor noise - # if random.random() < isp_prob and isp_model is not None: - # with torch.no_grad(): - # img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - image = add_JPEG_noise(image) - image = util.single2uint(image) - example = {"image":image} - return example - - -# TODO: in case there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc... -def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None): - """ - This is an extended degradation model by combining - the degradation models of BSRGAN and Real-ESRGAN - ---------- - img: HXWXC, [0, 1], its size should be larger than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - shuffle_prob: probability of shuffling the degradation order - use_sharp: whether to sharpen the image first - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]
# mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - if use_sharp: - img = add_sharpening(img) - hq = img.copy() - - if random.random() < shuffle_prob: - shuffle_order = random.sample(range(13), 13) - else: - shuffle_order = list(range(13)) - # local shuffle for noise, JPEG is always the last one - shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6))) - shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13))) - - poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1 - - for i in shuffle_order: - if i == 0: - img = add_blur(img, sf=sf) - elif i == 1: - img = add_resize(img, sf=sf) - elif i == 2: - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - elif i == 3: - if random.random() < poisson_prob: - img = add_Poisson_noise(img) - elif i == 4: - if random.random() < speckle_prob: - img = add_speckle_noise(img) - elif i == 5: - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - elif i == 6: - img = add_JPEG_noise(img) - elif i == 7: - img = add_blur(img, sf=sf) - elif i == 8: - img = add_resize(img, sf=sf) - elif i == 9: - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - elif i == 10: - if random.random() < poisson_prob: - img = add_Poisson_noise(img) - elif i == 11: - if random.random() < speckle_prob: - img = add_speckle_noise(img) - elif i == 12: - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - else: - print('check the shuffle!') - - # resize to desired size - img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])), - interpolation=random.choice([1, 2, 3])) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf, lq_patchsize) - - return img, hq - - -if __name__ == '__main__': - print("hey") - img = util.imread_uint('utils/test.png', 3) - img = img[:448, :448] - h = img.shape[0] // 4 - print("resizing to", h) - sf = 4 - deg_fn = partial(degradation_bsrgan_variant, sf=sf) - for i in range(20): - print(i) - img_hq = img - img_lq = deg_fn(img)["image"] - img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq) - print(img_lq) - img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"] - print(img_lq.shape) - print("bicubic", img_lq_bicubic.shape) - print(img_hq.shape) - lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) - util.imsave(img_concat, str(i) + '.png') - - diff --git a/ldm/modules/image_degradation/bsrgan_light.py b/ldm/modules/image_degradation/bsrgan_light.py deleted file mode 100644 index 9e1f82399..000000000 --- a/ldm/modules/image_degradation/bsrgan_light.py +++ /dev/null @@ -1,650 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np -import cv2 -import torch - -from functools import partial -import random -from scipy import ndimage -import scipy -import scipy.stats as ss -from scipy.interpolate import interp2d -from scipy.linalg import orth -import albumentations - -import ldm.modules.image_degradation.utils_image as util - -""" -# 
-------------------------------------------- -# Super-Resolution -# -------------------------------------------- -# -# Kai Zhang (cskaizhang@gmail.com) -# https://github.com/cszn -# From 2019/03--2021/08 -# -------------------------------------------- -""" - - -def modcrop_np(img, sf): - ''' - Args: - img: numpy image, WxH or WxHxC - sf: scale factor - Return: - cropped image - ''' - w, h = img.shape[:2] - im = np.copy(img) - return im[:w - w % sf, :h - h % sf, ...] - - -""" -# -------------------------------------------- -# anisotropic Gaussian kernels -# -------------------------------------------- -""" - - -def analytic_kernel(k): - """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" - k_size = k.shape[0] - # Calculate the big kernels size - big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) - # Loop over the small kernel to fill the big one - for r in range(k_size): - for c in range(k_size): - big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k - # Crop the edges of the big kernel to ignore very small values and increase run time of SR - crop = k_size // 2 - cropped_big_k = big_k[crop:-crop, crop:-crop] - # Normalize to 1 - return cropped_big_k / cropped_big_k.sum() - - -def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): - """ generate an anisotropic Gaussian kernel - Args: - ksize : e.g., 15, kernel size - theta : [0, pi], rotation angle range - l1 : [0.1,50], scaling of eigenvalues - l2 : [0.1,l1], scaling of eigenvalues - If l1 = l2, will get an isotropic Gaussian kernel. - Returns: - k : kernel - """ - - v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) - V = np.array([[v[0], v[1]], [v[1], -v[0]]]) - D = np.array([[l1, 0], [0, l2]]) - Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) - k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) - - return k - - -def gm_blur_kernel(mean, cov, size=15): - center = size / 2.0 + 0.5 - k = np.zeros([size, size]) - for y in range(size): - for x in range(size): - cy = y - center + 1 - cx = x - center + 1 - k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) - - k = k / np.sum(k) - return k - - -def shift_pixel(x, sf, upper_left=True): - """shift pixel for super-resolution with different scale factors - Args: - x: WxHxC or WxH - sf: scale factor - upper_left: shift direction - """ - h, w = x.shape[:2] - shift = (sf - 1) * 0.5 - xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) - if upper_left: - x1 = xv + shift - y1 = yv + shift - else: - x1 = xv - shift - y1 = yv - shift - - x1 = np.clip(x1, 0, w - 1) - y1 = np.clip(y1, 0, h - 1) - - if x.ndim == 2: - x = interp2d(xv, yv, x)(x1, y1) - if x.ndim == 3: - for i in range(x.shape[-1]): - x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) - - return x - - -def blur(x, k): - ''' - x: image, NxcxHxW - k: kernel, Nx1xhxw - ''' - n, c = x.shape[:2] - p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 - x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') - k = k.repeat(1, c, 1, 1) - k = k.view(-1, 1, k.shape[2], k.shape[3]) - x = x.view(1, -1, x.shape[2], x.shape[3]) - x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) - x = x.view(n, c, x.shape[2], x.shape[3]) - - return x - - -def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): - """" - # modified version of https://github.com/assafshocher/BlindSR_dataset_generator - # Kai Zhang - # min_var = 0.175 * sf # 
variance of the gaussian kernel will be sampled between min_var and max_var - # max_var = 2.5 * sf - """ - # Set random eigen-vals (lambdas) and angle (theta) for COV matrix - lambda_1 = min_var + np.random.rand() * (max_var - min_var) - lambda_2 = min_var + np.random.rand() * (max_var - min_var) - theta = np.random.rand() * np.pi # random theta - noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 - - # Set COV matrix using Lambdas and Theta - LAMBDA = np.diag([lambda_1, lambda_2]) - Q = np.array([[np.cos(theta), -np.sin(theta)], - [np.sin(theta), np.cos(theta)]]) - SIGMA = Q @ LAMBDA @ Q.T - INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] - - # Set expectation position (shifting kernel for aligned image) - MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) - MU = MU[None, None, :, None] - - # Create meshgrid for Gaussian - [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) - Z = np.stack([X, Y], 2)[:, :, :, None] - - # Calculate Gaussian for every pixel of the kernel - ZZ = Z - MU - ZZ_t = ZZ.transpose(0, 1, 3, 2) - raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) - - # shift the kernel so it will be centered - # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) - - # Normalize the kernel and return - # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) - kernel = raw_kernel / np.sum(raw_kernel) - return kernel - - -def fspecial_gaussian(hsize, sigma): - hsize = [hsize, hsize] - siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] - std = sigma - [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) - arg = -(x * x + y * y) / (2 * std * std) - h = np.exp(arg) - h[h < scipy.finfo(float).eps * h.max()] = 0 - sumh = h.sum() - if sumh != 0: - h = h / sumh - return h - - -def fspecial_laplacian(alpha): - alpha = max([0, min([alpha, 1])]) - h1 = alpha / (alpha + 1) - h2 = (1 - alpha) / (alpha + 1) - h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] - h = np.array(h) - return h - - -def fspecial(filter_type, *args, **kwargs): - ''' - python code from: - https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py - ''' - if filter_type == 'gaussian': - return fspecial_gaussian(*args, **kwargs) - if filter_type == 'laplacian': - return fspecial_laplacian(*args, **kwargs) - - -""" -# -------------------------------------------- -# degradation models -# -------------------------------------------- -""" - - -def bicubic_degradation(x, sf=3): - ''' - Args: - x: HxWxC image, [0, 1] - sf: down-scale factor - Return: - bicubically downsampled LR image - ''' - x = util.imresize_np(x, scale=1 / sf) - return x - - -def srmd_degradation(x, k, sf=3): - ''' blur + bicubic downsampling - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2018learning, - title={Learning a single convolutional super-resolution network for multiple degradations}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={3262--3271}, - year={2018} - } - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' - x = bicubic_degradation(x, sf=sf) - return x - - -def dpsr_degradation(x, k, sf=3): - ''' bicubic downsampling + blur - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor
- Return: - downsampled LR image - Reference: - @inproceedings{zhang2019deep, - title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={1671--1681}, - year={2019} - } - ''' - x = bicubic_degradation(x, sf=sf) - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - return x - - -def classical_degradation(x, k, sf=3): - ''' blur + downsampling - Args: - x: HxWxC image, [0, 1]/[0, 255] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) - st = 0 - return x[st::sf, st::sf, ...] - - -def add_sharpening(img, weight=0.5, radius=50, threshold=10): - """USM sharpening, borrowed from Real-ESRGAN. - Input image: I; Blurry image: B. - 1. K = I + weight * (I - B) - 2. Mask = 1 if abs(I - B) > threshold, else: 0 - 3. Blur mask: - 4. Out = Mask * K + (1 - Mask) * I - Args: - img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. - weight (float): Sharp weight. Default: 0.5. - radius (float): Kernel size of Gaussian blur. Default: 50. - threshold (int): Threshold on abs(I - B) * 255 for the mask. Default: 10. - """ - if radius % 2 == 0: - radius += 1 - blur = cv2.GaussianBlur(img, (radius, radius), 0) - residual = img - blur - mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') - soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) - - K = img + weight * residual - K = np.clip(K, 0, 1) - return soft_mask * K + (1 - soft_mask) * img - - -def add_blur(img, sf=4): - wd2 = 4.0 + sf - wd = 2.0 + 0.2 * sf - - wd2 = wd2/4 - wd = wd/4 - - if random.random() < 0.5: - l1 = wd2 * random.random() - l2 = wd2 * random.random() - k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) - else: - k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random()) - img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') - - return img - - -def add_resize(img, sf=4): - rnum = np.random.rand() - if rnum > 0.8: # up - sf1 = random.uniform(1, 2) - elif rnum < 0.7: # down - sf1 = random.uniform(0.5 / sf, 1) - else: - sf1 = 1.0 - img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - return img - - -# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): -# noise_level = random.randint(noise_level1, noise_level2) -# rnum = np.random.rand() -# if rnum > 0.6: # add color Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) -# elif rnum < 0.4: # add grayscale Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) -# else: # add noise -# L = noise_level2 / 255.
-# D = np.diag(np.random.rand(3)) -# U = orth(np.random.rand(3, 3)) -# conv = np.dot(np.dot(np.transpose(U), D), U) -# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) -# img = np.clip(img, 0.0, 1.0) -# return img - -def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - rnum = np.random.rand() - if rnum > 0.6: # add color Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: # add grayscale Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: # add noise - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_speckle_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - img = np.clip(img, 0.0, 1.0) - rnum = random.random() - if rnum > 0.6: - img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: - img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_Poisson_noise(img): - img = np.clip((img * 255.0).round(), 0, 255) / 255. - vals = 10 ** (2 * random.random() + 2.0) # [2, 4] - if random.random() < 0.5: - img = np.random.poisson(img * vals).astype(np.float32) / vals - else: - img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) - img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. 
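    # Poisson (shot) noise scales with the signal: vals acts like a photon count
    # in [1e2, 1e4], so brighter pixels receive proportionally stronger noise.
    # In the branch below only a luma image is perturbed and the residual is
    # broadcast back onto all channels.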
- noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray - img += noise_gray[:, :, np.newaxis] - img = np.clip(img, 0.0, 1.0) - return img - - -def add_JPEG_noise(img): - quality_factor = random.randint(80, 95) - img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) - result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) - img = cv2.imdecode(encimg, 1) - img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) - return img - - -def random_crop(lq, hq, sf=4, lq_patchsize=64): - h, w = lq.shape[:2] - rnd_h = random.randint(0, h - lq_patchsize) - rnd_w = random.randint(0, w - lq_patchsize) - lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] - - rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) - hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] - return lq, hq - - -def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - img: HXWXC, [0, 1], its size should be larger than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - hq = img.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - img = util.imresize_np(img, 1 / 2, True) - img = np.clip(img, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - img = add_blur(img, sf=sf) - - elif i == 1: - img = add_blur(img, sf=sf) - - elif i == 2: - a, b = img.shape[1], img.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') - img = img[0::sf, 0::sf, ...]
-
-
-def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
-    """
-    This is the degradation model of BSRGAN from the paper
-    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
-    ----------
-    img: HXWXC, [0, 1], its size should be larger than (lq_patchsizeXsf)X(lq_patchsizeXsf)
-    sf: scale factor
-    isp_model: camera ISP model
-    Returns
-    -------
-    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
-    hq: corresponding high-quality patch, size: (lq_patchsizeXsf)X(lq_patchsizeXsf)XC, range: [0, 1]
-    """
-    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
-    sf_ori = sf
-
-    h1, w1 = img.shape[:2]
-    img = img.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop: trim H and W to multiples of sf
-    h, w = img.shape[:2]
-
-    if h < lq_patchsize * sf or w < lq_patchsize * sf:
-        raise ValueError(f'img size ({h1}X{w1}) is too small!')
-
-    hq = img.copy()
-
-    if sf == 4 and random.random() < scale2_prob:  # downsample1
-        if np.random.rand() < 0.5:
-            img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
-                             interpolation=random.choice([1, 2, 3]))  # cv2 flags: 1=linear, 2=cubic, 3=area
-        else:
-            img = util.imresize_np(img, 1 / 2, True)
-        img = np.clip(img, 0.0, 1.0)
-        sf = 2
-
-    shuffle_order = random.sample(range(7), 7)
-    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
-    if idx1 > idx2:  # keep downsample3 last
-        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
-
-    for i in shuffle_order:
-
-        if i == 0:
-            img = add_blur(img, sf=sf)
-
-        elif i == 1:
-            img = add_blur(img, sf=sf)
-
-        elif i == 2:
-            a, b = img.shape[1], img.shape[0]
-            # downsample2
-            if random.random() < 0.75:
-                sf1 = random.uniform(1, 2 * sf)
-                img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
-                                 interpolation=random.choice([1, 2, 3]))
-            else:
-                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
-                k_shifted = shift_pixel(k, sf)
-                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
-                img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
-                img = img[0::sf, 0::sf, ...]  # nearest downsampling
-            img = np.clip(img, 0.0, 1.0)
-
-        elif i == 3:
-            # downsample3
-            img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
-            img = np.clip(img, 0.0, 1.0)
-
-        elif i == 4:
-            # add Gaussian noise
-            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
-
-        elif i == 5:
-            # add JPEG noise
-            if random.random() < jpeg_prob:
-                img = add_JPEG_noise(img)
-
-        elif i == 6:
-            # add processed camera sensor noise
-            if random.random() < isp_prob and isp_model is not None:
-                with torch.no_grad():
-                    img, hq = isp_model.forward(img.copy(), hq)
-
-    # add final JPEG compression noise
-    img = add_JPEG_noise(img)
-
-    # random crop
-    img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
-
-    return img, hq
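End to end, the function above maps one clean image to an aligned (LQ, HQ) training pair. A minimal usage sketch with a random array standing in for a real photo, assuming the helpers defined earlier in this module (add_blur, fspecial, shift_pixel, util) are in scope; the output shapes follow from sf=4 and lq_patchsize=72:

    import numpy as np

    hq_img = np.random.rand(512, 512, 3).astype(np.float32)  # stand-in for an RGB image in [0, 1]
    lq, hq = degradation_bsrgan(hq_img, sf=4, lq_patchsize=72)
    print(lq.shape, hq.shape)  # (72, 72, 3) (288, 288, 3)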
-
-
-# todo no isp_model?
-def degradation_bsrgan_variant(image, sf=4, isp_model=None):
-    """
-    This is the degradation model of BSRGAN from the paper
-    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
-    ----------
-    image: HXWXC, uint8, range: [0, 255]
-    sf: scale factor
-    isp_model: camera ISP model (unused in this variant; the sensor-noise stage is commented out)
-    Returns
-    -------
-    example: dict with key "image": the degraded low-quality image, uint8, downscaled by sf
-    """
-    image = util.uint2single(image)
-    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
-    sf_ori = sf
-
-    h1, w1 = image.shape[:2]
-    image = image.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop: trim H and W to multiples of sf
-    h, w = image.shape[:2]
-
-    hq = image.copy()
-
-    if sf == 4 and random.random() < scale2_prob:  # downsample1
-        if np.random.rand() < 0.5:
-            image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
-                               interpolation=random.choice([1, 2, 3]))
-        else:
-            image = util.imresize_np(image, 1 / 2, True)
-        image = np.clip(image, 0.0, 1.0)
-        sf = 2
-
-    shuffle_order = random.sample(range(7), 7)
-    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
-    if idx1 > idx2:  # keep downsample3 last
-        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
-
-    for i in shuffle_order:
-
-        if i == 0:
-            image = add_blur(image, sf=sf)
-
-        # the second blur (i == 1) and the camera sensor noise (i == 6)
-        # are disabled in this light variant:
-        # elif i == 1:
-        #     image = add_blur(image, sf=sf)
-
-        elif i == 2:
-            a, b = image.shape[1], image.shape[0]
-            # downsample2
-            if random.random() < 0.8:
-                sf1 = random.uniform(1, 2 * sf)
-                image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
-                                   interpolation=random.choice([1, 2, 3]))
-            else:
-                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
-                k_shifted = shift_pixel(k, sf)
-                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
-                image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
-                image = image[0::sf, 0::sf, ...]  # nearest downsampling
-
-            image = np.clip(image, 0.0, 1.0)
-
-        elif i == 3:
-            # downsample3
-            image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
-            image = np.clip(image, 0.0, 1.0)
-
-        elif i == 4:
-            # add Gaussian noise
-            image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
-
-        elif i == 5:
-            # add JPEG noise
-            if random.random() < jpeg_prob:
-                image = add_JPEG_noise(image)
-
-        # elif i == 6:
-        #     # add processed camera sensor noise
-        #     if random.random() < isp_prob and isp_model is not None:
-        #         with torch.no_grad():
-        #             img, hq = isp_model.forward(img.copy(), hq)
-
-    # add final JPEG compression noise
-    image = add_JPEG_noise(image)
-    image = util.single2uint(image)
-    example = {"image": image}
-    return example
-
-
-if __name__ == '__main__':
-    print("hey")
-    img = util.imread_uint('utils/test.png', 3)
-    img = img[:448, :448]
-    h = img.shape[0] // 4
-    print("resizing to", h)
-    sf = 4
-    deg_fn = partial(degradation_bsrgan_variant, sf=sf)
-    for i in range(20):
-        print(i)
-        img_hq = img
-        img_lq = deg_fn(img)["image"]
-        img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
-        print(img_lq)
-        img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
-        print(img_lq.shape)
-        print("bicubic", img_lq_bicubic.shape)
-        print(img_hq.shape)
-        lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
-                                interpolation=0)  # 0 = cv2.INTER_NEAREST
-        lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
-                                        (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
-                                        interpolation=0)
-        img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
-        util.imsave(img_concat, str(i) + '.png')
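The __main__ demo above is also the intended calling pattern for downstream code: wrap the light variant in functools.partial with a fixed scale factor and read the LQ image out of the returned dict. A compressed sketch of that pattern, with a random uint8 array standing in for the test image:

    import numpy as np
    from functools import partial

    deg_fn = partial(degradation_bsrgan_variant, sf=4)
    hq_uint8 = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)  # stand-in for a real photo
    lq_uint8 = deg_fn(hq_uint8)["image"]
    print(hq_uint8.shape, lq_uint8.shape)  # (256, 256, 3) (64, 64, 3)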
diff --git a/ldm/modules/image_degradation/utils/test.png b/ldm/modules/image_degradation/utils/test.png
deleted file mode 100644
index 4249b43de0f22707758d13c240268a401642f6e6..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 441072
[441072 bytes of base85-encoded binary PNG data omitted; not human-readable]
zsP`oc+`LxyB(tz&_muN-oSb3zW8M3xd~d%(8U5?0kN3;V$=S_4pPqeUV(#s4-@bkO z_wSxWu=a5i<^^SL=DYuHyT7OY|G&S#zu$iQ?cKX;+2787omG@HZO-I*M~^?=ymi|( zrxvLrlZ!WAGxHVRbKYgENb;Gq-MO>U=Bf3%uG{)-(iizE`ES4fvKC0F+g@x-JfQlP zr?|`a@!H~3zXEr!kE)!vY)+S2;(HdlgEw1-&ZUy>stg)(43a zk8EC_;9z*M#?;N_%0nIXud^7l*M_Ms-MrZ91ed|}%2lBj({+@tMLZ47I{E$R^`MZG zOHR!8)3OS@dMYbNEII48nn~}%HM|;3LV>rp`S zFK0Yi$uC#>Mj=RA#9_tOs`pMAJ&H?t_2pS67H@l%)G|{${8{C!=au*JwjZn(Jf|t0 z-O^q5{b1$aik_`4de6@&NxfV?_09JEzkM}b-FH28P+3&Ej!Ut_pkGJnrhsdZ-SW7W zu5ZOxb@;xVYveHgf3y6yf2Otv539ER-{YsfD7GJ|-BBmQz1ibV* zGI|A-oQlrr@~+sb5U?Ywd1=Gieb&E3ZGTREy>8ximzIvHHw73u(yxUrc(Uf%)P@sV z<>t?SR(a#ywrjhBggK(0&)t4^S?%9vUuUn6i%ibwQ|?}&+;fdH<%DVW>m^(ME?=%5 z$+p$-SV!A8^Y&>`EPj>eEOX6jHJ2{w$euRKaM^1s!IfL9o;z_o|18Py_36_oUTdcq zp7%8MdDV6Q%iZg<_vQYIdTry+(xn_;sy^%4#pwLsy^ln9-{oWYqG|bjO=~Q?C=F>k{Z?FHi+W!Cf|MK;p-!nM$ zve%o}%vt`@@0P$U0jJ9*k1cBc{0MySmF#oQ^CT-zP|~TLJcljL7T-48eRtjKU3c?} ztE$d#WHT4W0&7keojFuZC>(}D8J>2d*aqjzkK=f z@_u>yV!iRh8=6%h2`t?5^wm<$T;S`W~zf{6Y z@7lvlKc*`wX>DYhkhr7cO5J8oMT_ToeC!Mtl>gOO#p{a;K0K8nz}eBWsOMZ!=(LL_ ztpX)s+pd-6UN$)x>3&Q>$-w&cmXoVOgHB!REcIjQ==nD9jL>t7^MzlFwkoy=>@f}d zo;&NTE>leE93}5mF;|CGE7xCckDc}YmC4!QwcZ=oY?a7v=07vXd)Bj?k=ISuwq3D! z?!=*Z;QRgh|F5pT{`~XFoU<#}#;o1=x?lLLiAP|erKL*sHZOs*`NIcHU3hhbT*yUVQ3SxHXLU7k;ao>j6j z%G<2Xl)4tpaN+yfCyPGC{?$BZ$r5NdK{45U_SK_}9IPD9w~0J({g%@ z_UjL8O(fsP9*gnTR879EH8*z0y2a8RI@MDy{|sF26ZLY|(kGcI>1+Ak-LuTHwyocK zsaa=E>Dw5`X;=MLv2stD<9xa0!XDG<^3RzS6s6A2cXL)bYI7fR$@C>59^(lDu&b!{X|tI{OzIcd!)xwp5~K8@01jtB_*`zZF?qmLW^ z%#lc*vph<>V+H3FE6Izy8qB>|R4(Pud&bE)dzqpNTbP%2_moLj?`Aq4D$?6p6sjH? zoE>;3at&+mlayysYfTv@Ju^I;+!ZX^XK3mxo?~z?^0`w+#|f?vCcbMj)RMd`m$IBu z6_~9PTnyzhA3VvBpUJ zO37y5{f2(>a~xHs$9NeY(>QwA+;`sY%stQMICB^td;Qfac6s*Rm~-h$T1(A*E55#( z>ZW4-{MNSE6-sBePMHyOF4M0wdvDBo&0_+G%C^5YpFPRD>*Ad^b4x#)2ufzF9E+UW z%fP@HsbaEl%BPUW_ILK*%rQG>Ey===wU*0iQs5+${@=RH+n`OWe=Yp9KC(@m6MU|6^UaoHiWl$B{=NSG-rf7_ON%#)H_quc z3|q|S{nTXnoFcu*BNM(Jd$#X&4#&ZtZ__s4%@s_Tb8OCMnafXW@@5&t`5#}d-tcPf z?wh%c=C|*jv(8`RdMHz>v#0plg!G4&=dR7TecQR~<;Tm(mrdg9|GxY9F|*H@K_-v? z+^)wjOYG)9UURKrP3^R;Va!|ie)qjxQoBBJ#<@rand2WzY_0`eO zEfbyKn!5kUgOe|7!sJ|To89}Z_4Tcrn&H+k6%NHEEL+1ucVwp&zm}TzO!ZOZ@%2%w zH@|yU@cG@{-IaaEpC_l5-HT3pdxPPi)a$+5k2N{;T#R_USNgc;dB?Zuj1KOSxg1;4 z`4ubVL&TJ`x2!2XCt>WnSnj5#YBI-0!D&HSOfP)beN#FnwQh~qDU0K$pKiJTR)4y& zu-30d8;hgXW$DJ;v|O%MVR+V3T!8bQ@-M)DU9foGR@hN2!aX(A$P632W*?#MDB_WMTVX z0}nseo=04gVzpb=c(DsKh$stkDX-@;S2A*uN{!2MWOy#iu;RtGX{y!%UvG8wYEQXk z(yLazBHUrFnuE%sn4*(MBZ{tBrOy`F60EURVBu2LhQB{-j?K6b^}MNc8>jE(GGF02 zuB$)Sos1NaFg!LRDdfzNsMVR%{Qdo3O|a)_uqdXep{E>#4)Gucu>1ilfu%XbE?Ziw5`SWsY5k86|9(CUzQ6U}%x&DD%T#ArUJnX(J+SZf+TzC+SAur03upcQ zvZSw&ulM@z;LGbm*A^CU=0C`P`0LA;oGy9!`MriqMM^(iJ#2o?SfOo0OXAciOEwv{ z?3i<9=@UyQtNG7YJPG=Au=@S1XMQeow%&caP_lXJwQBo5Wd^BahK)rhclJCsiE9s9 z6`S&dkzq;LE35Z^%66CDYZv(CJHPm(Wt^Tk?;~ZUM8jhnL{{9{cJ}PUXKDN04<$;> z^Y^df-R^sRTd$#u%lf#zv!4I>^5w`TopoF4R=hlU%k1j5w+tVOET>1!TBd8MeLSf3 zgVt8Pr6RpE*8ccYGv|4vcgJS4WiyH|MlYMfrEo4%X>;GU50g9>rmu{gc3GmvBYWk% z;4_*N4ciYd%#_MrGjD^AkIU98g);4lCsbcMzMS*%-qR`BYgZ+-b}gNDN|e+2gG^FPN3amf6Jb`*_dnXDgrdbR1-_Wie{&DB0to z$=Si#*3(%$=lPYWu#e_on}vv`3j~&E&^6=Nn$^`MyuF*g`H+q3aAoS54N}s2mFy zmFGFUhT(CwUEY4&A8RAS^;5Iv9h=kLFeRwyLtKaeWsr?Uj^X={HUjJHm^L_Qf?yvWz2Q_4` z{aUp1%x9HQ_lis0@2d9B^HXPd@jEX3i-yeemsNZDrfXlxTFW|Z&gaL6kBcmJyj(i_ zY~E!v-Az*_2?g%I8bbbJ^~Ex zIXe5owuMt#Yekg(AD=msdz~r6xbKW8$XeLvCx1)-3j5#Wo?_jYtjeFQs}?FVHEhew=QIDcYHPV! 
zoB3?#+ZDY(<@B2UT8v%(3ZC1&>dmGpZHXr=l^aa2Nv*!%x|WrJ!?Spo>x~H??=c>* zIp1-?L|0Np#ic0kX3<5hg|C0hO)T;&?Ax$T>ngLjU@5cHmM4E!Y76Tn#&R1it1r3M zY1pwP$oXUa=jYr1uisyHCYC9lQLZ6CA^qEtc4%Ftt^X$GCZe>9Q+b3~ZU2DqayWRlKF|#ZxAlczG^0 zU{kxkVdDYOB@G68ahK;L?TTxw+q`kz$Az<>b3Jg;nyIDG{r&2yC##p92^JA>Q@FFpt~dt1_QU1(>n^|jwJmqoHQ{NOzh0a+DXE9;WR&#dGeL!aJ&MmgHi-0ch}w#J=*?k|5|`_7%a_rJzw_iSzZ_wW1u z-#;GOay5I*$-Z6jUYgv`!@V*`y>Jxqkh*n-(c31$m{O3u0y1Q)+tM@~oAF z;wqiv-($U7&i%gp@za+NCVt7MBFjHrKYcao_f_-!{P%gwGk^Vfn7nrCG=>*Oe8H)F z_nuF(tN9V&weqy<>wNz0T-RP|ovyxZ;<)6`hr|5SKOeP9o?HF*nJ>q~qMdQ)uFvt- zd~D;Nds|SlA*W~MQoq!o9k(r4>(2jf_q@hx?y}5nWoIn6=O!P}a9ZKEbjJbZkZe_> z*w?*VEmgnPxh%SUJDOovoc!GHwfE0|o)W|~an7WTVb`NNR#aa0y<8zbM~HE)=28iz zr7HxI&S@T7l5H&LS@~2lDtm3{o)wu)o9CQQUwZFKQ0)grmZc?}C%zV)+-Z_)aLhze zBzxzzeDX}R>Kl(5t6&pZmM>0e7ruW4;rJbh;0q=p+U3r=i4cWtRyK9itg;8|7y?~`X_ zl7$5BcD^c&(AJ)@=VqSh+Q0YzF8=-hnSGsrqllqXso)RA743?hjEAR+76f}#SosI7r;&?(JhhTc4HkNpriZF=tb=MUe%{rovoyF6Fg$-w2Dgu;c7b2=7$ z`g_+_(D(9>4<90m+!zw)1-~+%{WeWFS!#LVthBky_I@{SpS^pxb#knxs_NI9CYkfv z3oo=5pWJDb{W-5zp(%uG%AVVKzt3J1+URk+Wy;KQ*|HmGADU{Re;?0lDvH!2?|9@!^BU(E1an72T zyZ3!eoOS$F%=X(-+0QL%|9pwy_BUK)m~!UfyJz3t{VUjU_uckuJEO8g!+j2lBu5r7 zZ{EN2?i;@RdqxTs&wIAc_I+QQyjFjyhH$>j^C{kIHASz)t~I?-uBe)E`&@MMA=Tsj z_O-w6=KoVWw&i9??&UY-*7>z-gSG1Py-tCrW-`-ub{{Q926z^BN)^YZJj&qUwm^m*k zx!1tn#>VHeOUt>P``*tjkGa|qyJl_ZTWA-rh3% zt=jWHf4;2AQ;5AC_4-(0(mKU2#{$;6J`}jyr911RgzGdPHl?7HtCseCho4qmt#w{& zdbGtrYSFUXz2%xuPlfhYimV9Obol!FeQD25|6Js4mpY|Yprc?%T(9Zvv)7`;pI6Q~ zCKBwkF?ZJ8Jaa$gg*WrIE1DGdobh3~_xo(x>pkAZXDkbKe_dpM&p46CRB_3iUQ^pZ zFG+?S*DU>Hx7xKtd7onyDC*QIIGuUz)h3CZjIVxanfRWwT)HBBfuV|ymRI39SJ9)z zYp0%GRJVSrcx=GRrD0|+StnoYz5dmb`~Uj?5C7KxpU%&r60q>a)_!hNxlRG?MW^iJ zu7s^_$d5d>?x;w~g*8F(Mgan*!BRX66pWbGKbyAd@TE+@nL3LMwa>70ut|t1XZ}%A zVh|Mc@Ce%|#T23=xJ;`|@7UtfMOnrQnxZmY7fkv^m%RP8?>_SAMv(`Dg!_IPeul#HykaYcfn}=+#x9&Zw)_Ypl zCw1$c7g@eFdp+}_%-lmds_B}=6RTv;X~wqh()n2R&%)oIL7ba`WkT__Z3;gYd{K{m zRT?YqG~>DD+01!ru}}2AoXRXZcQtB>(0G2t&>qwAVSMX;9~vv+5bQO|Dpfy^iyR`)ukcDo`rn(zUSWF z^036}?BNqjTejVNyX}7M@4J6rU*COK&tz@Y&nGb_CgeA9WnY+~*D}NSc~eH&?A+V= zrAK9-SN5qc)imL8m}(rTn!J5??%K*(ZIx25|M#!5@84N^_EYnQt=EkA9=`Wm`dq>L zou#W+sh97*n!-Ida5%_TMG z@B`PBnd#5(eEsTszjWIQy`=(+l!~&?+&QDoc)+^7qfPEcu((su)m?&CLN}Hc?=);> zb=+I5_aiDg?(aQYSSWw`>b*04_LKNRU37!I7_WS&op$zg zWGJVI?p5}yI+hNOtFN)$Ik0(^)0-(wOblII-pmT~uQR>)W%u>3-_*~)bT*ybf1ruM zA$o0B#vyB^bbr(QF#j7_XDd_9QnyW7JmuFq|2sD_Rhr7B_V3E? zu|Jpn@U7e0U3O{r*BMu$K1X@2T@~cT`gt>3pvLE41#tmiEY81oIJefM{>J;yv(8_x zj9ng76xsdujBdN#_HFB4+nkrTtBJT?+FUNMRz$LhufuTZzW2RaX8NbE$Jbe&zr5Y< z_nGz+y=Q$?+cI{uFq`ZtR&*=zc=9XvZtS%^0v9h|e#~G0~#@@4bvXb>*{CABM$IF+_s`s! 
zG5u#w*t-QyeBS2{?YbTNnRnHs{%!wyUhlG-@2_}7V3B6rS9;cfu|| zMj0jHl^%j@OOCyfIV!NHcx#U3z!6t=*m3>*(s+a5XAf>|vcm zdAqHW)%&vDcMW*%2{z61TV8s${qMn_59Vy%Fk3{pc=ztz(brpN24A1KS>1cJZ*ouC zJe39KtY3=>76$*?a3ySR@oTN;mn~aV4!w?=At?}|a%4u5)Uku?`Q9%jg4ZtHc1xo^`8t>P46R6Gmg!j>>Q=B*DE8}tzVy)m*@E0biEmK z?&YbL!ds6WVq11KXy?ak0ao$ZtO2JdnXkBPv)ec2ZFBiKzF5J;MIN)yXQe;7sHOWn zN&QOoahVg>?4~c#7WUHj?hI^O7~WbM`DWtkrLp&8BBz@ER68W*lfiI4Z0F401@bi< zb~EPgd>fg)@*caq4U>fumvnr+@`RVuz9eSP4my>${r=lBw%ZoZBW~24k^kB1|L@WI zy{tQL{GYvBuf~M)wnX(V%}u)N!Wgd33f!=YZ8qbK&YdqFNp72YMp~-l_K(&Vdjtgw zKQBDcFPNORZ%G#O_NU8oJ#*x<)ff(>`L=R(XsmV0z96-wOVN?zm90h=bHRcos_71r z?IueaPe-XRZ&=mvX_`XIi#yZqW@rj`gm@inQ!8SwY}3y-;~p_j%CD|KPxIY>ebZO%O{%%8;PD<+R(AhtL6FQ z;IliYep(^1XaDZq$$9JJ_HGK12o;@Js>`s&{!QGT8bywB{_pUmy2NVBfD-t21Y|?fd`bvblczzKpH4?|9VCx z5Bjue7PI2d55ga>=1>3rgri_L`|J&+0soK1|Jd&MG5`0@`@g^ZX=w<*e%;)BI}byb z;a6w-Uk{|rU8)y3y_)o0kRf8nG%H{K*OQiZI?tH)_2Iw6CUeVw|25#Lv^={!Jv3v} z$=K_AjJE3?E8L#^?D9{aj@@@NVq>o_kBZGb+ufA4x;5K0UEDp+G5ayw9-Y&bv$ArR zy=7>qli8&=HKxzsot0Nnpi$;T`R_#m?6BJXP!T4xWI7><%$BVt`w;OIfTAmW>TU+9LZO+Nvg);f^dt#;?^)FSbAMaXcs21Q6+hwT5%*0ak)17=4uNC` zhG%KoTMj==*gMO%#cjRzQjX#t!|PEyC5>HOyiP8E{PODQ@bz(fYiepj1Vc}EC2+rf zcr2mKt>a1Txz#H=T;B=h#`qpddp_srjpwY|iqm4wXGwkLXjpsFtaWu^+l6f(r+rpG z)FLNPA^fu`hjmAx@qgLUI$_`Xe{b`zMqRh*?r17XzQ?uM;<>|Bu@4o6?>2mhSf+%KzCZzW-15`-Vn6 zqm86H78jF?1ow_bgv59`Iki(L!B-Ih*v&zZE!*9pg%R9I3N?fVL;b50{O;_n& zwYUtE&vIiSiy}j}i~SY5A2QDFm@(6Jf`>rt2S|y);{qy_I z-98hTt=ROpPTnbFt>A=%f|{y=x?8VBohf9{o1<;5)$u|gI{W;2|6_)xjkEV2Z#{ka zv2(WQkx9WRFWRqskL8)#8m+za+O?dz?=F?G*(;YnzNq*#=-qPuGe*9_*`<-wrUb1` zotN&C(;L69>g#0veZPNQ4z~||#b5v9aQ?o3$4>Qkr7ZgO^}5Ua{Xf5)HTaT$f1m8H zf8V}+tFhW!{W~xA`rW*QXS;t}i^V?coFFGBw=DBj(5vHy&+lY0w}?Ep;M;$A|IfI( z^b+e(gG?@^s1?Wlf4~0!mi>?8|Ky(k0WA#Un;YDCYe!*ffQq;?kKxyq=a>3iezljG z<#X%^x$p$veOqQ&@!`eX*@aP; zl7fVqr}~KQJM;NtiIP)5z17@x>(;f`Nwgib`*rO`rF&L#g6b#L>0!~YcD++NksV_y z{yetpqV@XedXqYmyzc${Tp2Yzb(VNw>A9I_-Pc}yZ^O{_;_~OqA1g#A?pb%*=Jqtp zmM=fPZ27xm%A1$&E~;GjBF)paE`~aW^s-NU6J++;>iHVB*$emO))i8V&8vBdx1%&0R_ z%?zD5WDoB2JW z?3h8B;jz-#>!!*~n@#d=i7@OHXEFF5tsN+Sa@!Bxli!kVDj({dZFcL1@YZ9uqPDJZ zR=V~6SmrDSla>qRbD3Dg8wUA%0qeJkX&$r|iD{Ei0urR1j@8#0&**Z()^Pc^Rf5VvbIaBxpIAK1;bqOZ!(B=bY{eqtGGzV zZ=3keXxhuCtqc-7n@yP;tmIj3txop~229^9kR-pTKQ=l{)KFSL#6pJe8K)7?-lN6l zo7;-calLi#*ARTV>{#Ot0m01ACX5B=CM&<6lc%}zK<`0EP1XR;PMgSelmB|1s}}Yu z%)XcD5qtf;*XmX4L~U%R>hIt6|JCXC&&HXr_Y`h_V=7tmW={52M`mRuEBD!g6>G2U z*>cUnyu9-L-tUgC)8=Gru8Q)KJht)uuIuaLmfI;gWXE{d*461%8moFcpTEjcz4rYi z(_7reN>BI9=~ri+`y6~-Z`$t3VYeR^co@!2jJkf*^8V6-8~w3eu|4;H{wWTb@$6{J zmSsCaF5L1uDJj}_{P7;0&o-?Id4`jQy+`5)(IPLBZ>BuGWm+I_) z&3-*!uI|(MKd!r&tdmfZyn&WqTli2KU91m=KRj0px{rYd+^q}yppTAzO zmw)&E`kY{s$o~1uRvx>-d$(@8@WX{Y3@eoG+^`f+vY2}=JFa!7cXOjLo9yO}?{)j_ z&wP%F)}6{5{@tN>_KK(H3s+y=p}6W+sqELiZ_hM;?`37+4Vt~Ia@MrWnaN$Ns;p$+ zzk9cqz)j56Y(6UC>md`(BlHcq;XS3*yrOo_h zm1h-O?poAMS=e#n>(#5>;`)BeFMs|xBYSI_Ovvvh|x*N7OI=JER!wI`uIG&XsJyaQc zJv7@>^Vn&#SX+zdvLZQa-ySRM-=TN&n&^MmwPSYKizLc>X(a|zJ@ANCJsK(YU*XmxD-}|?8|Gy1KC&e9F$-7RNS@0-F!=e?7yp)t)WlUzdUFGBW_MtiF zwwbAulppio4mn|PPX>onS%jl|D^5lXgL#_^kyg37xf&;^} z4NP37%_fDBwJkFas5UI-aN^F-~Fpk*KAU%YTb5Up2M+aEvNB?ugc!6`f}oH zbGPi_J!MuU(ZIj)^`x77a!>C*R^u7cX?W|$i(rA!%jcAnIwsE9e164Nw*|5C$zOUS zrD{Glf6unOd*62YoJF@8XNtwHQv9*@;nF%l|pP|CjvL(wlGFOy!dbo^Z%MORcS`v6!p-+2p85^7Npd!v;Ku7jT{4w4uYs z=1tk|!w(mnvzMPQcYTk}>c258vBi#8qfT$q+j{N!^5yNne`n77y>ENEprHS9VZpn< zqE)uvj_-ZE@0ywKbBWrnmD|LUG%8sc-mjR#b!zRk$1httu3gYMU1TGhz4rHCyQLvF zR4OZDi)TDv+jZrTPV99{C23VbAHOFd8>jsF@MX)BtH*AhE#fVRy;h!o|J>y*I-f6J z4$fZt+SwSEB zx;}rq^UAGv?<+4V_X|n>^80Y2`MZcGFF#hyTUIEuD=&CKyF^mQveb~P>t5%s-25zt 
zVNbDQ%dJNyB5eUj3}$Q`E@l@@!I4GLO3+<7N0<{rQWUd1jHb zS7<$%bANC3`bF#G_y7Cz`TX4C{QLV%;}%JZ-ZE2q?&BghRprd_=Zja)?$nqb6Qx@8 zL`Gqz)U7ivJpuoW?SF5+|Nmb8)R?Tb&nj;$efR3sDJ}LzEdn|ETc>(HcR9koUSVfS z^qnnX-jR$8yjQ;89aMDg<|or_DZHXCGUdB7=e>XbuK)Pr&+o3j4i|f~{r1}=4z7SK z#v^zCUHJaj#Q*f3h%I{6IO~!`g~N@D_1*XX=SpxQssBU>pme2Jacb{U0F5T*+6LKh1(qBDL+=# z@@p@<9g37@WfymJ2x?@lKIT#4ac=PmHBkqZ=N>IR zQA&q8dODU~iR#XsV%8JJIkD!0rkj(9;Ofwnm(Co|TGf<%SIT`mn}kUjO-;-%8WBz21Fz$Nqi$-jzSzwMO2q zCSz@>;=Y=%T1#A778KcSzx?vcySLV<+xSXmO*378Z2QL8j1P?$Br~V(dEX|;@VxS; z{{M^n-{1e?T|bS(R#14u)9%~n%Dd|?{lEMF>;Dz2{y(XID(}BH{oyLUxzc6L?m?9w zPp%GE*rfM4|KG9uKcwsbhR>Im-}Ak)F+FYCtG)8a<6Mp{^P^9SGQZa@-^))u|m zIQy(bbjECH_b1LAu~NV4w%=d>`qz&iH)b&$*rd1WLy67V%X6+vPMlM0Hf`pr%-GX+ z@7}$9dGq%pIcL>6y2|A)K04%kd-b!t)Qg9=kNQMHEVgI#NO|RE%=<@&sn{8-ucTPOH@>pj-OdFD@aL2P_H%jS7Mq{ z&at(<7oWI`mxu_OzwVk6#B*G`i9Owy|Rhok%o`bE-%z!>e#wTdGVCGE&o5XEa8}bL6@7s`J(*R{n3pq zr7jKYw(1I;aAu88@v6a_#P%*88#XP}C-)?A#ev zZw^%IuiC0%$dG;7X+@9H9vyE@SJ9kko&^kBx3R6NwXeC8w@P)M5^H{D?uj!F zv+tcYaeA_Gno~;O5ib|9eCgn%(EJ6jR6VZ-2Kb*g3N601Zl@Wi-2^p<(kIRw|E|8i zzV_Po{QI{UgGC)mJovwV@9*!weeXw-R_=VSb2ldy*nNz;jZ1aD2C-dh*-IqFy42RnreFHPn?pE+ms+Pa@d#d|CF-@a>W zv*W?ejpbWzzduv*fgLy@TLbHBx}U!Pz5I$E(M{`c$pzYiYeWy$+3wJlCrd)Ug&REy^1fcW`){7x;iyK4+y0z8S*}jc zHQG8{)aquPPoLfEWBGi}>}8%V$L?}4ES*w%HtF3gLnSA@n=-$mSEnt1(lMv_&Mgz) z(>C|_RS52Rzw4YxX_nm{@%3DjzX%51|9aTq#;WuAm*#A9eQ`A5Ie%Uho9x&J=7hwYxkixlH`Cg&?-gS~3+D=*@h9+aJ}$Pu^YVeIwXX1)Wh zA01aI99rej(IDX*!cx4cT+i5hsaMOQC@Xkzk&WySG2T zzwiD%lN$|(xswl>#C)rAX=FLcxM1ZcUI`bM2f6#V-W5$N%*f#mb@6@_8c;fqL(3twzV$}&7-(zK8?`b>u6mGv?`F{TV`TfTq|Ew_! z*x`Nm?c1{z#jh97UzQm+jsLx!#;W3dPtIIoSpABJOUJ11@$>oh|8Az|&rxS7(Gz=g zeEIUInX~WM+s#{Ud9>wPslv5mPhz?Rg=byc)Vbvpm%+QTm9KY;#P9vLMEaM+rL%9( ze*L=j^EtOT`KErx33HTPkFW7y5zh{~y(rK2{nCz?tn&Zk|Ih!oebp!b|K|f97t=PKlNsQOqUH7c>C&q3K6MeCJ-|OnkH429>RKFMfQeq|B@4i^HMIfoe?m}I? 
zL45u5nsco?V~dz#S6?+^Xy&*7DQv&<_FKC<%`x%u#|3s@k^E3IAZQ|p$PP+6z-^WRf;t$W>GCv!aYu9Tp&axQaz=Cfljm|dNI9Ik)T z|F<>%-{-P~_C$u(&2w`9=7g;{$ew(5>5Qz^Tmjj0msLtA9Lu=y$l%M?YeBDG*4*k% zzFvBUHzfF#fVqARYwp^(SlJz+?nl<1y`1uF={-v&@g$SGO*hr%oL{b2`eaU_c<=6q zbBv#Kb(HO%8*?Y@WzvMi^ovzG8_n4rLw&zKogIH|=hLK%-7YMv%;K0=RMlIBKD<`{ zZRXDczvrLRfB$$|$Y8MB_Z438YS!?3Y1dyPxVyF5+RM9aDUr+00;HNl|5#U(8v%%$4j!L=nYP{ft> z@A12vRz)37eOXXp=xF$?fA6DpuLUl;C^8uE9NuwT(ndq+m_c6ed^!2&m6>;%_zhhd zBCcOub~JUKyu4iB@y9mjYp=)0#;-i_oB#d3U%ysgyY;ceNL^0&Y-D#t_UnjU>x`Xy zKb!o$mS4Mn+pVP%P2r+xsS``je%=$&bn)%mx6AYGq}%`fdUotZ^wz2Je;%~+_wz5` z{(h&aL)FJmS7)2g@7?{bU`Ll`+XS738;gq1EnWBd9Ooh}qt#6u3(S7X|KI*^<^Qw$ zf1IDR;3U)bN2yDm$^TA1d))r}`Tv{$ude@9|MB+y|7Z2=7Ow{`+s@+1JrKyZE|!}(tyJ=t-BP_H3XhyLPw!DUXu#uf zCCYlfpUwF`9|qx&Qq%OS`|iIj>+kPB?|(k+N3PiR_P-0|+poQP{I@wZQNI8Ds+!{B z&!3)ZzdyX`%xM#Qd-?07rozfa=PL7=le5oOf4cYeut4&PRzVIg!K(s`co{GMwDEl| zt{=ar=4aOn^J$rh&-6;$SFgUCw_NGz zWxnq&+;4ruN|)vR9__6a&o?TsGIch))5)=y|FVhi<(ms;RL+vx9+Z`>do<;7$Xfm< zG1pz6?>Wi&X8rYB%Q81DeCV_+LAdm7+3p!;vcEQ+Id-VLW$Cs#;_sb%F2)Ev&zv{i z^2davO~>xOyL0$qKz8s{k;ylANzcBc7wMA1(%tvv;Oz89{&{+vBHh<*+xD!crYj7e{PNB# zObmw0&K}`cSif^y)|-X<-e>Dt3Jc!MQ(mgqqWXN#neGj5%63aMcda`em8GdF{OI!M z=A(6O0=4@EABuo{#BK+(YhzTOmxk>Qgr7>%Q4+A+lq}kq!}Y_ z{J8lnJ=J_!M@-C&xv7&()1(vUman~Ox>if7MqpxlJA3HX9g^$bZvOb?X(h|yFD$t> zn_8zZ-aR*Q@6u^&*ChCA*IrAM;cLHZz_TH5;ScU{$QM8!_u zv&Q=*t6-D&qL@>6W~^N!E$DKzj{U;17borQ?4rJ&&OW&_MDNaw2@F$~>b~Bj)6G;8 zyIof6*@M4K36G}Eex1D|pye%hNJ`}OIpM4ZM_0(!CECsRW7`oQ`}X_pjjw;-&HHk$ z{^!@}=Z`-x)m?sB^ZBEW8CMHt@AE!=r@dMzFxSW7$zG?Y^Us_DmrT{`Ny;@nE8n+&uB@u7s`>p*TIp2~ ze}m$Yb^Y6#Rb1ZhExg>@@~g&X(mTUHuNbH^~bgIC9p1dyS2qt^LbGRkh~K^UE>Orkm~F z*Ywv ztFKy`^X&b2_v}Qor6u#u$=|!r_$u#C<@@4u=g%L%_Ns3G_j_B9o$21GA65GO-t4Z^ z*BV{6ocX-z%FazPh<|nzXm;mzOUq=c(@&%GO^tvh$`UY|7gyP@js8GBydo__gWLDSg}cm3>Fa9(VD$N2y0`iJw4KXAF|I6W#H8*R>;atbeV`-Z&F)Bs zM~Im}izrLV#C5NOyx!m2uUs^LU#0Ok>xtW4x;pOto^<;1<;k)i3MGygKHsx?=`7{l zKT< zG9b>J4{X1?!dH*8IZT=rW3e*X@MB$gwku`_3Hxm3Ehvh_x{ z$tj2Hv*mr=jtVR~`;I|?d5W(XkD&{<{$Ih4=X(mjm2?XVyl>WRj?P`0l4@u;d1mpt z*IGhdMAIDI6FzfCmaOv5&waRL4_S%Cy&zrYz4O=_=bbm+HdjUp) z)|+c;=Q?}O-|nlsS?6?6wD82A6(WKT22!>PmisHuNe1w|a`h3t8X*yK(sH(m>y7R$ zLDNzLxqZv}HXM3<`SQyOn|XdmKLsC9@%Qj>w&(D+=h-TF~Ls=CA^u6^1x z&1m*t$K24Cxr^6%$KG!hNbXv6YllhRl8~aEyLSCMIsXS|=-&_joX;6+@W$rX7WQbC z-W89p`FV7Dyl_Cr-OHCh?|L#_Ki}^Ex9#_9zNxirv5%2mm*>RuASTXxt;vcRIDpAjNUa2ZBPG0qJpLZLJ)Z=F*R&`&mPM5z| zKVw?vi#C7zpDJIMUfZTu87ZbD#B`cHavCdx*SVb{-5n->i}Y@~TzeKO7@Yp>Mc)ag ztk9#8r#A7|Uf+J}-v0g0U*^~Un|pr$-?z(`tG{pV6`%ad#($f}-d($9JIZNPt+53(Uo6g*s^*vGIw_eGiGj}*VMRs^TKJ9z_&zB!lrhl!l z>OEYrW0#lVq~gn!^PbpE{hPkCOIsd%-yQ2CyM5Ybou#uX zyhC@b`+iUH@g|*K%cUk2=?99=UA-s#{EviE!?T!WnVbI1Si8kqJvwg1E%EC4bBYaB zL~HfVuif~Ot(HS<-x(zrk(woIr7}6@UD$rjZu#^3cKu~*SW}Ge-r)LlZI#f@Zr6pT zYKs`oSvNBu3`{=u_KG&sg{9Y;ScUVavq}9^o6hen7&0aFY}1i>lFUYd7r&eo`*6nf zfS^@=cA;=)oavf# zw6zrK9<@^pIq-1*A|obU77|F^DxxBLFzciq#s|NeV} z*P3A!m&d#sfdc~V6B-0|%#c~}#-+V*i`+KD>!#cu(|1j=sy-O1t;p!)vr_(;0JHhL zWiNmJ+11B{huPdO z9Yt=5?fCKW^fr60|Ig|_ysy)#<8tcIV<|cpbl~A9rhoVDe=k2K0owjmz+y{n(+aiW~~yQp%9^Sf(=)f<{`r7I^INU|Im_umCg!(;PW`>>tfwvbw(PgK ze%>vGwuaURCwDGfm0=#20?> z&m5PM*zks|M{EDq-7lTnG%GN8?d;<%r3{A0!fwATiJEp)j&t|&EM3cVhLU@X44W<5 z&%Ay4l2afrKQG@oVdC!g-?yU9u@&x91k9PEM0s~d;6<7 z?o!D;T(9QjJzX1m`*5p3(yQw^idk8=wen1-XW4d@Hh$X@-;~SA;I+~+-N5#|g_Kdg zuIa?XzQVa~VkvW`PqBK>)FyzaFK{9Sv2SiH*I>(?85-S9Y#zDcoiwu5e^o*0rEzKLZ1!E2XAo@Hhu& zD++Zo}sx+ z49^1m?@2~v8St-|WVL_Yu^9mjbEiix`~1>E^7Zv2svQ|0Kh$1Ve6(rZTGK#ZL#yXv z#ShGag|AL4UTu1$!a`=u6n1{eSdh)vc7cd{$2Zi{k;FR`1wXEfO 
zy}JuvRQq2kvYdZDzV7GK$A=FupI;}Hykmd8#aus+6HEWd?tjkzr+WV2zu+?tL|8q$5b>F7$XSMzBxs_+_c2v%L_x`>Ab7Q4rFL?6b1f}2Q(srmrUGx?TpejpKaHQBA->Vg{<4N=JmR1mtTJQ^5x6JzwM>5 z(^Bgyc1RyjtiSP;|MjMG&hyK5Km7Xo`fpQtzV>zf{NMTW@9&%Y{HC4l?!|%^D&NnM zKUOz0eD=4X>2nqzPQ3s9@5>Ua`F?ECk-KwOs|B20S`vHxYi-X*n~8TXZj&wC)9bkP zYURv1#oNuzOYiFx#DqkzU6fQ6_x#QU{(HYuBj?Ybe}8{{evSCt=p4~yKngBo^f`WvutPK?YHH2KMdd2@T}cBiCd|xmVc*0(*8#Kvjy9_D`rM# zZxyRPX7_1E=vV$fjrJc-*FT)@t+(v}-?^n%B#$5bRLN{9wsPy-<(mteeD1R`Xdaaa z&J}%jbd_xPmawFr*PB+bJ-1buHOY$Q>(V7N_LM)U{g8O}^vjY>5e=EMrl~47JzRRM zWs6|>XH9d>^~&BcCcJK%rqkCj7_zW19T2b)oH{2s{n{3;=q+lupFf(jc|~FM!EeWu z0zb??ztz3vf$K{q`)fg6#b3JfKiK~{b&5MD@AGtH?=4#wg~fK95EK5Dyp>abne?~& zbB)ii&2%X5`3-LZGV$zY0e4d$8Q&_J0yszu+Ls4 z6cTFKP<(*t%&7pcNIRW_?C=BcHwsM^oGT&=J_qp zj5=0$US&zN&G~F|erp)BSKs;bmu;?Jn^SyxQ{-s2^5SaQ;2%#W`}0f=y%2M)?oF`0)L+I6om)0>tuV<{3i8g2+4K3Y!3--d zM_tYjuL?Ns>7UNI6RC2Zd-@SYH@ZrNhOWy3d@_psDO*-ih+nOe`{=3D) z+Oc4Tw0_5&{~opbCPeD;Ufbc!we9y-_fr}BHc2l1e`B(*xLd~|)@R%9{ju-eUVlVe zP|%mZY5LJ~>+3$h?pWfVFFK*&&W(CGYo+K#Vq#_IEagK=Bd1r+J2UZpjaB{6r_)bw zig11ZJ8}R0nI@%?)4KJmWsWDlnA~CH{WQ#bgH-KBGu7jr27XlRLj^&(05YLxL8&F48P=F-B>97?mV9qRJk{bS`O4kecN=8wZz z9L}ar^|7M)Tg!;urxlu}FWA)I^1F9(W3-b=qrr+UA%&f% zC-F}RWt?KZbA}VpAp$q%9P4mRxLvq{sqwbf0vVb!@ixhdL&B)5_n)P<_ z4UXIY@qRUfenjY2A68|ii9xBOrboHt(E+N+Uq6@BO zeORMhbTC}_rNqBorIqrQ*v!_ zoMO}KZ2!%0`k^yRr|ntCAXWQfmrG09?(MhV-rrLx>}2ro#wMN9o9^7dzqkJX!Md%l zUuzeY*w{Uf38{z-&zl-)LIb+MagHm>9{udAtvdG`71*{^5czP*~) zKX>-Bmp^R$mm4$v5QxfNd)3T$?bRuljMk@~boo;8zoS8uO-bgLVrxWn?$)VcJGZ9B z_8v-@(-gIOsmrZT39-|!Ma=NMUT7uzyztuAITV`@jknYY}S^DC4q71K6^t9C2F!z1$tDo1pq@)RMxU=Aeve3NcE2qo}x?Vb$ zSMN|#U+%e!&ny@Wc=i^va47zWI<@Icd3jOoSq=mJ)3>+hduwjrzI~bCwx)w;jz2bh zzIDnOo!AbykDu;IGaveN&3sz&hn27Alpk8OdRkQL%@dcGPAQG7;qcfZU4DAYna?%# z_4j#`KiED0^W}%es%`D;p%3MM29(5ppV+H|KOZa0=f+{A9Ghr* zVEL4!ZVmaG`=y63tk1p|a$boc;e*fGr%$`DPkSL-Q^5SRGQ)Tfpvx7M7S`yY&c~3dF^q4?f;TGu)Q|{4f+hb2y&e~9*9UuzR~k(e%s8pn`*w!5i68^dMdq@Oj8`w?f3z@uwEva0c-d_> z4;KOR^E?yX9vrx&lc-&EP9b2*={XBJ99-_W^t!OItn$j3eB;`eRYFawU79o3TUiy= z{5<|GtJTqU!W^bkZXd;0ELd=-gR${|yZ)w)@3!xD3$<;sU^-!`oMc||*y5Og-LD_7 zk97+wecCdyGelHi=FSzj4m{p_UxmTtT9Nc28K;c3LO~qM!Vh~hx;x!jm*?P;v$ZH$ z>C=-M4#kR_iwd4>%5>3A$hvy|Z`~B|f>g%!B z6%SlZ{C`q?{=TZOT1#gHv9wIOuJx$q#qU6$gv6k*sR?VYMHTBkDZc)_k7?5qhqRq7 zt5l!gTBXXd{&dy6bKhp~n!bUfZ;+x=U$metwKkxP6X$O@=9#VZ=4(b?2&|pU7~! 
zHaB;^z#%qmhDFL#dDq`#vk8ueQv7VKM zJ?$2IZDsv;rU=(asGQ|l8ggXniE8;IpS2rX*KLjI>-T@YT$y#d&gsX8lXX*N+?hXm zX?`wYY7mIIUe2&AYf|dWU;CUJu4R1)@rs+abc*SZ7Xr*bcdY$xw)#hEoYeJ&^4Y5| zZ;^<;{(Hjtn{SrK-??{Z-;SNTcCFkx@4WEDD*2*yT3+YQp8oXkU0&q$?)J;q4n-Ht z)tw{kt}Ji)JS%&r$YU?f6_q=!K5e_WQTj~4j;QRO%oo)~=N<_b#hqP{zy0>xvc(zk z-m%k`7FJhP)l}>-e|h!m*ROT^*&F6;-pO)frpHN^Uo`1vKH*WKYQ|llERaL4MD3{&Dwi8ZP&HZZTcLB z3Qmrz)=s@Ot(39VH2wD0lGyycl_!seynSX=)uS?$llaR5GCRXw&pfYl)=oM2sG<;qa?<_pYBuKmH7*Hk+HV84ue%fFpSH0^ zQabF+0)B?jD=U7U$#KlM<|UMv?bEXAgfGvWkbty+wG2sjJB!Z+2MDfAI9qT#Vohu@^NIlTiN$kMdaMg__ z#-DvQ@ATTH{fKEtZj|8d%%``7Xnf8`HiivPEC+?Aj7GdqtP>WR??V zH?CgOv)tIQLTTxgpjChUgKKa2e{mC@wd|x;`njE3r|#@r_5EJ^jx(EIYM*seJhJ1N z&Dqbp*0p%_Wv$&gWl9i7UEH>qo+-b~=hszL7hnBeds0+n&fzJ)_Pv*%FZb@}nS4F}-k!>zpPs%BU%z`_x}NIu)Op343*N+ju~Xb7kmR#3B#rglQZu%kd+g@R z1d>KO~YE&m;L`{fUx1@rFM_-3Eoy0tXU!DN}nRy#Wzn-z9`a)o?B$P`PhK8ke(P5G9=+q#yFAhKjHL;ywAG>0ENRJ% zIdP_oo+a`pu=8-9?MVGRn{hA0cImnm;^q8IEbeTJ>r<*8sD&Nrea6V((Izcm6_ya@ zq~gk|w7BJ0gPzqF4;MQN@lW0H9QrShUcD;5|IhLctI*uF)512caoMu2TXxsJ_kmM6 zzJ-TteR_D9UH*?n&-}Vip4qxB7Cp%y7H%uDknuWcIsNaBb(Y;3Ua=e(eM()IY-{7X zbIah%?n0eVr=G(WTv{uxT@(2D@}pqLzOOfzpPsV+;r6SS-bDoqE@|+%yy;B5Uwq9+ z*3I+IKTnj1+i$n!*z2f8Po;HEfByLM>gw>_prf=)BhNqo{IkZaY0`JQ(vL^G#cQuC zU)rSe*=D(fU}4;})S0eF7P0(ZX#IxY?#IiTQ)3Qj;EK!A`I1-<96X-CnJ- zbj$6x%Q&x_*8eEje*5kBc4h{b_l$)SY=W`ZkIuVM8oSXc?(@#M?TH-suHWi8Q$5l6 z^``W^G!P9AJ`Ok%B+@9(9`k z!?yMQe)haF?)p`xMLEH_zRz~8yI1Y7cI%AL&lkQQJ{x+{wPm;7Jpc1Lo20+HY|&fY zu|%SL_ua&L2N&bthFA8!UQ|9~lD*8Xb+41YfBJlWe`#&)*O$xZ-@9|?h={k|&L`XR z_uI|)XIp6TJn4;-m(scrHTG3}Ax-@)eXcbehHjJh=vGEPFO^|`7Us3CGF0$=&ADv7 zND-Nd+zguj4UayE4S0Gh>#|6ByzkiP}UmjGvcBg#RG_5ts^S@oc4r<9(R#(dgSJhPg`_#RD z-_K`%Gp%NycIG+QUQsH+|JzopuXg@k{Z{7>I&-`$D{3Ab`&?plcLrzkvr56h+i|lm z*j=x&lFeSLSz0?isH^?3T@F8g*NWmknWGaJE_O&*KHsy>OsMt2nVa8Uy%K0rZ&~+# z?&tn~|H~#vCC(~5TP)(aM7J_@;(7c3e~z9OS9Iw%=T?;A3qNeI?V6k%Xn&3EldOY1 z=a#M$ewaC{_eJ`Z_}LTYES_Q&Tx0!wPq8L1*RL->u0*X~8yV)BeRkdPTTvVf-?xAM zsWbI@YUK3PnQ5El^j=35_uVVb_I-W#u&v_FI9b7&Gqb%lg)YwjAzc4m|Np1`^2r>E zN)E5A+*90+-H}pURIC(Lth;S7(^lUj@Bg*l&)st{EL*tD!)?m;IZ_N92f07roIhV~ zzRQx=yB_SFD;8L~iX&l}x?)S=9ZUCV(TQ`MtygJjsr1b$&WoL%Yx`^ur{a~gFs0{a z*DH+=Z4W&7Vq)0~fxFw@2Qn}`-@{@3Dfa!fQdS4y?Kd0q&fH$k%0{i3Xy-diFS7Yj<7MLV5dePr$8zc<>NB-qc#NE6TK zJF>)|ZmV)}*&aIi?6(um+c|>Y&oN%_W9(}XZT5TOTGOI{olo~{Sn>JYvu9Ea(<zgI3>Gxx62(wd|!$gnX% z!LaB->zUex)eW4kS0a{_C9a%$ra$)7Q7)<2FM0}0md_0)JboatDwUrMElzX z1m%tw#)dcic(&Q!x#RSt>8WQc-~RqrU{Mll>^W)Oe%rd%mP7Pu> zxMQM=L~z&A8;31@Cm+mC+1tlBxwm-r=UozAsZ}3JV{Lc7eyy#voNM>pdoo&Y_;k1G zolg3BYwcEnWW(<)3@#?7xAp&Ck~iGYcc6n|Ymu>Uzp_!cpNmMQ&K^DO?QHgJ^*58w zEVwe~^RBe#G1tGc{k(N^%L=~Tx86+KUar%Upi_By`wTUIy@%(vecl{nlKn7y^|okM z79}T#)Wd;uzRx!H%NEu5U2^f#+B0FM55J|Y6LT_%*lqZqx&HV0|Mvg){y(zhxYnu? 
z%cbIC?Xr9NIlAIbXr9}d%eG7407C+|;<)@(30nCandh z8k`K9lts;=Cg;kFxa@4?(q>>#o>jkS)e*%g(XaRNIo|H#_{a<0TWZ}#uz zOvYRVHV0>~-l>o07>noc-j~D9!*TKW6d}t^v8IAE-|k(zbB?m;w(VT{;rFJM_`k}} z%hM@7@8Nq}MNjEWnx<^ho}IOcFJ5obSthb`=e~Ep>*k6p)fd;kt*xl3dU8#maMrSy zHi{2-t!cUSy-?=z*R%h=z7Aj6t-18lEB?9U5 z7IW_S&ktelNbG#L+A;*RxSv<3mtu$W4=p*XC)&+=(*RdVSQpy86g=LrG3o7Lm#P zW%~K?dR@z2-t_1>XDRP=Cye!+^?awM2YWU>OVwR_bjdwyh7z{^Mc?(5EI%I$a{HQb zML{$nN#vL9|DE-R&OP`Y|I=Kj)+@AZcdYi;e}`k|?~*vwzMKF3{rfXnv{=hax7L6C z6c)1Y=dahFKYupTzyIspbN%>zdv@;ocdmT@@7MK@?Kl2Tjh*(g#%BI<{rz>n&t8B3 ztWxr~;u4X`>d!-FaJbHLtUtV?`sb&odD~}CfBN$!r=Tv!#$~gT+s>`&J|7_&-AnUMBD23 z?AUwv{rCM}qxYw3C^1}#%8|};J!HhdTWT}c|FcOn!xmlsH{lzq17=lxGE{q>+rL3F zU}k84XUdy`h05>eD&NnV>l$L(R$wz}Raxx!&Gx^o>z;SV5clSk+ zQ}=%BsTFNmcUu0};rf=_Vp*@6pZZLjb6s`)yg4t9@6j{uyqza=ea`l<^Ro;Bp3mu; zP;7YYSk1g;L95I6y|;c|b8hF>oN7&0(M{PC&Q?Cy^ySvI5T>B$trdC9D!i)}aoZg| zv#oJcI-5k#mvxIAZY|xi!eDdPtex?DUfd25S+o6|r9rzyl22Kk>l683yX(*I|M$GE z)cS<@p@aQpu1eC5&fA&NwjXo5!|>F%ywPJv-~++!8l})>IN((<9yQ7gb{pM5m z0DcY)E_3buZf{EZTta7>FI!^5(i9ab8sqTEj8QqDV_{+JbkpZY-&C2;n%JEdl&_*} z?XJl3T;utkb>5HNI##9GKJ9h#nEi-1D|fBogEaoUC}+{!wRtfC(piGpxo-P|47#@O zElhZ~IL>9>ym@E-ob=n_Yj3wj;n9?Re}6uIoL6o)cUq~b+>wU`7H__v`(2)YUv|F# z@4x%Dmf6^)KYRRf#x9Xy8@c(-@2k_>AMg16 za{kYix5c-gt9EK{S^lzzL38OkdDkUdSKY3?G9!8|153`br7kW8-)fV?1e9h>fBEs@ z$A1s&%=_F<-FVNhpBJxF`S;Vi_WpkUx^A9Z=Qg{pep{Z*bMeyg<(YL=6+8Za`1hw~ zeWj~I*qz?PuV=4b_x@Sw+iNR46pubQoLB$V{}1#3v-L0i|7$NeqP2uedgg^idWqSM z0)hdB)sM~piq*=T6A1_~|1Um)?E!Duf!^=Ty{@h&tX9(Hk&a9xVX!S2sT_OJ*l~XqoVYnDnsstB}N<-Can`{KK=gZ ztqU%!6c+4%ady{P*|n%P)JDTrQcj{A*{=0tM-d|5H`+xs^czS&8*AE{y%<1c$t^R(leA~YF)yyAxrfkmZk(qlweRtT7 z!iPmxvS&3`TsgFMWfT8BA#qvDPa?sa7N+mNUmly~%Q3g(+OBzh$9F%;&i(%VyXMk; z@&d8vBt>dI+^c;2O!stDc5PJ^Q^SM9SFbuAnHm(u{Q16MgL`FiE7Wr-5)bJbt9^Ks5G zzxP=|Y3r5CKUI{(+%}sY{r0$1>a$h)`@POF(_BO6d_MQ^?%lm>j|iByT=0C}u`WL^ zFF$WtX4Ius3eP{EZ2!Q$|M&M3Pn!()ao%!qjW|^p`RueTue^2Vl~Ruo@65B;o}4ps zJyU(%ntiL5_sOM6hMnac2cx0_T^YDo7-FY06hw+MY*yR&S(T0TX6w7!7wvH~y!Fmi zD+}JZ6Xs=lSwD}b;heB`cIflF7x+(aia33{JT^BsGR;`QVEMY#NS;c^Lt!`f6bQc9 z|8f3*<^Qkd|JA;Ih>KCgPLGx0z#Hzl%Auys^^Y5FzWDMnTV~Q??ogkrC)C1rhg=oZ zT9{XwDIAcmmHOp&joS45(|gw5I#JWIMrnmoQ?Z#ZV}nt%0$WvGU05i~r-RW+J0cT7 z%Q=F0T7p|$fCw>JAFFutd zd2S8YncF|Iv%KA(#4v=|pD)%+-G940_IhcoUg|ux>mH5S+P@zbSj4moOv}8>n<&Hn zxP*82-NSzm&+>l!>)*Rt?X6q6PT95X^Io;*e(kRNZ{zmtsQdV6n#|N)&r;*gy{)jh zXKyq2{O1dA>p!X2J-fQv{K*7Y(S@d>fhPV6Y0uK?GQO=7tPHiBucqW>xT&`9(9)31 zpMPq0{r>Rq<;#wSnF`ZQ`jR^y+RT-gpYQiv*z&m2nqAKxb+nXP$@|#}r%6mr3l|=>PJU0b9H7`V(tEpR4<2@y@Aa)t>j0R&9PrGo z<%!IKmIob9UY!~Y4_J&hKQUEKd!GEk;|T*}pl`yWJu`P$K5vy~vMO=oX5;P*U|>J} z^}YVmnjODSFP(D#&)NMyH{1W-toUR0>%-6Q|GM_tKkj|`ZVBeiYrGlVFDwc9`}O{h zwVywR|1++?E?@uk)3$U?5-{=NuK-*|;Vp_frpe4f3H!RxPQ zbMN<2zF`Z&_tr$k%7jp1t~&usH7i*X5VD=$t>Z=l8xG<`2SCg|J7apP(RvM zxkzi)FB6{HS4x7L3eNp3vHP#P|NZypF-{x|8NdI`+WejG)NfX)lB!c zxYu}I+K?9D($YFX%lzu26PE9txf%?7i#2aPIu*6L`E}==O+E}azQ%SOlnVMBwtlPF zDy~z9RMx$(a^bOKd7j7&K7H-~i~HZ(|AE%iCtvYj7DNfgZ zL6K>*==(X93pW-dD;^Rsf1M?mt8;7zBdeIPn1lSHt2Qo7rzDk^3R9=I-d)boOUPbvAlIE<(#eprJ>eIde9rgtAJYnZ_O#~I^Tit`d>2{teW5aw|I#4g zN<$UKv}L!l9&ZcNmAAEW^eduyXxJIj}(Q;Z*!>op%sF*0-*X?1BVl2AQS{oHv| zX-8*CM*zc}quGKX7Z)ym5W}D#_Hp($eKz@%szr)FmdJKKuzG%L)_UG529~0fI7SAh zCjy=hvpe|DG1Q7~-P`fu~ke66vu-}vhNuh#kVm#uuy9vT5Uu4($7 zIm=B%VIhb>kq72n>n`gK;@qiq}tMeT>>7?)*Q z`9DA2?*2T+r|`w^8M{(z>+R+ruRGwMc$3$qLu0#n`0l^^rf?m4krA7nd~xfPn7zfZ z_6iql^e>w{pOmY+an0$^pC<~$*ME9-v|IdMu%_iPlajl;^bWn5;~g8GJ#*R3UA(Ww zSwp5?udLd!pWCoSATNI%W9B^ZG)9568UhOb_zBluV&tn}e!?xF}vjsghk4_2t zaL!sKL_kSoYS5|;e-6F>=PJm1(enNFWovFu-Eq-E(BPQDqiLD*o}auYaCw)h7~_p2 
zxk+|c<6NVYOSzJEFj=|x&hBzxcHz1=TX|*PZZ@aWNoqWHEdpnZJZH4tTxPpzPRo=z zi=(uwy8mWy1bErW^~ZW~S^0my^?jxH<;qh_E8eePzrKd!&aF*nj`?h~QqkfN*kdTo ztyf_vmT~T9(CW|cMG$8FrklcWI2!JI%B4jFclo zv?tr<)%UDEm#z?27OZGIc&gZ{p?$f%{p(h)u(Rs9?laFVri|MS^V%pa_-xYA6I(s zEoPjMbX_qq`Ca?J0tCRTbDl|#O$Dk9A}S&+Z#Hy=2Lz^bJerlg za#hy0>np?^fA@!NTh#HUV}>LHv(@yZ=~nVfu3XC!R53i%pSv$n*&~-tb(Pk#e+B1U z8#tsKlvnJ0SaLKoTeMRri&56%ed|Kb;{q`s((W{{9IR9^Te(If?vKyK9QTP6nz$v( zA3wZ4PptLz`CVoIe^l7by*8(H;gna+)qh{EeqZ-o{o2xd_wB9T%gf0XY3ARnDXaav zH|k2eMuPXsw+zZjx=%~2&VTlC@!0od(&t&;D^tA>a~bv=76=ZRIwi>di?hASy&~D- zb6d@n9jDHD`sn^+=N-GOdLDoHQ?teTTg|*_zkf$%T`zr{C(%|nYq`ju^nAPOkDr!$ zdi9+@|Nee`MOD?jbHbM2|NQy$=;+%Eyq9gBPvYF@ZEI=e|2%b`W9jz$@v`gq+M79~ z%O97_Tb9|U`L1mDLeSX=y@z#`I5u!jD?Yn$%e5q)mKnu{UKef7+U+p;qS%r;({cNi z$B7rW85xT8wqD!+wg2D7OH#$k%feo| zD=2y=J09oAYU^Ssu$mjPElq-lCf`vR5y^{ISH!OPV2JW_sc_-Fz{^7ojB+li2?VbUxg^0ZgDOuY>Vhfr zyjSa7I+rfUsmS)8@dLa4kB5K%?zLr{wtwFIc?=3mpUhpqmf^+OM9?j@*Vo70yAxM# zX!-uE$>WFr9kR2=f++SyS+(AbLr~Cp(jN<^YYfM&tEt5_8y(JS0yf=7S~_DZau?; zmoHatUA5}ToTFRbuV1$-(c#Penx9Xv+y6QE`0nrJyUV}J+fCV1^?cc9%{GIQ*phhy zL0&6gx9xkp=b1#(eb)DUicAGzd=WMc%n3J45}ou)`zDHbHZll^J}-^+5>DIvI%HzZ zj;+@YG`#JdiSy7~W)K7aoBcK*IyyZ2kqKbL>6=Gu4L>#D~tv~+9=-uFqty>bY21q*su<_{m&9^}IX$ ze@{=>Z%;H~-ROF7p5OA>vuBHM-_F*|pkNuz*ZELFKYp)FdtThL)vsrp=ijqf6H=(qW7juQ5U4FS`RY|OP(v}U_UMBj372 zUu#=6p?JD^^=6^C>r-d`UT1D}`gqfn&sM(AAAjEb^wy6T6HW9hV{<|uKKyiaYuM+% zNe|O5*|rEcWzXi?P`J5QN=d4LrD1_c(4Y4I^X~s&{_jrxmX8PZkG7|7b~Q_yJ4<-S zqCmer>I|!Td#7!%^w#XyB+>oaN&C1r?`E-r)nzUfCo9z2#jROP1!tU1IwgI!+5Xt3 z36WMp3OAVo(2eHoPhA=!(QgRlMXzXFfK(>AP&{kB>0 zJU;(_UHp$f*7qGte&7A~b+!Nc{QL}$BDIXzy01P z|BG?Q$6e2YUfJlz|9eoKzyHsppAQfJ{F3rOC%pJgIE%&;js#8BsdJ`RzT9PZwC?Ti zeeW-1^+?{*(_wTQ-_|f^kW_pL;<(&q~2U$be^GfF}U-x?NXNkfW zzwYM6M{%FN^Y!oQaDDsQ&uoJC_ttKIc4tTL;Rm*D5)50W7@hyT>)936=W|>w+a<2e z$yJj+J~#HSq1sJ8sS?e|xQiFcpX^btudlbCd)_0$+j73&svA+Ff~+sv?b;IBvYvlF z68eVeSH>jiCUdo4yL4Xf`WCzW%;`Nk({AOsNo;F!YLQ41)>}Fy=$GEn2_YM=nE75d z(cQT%^m>%G(6*?x%3F)|lv2+wO?e)3T|MB&w)itHJ9fUm@zQw7>*JdaWos%P+xPx> zVS4FBOVNq^^6!#5K)WCMJ=|h*)?N)t`eN8J|L@NEe|zKqmooHTWPdN9xMbm#AZ?R8 z9j7OqF$L#-q};OWY%+jlKihVlw7{8od?ib;8 zuEF5^^DC1!8El`En%uZqqwD)Tfn*Lt7KZtZ*Y^ksY&J+|@HJd${CBdbP4ka!_I-Ek zKdJwJ@c-}qf7u-x^5tw9M<+_$=s07P5@%k0EoEul6y9Rj(M{ax-$yDPMa*417+S3Au3U4f3r=DJ?6cw$vbj1oL zgW{f(kL+AyIyz^ZIr*%{>V26q)0*tMy1IGKHJ4ub{`l&{4bNh{N}v4Ncb?5b_jBF$ z;MUYg9noJ8A12OPcKdd8aUM&9+uOJ1@&3<$e*F0G?sWSd_20i5&F?vo7QozXGIeXn za~8(A9;xg0Jw0YQJxZE;`GaWfRLz*k%?iex%9#uyEYE9I`o%s<*Mv$qnW(NfcxC67 zCq9LS{JrFq zU)O~;!yWJcu`rax+Sh)(9bfbD=yto`*V5#7BdFfZXJ2CP1@7$YZ`@n!tvF-EElJfHDyY5-*YdwFdqqO^O zMQyEYa!9MdA&Z{a>%tdzr7_gj)va4wn5=Z??YDCmcj?65=dX?TYK;P|*ZThc-Ytco z>|Ra5N+tU*ic7!z`LkzTcDCq*ZK0pb*4?SwZ(efl(CusGHyc_+f;$#oRXe1b9{TZ| z@#jr?)2=Vps-6C3){3obvp1eSRcZCyRGH~>nYN@j-+?pdPVU_BF;uYk_zu0-Th82( z`dZtwF|I=XUD91{hK<*zOk!NJH6*yz{Mn?v{cqbfBzMf*3O+|{HAZJ^!C(P28E@$g^%{eUt7BM(Vd8BV|jL^ zS70Vb}66!p4a%081x0>vkI8*n+>S;-FHOowIm6>v#;hk~PGA;9rLJrUQ zb+3;X?%7}+tDD=n^L1POhxh+#>tFrP{cT_@d#`Vj>VivZI|D-<6qP8X&I228{Uv(z|IRac*TpsE+pRIm>((?JEZgG8=hyUK?MNZ3{w=6U2x{<#3<8Pn7 zeVgoW|MT4T{ldv7EQJ%jJg4m3=-l72HPo5uWMfm;jHi1Aeg@52#kSw~(LL*;C$^n0 zZ1j7xH6yQzDW?2eB;T0k9$|djT~F)uo+qWIH8yP(H{SDyn_G$(>HSFimu6Nh#d>j1 zfdBH#A1hprgs+d=U-wn3>>{JBt^L0G-{1bdJN)rw#s7zI58u6AIxE!tzUA+ii?(^P zi77aRWj1}=a4q49L_~A^bk*mrX7TIW4)qDRFgcXmV(qwCYPe(u*9$$h%(X?qp=q|e z?^Hf|>l2h1>#cp}!~f;4&d*ahIq9pP&-BeRX6|-($=h0Xf9d zbKU=W$iC~3@{dE654nFf2tMTKRAg;FXvDxKp;Ie<=iRGSr%s*e_;B{jxkamL0zSWR z>MAKO3rh)Gvis-7#jEar|IPE~o{!J*=iVm@{YMMt*C3y(R+VPt(@PrV-bV8W?%BV4 z{<4#nSJs9Kp0yNgpK|GGQSMr$OA|gXjh(i1=@e&<*HLXYQ-b#WeP?ZY*0T5Tk8f{p 
zPbuQ5*%|Y!a%E`Q?zyu=y(4E-*57!)takO^kCz|6EK)jj_~D0VZ(pZI?l50;W!LjM zM=!(NH@Tl&Y_r~l-|gA8H?(|#lY&U;k;cWkHb2iUny)SadW%FZ` ze+T9NJzd^!-*9UC{rWxocT2ae$!fW>MfstHucFHhD_4gjb3X4nQW|M1b)`J;^QINw zrfgczkfp2GGQ~)-ZHjf=-eXB`v3pz18;i?+^e`# z)-3XYZNss(4!6GbHJ>&XIB~cnF}m!hLb~$$JO-=xPXl%-2sM6>krNTy_H+N~7q!eA z|EgWj;}EztjiGt1sneUii<=*Ye&2n!&RlSF7kjxPV@Rf!RFXuI(vj(X|E<>V{Vozb z{a*Inj3W&VZ7n`BK|cG^m;@%MZ592vb9$av-h#8fF8fZFZL?pMlu&-3eYu_I`=wJV zBjxAM>pTAZ+O5l%FaNAzY?=S}-1|RQ_x}pt|K}6;vH~01=}`=8u3Ub#t*?E^z2&&A)$t+bidN|9v<-zIN;F`zs@N#l4q*S2M%-{PowRW%(B@{MN_W zGF%RNwd;(1&G)z8@Bb;Xn-AJ6oYlKdWJGR7dW~8?N*T%2ak?74@!(p zi*K90Iu>%<8ZC~@d?8IXm{qyISpSS=2X!ZJif1c{sEew6NsZTTc*sdv;e?GB|pS&+- zz04;A-}b|abJRk=o|tz{ddAgDUYcn;&YU*!jj#QBb+&mvfBW8&&RIRr1J7Fzl47KnMtaM|LHEZ_3iPY<{AckH+?zbhj_&TZE7)YxhFjvkOnJkz-R z?z*ctzeMc zruNTwW%)XOeEDIq{wRaq^lsy;pA1;|?9PA4cAS2%Dm;*ZTjDCuS(mwuOLzbMx8eT& z6-OBqT((6{uUrAKfd%4eG*#d{hp`75Q= z=T8;7yu1AUCxcy5!RFc$Pv*RT_x{S0s=eF7a#!y*ds4Xl{@1wvh8z!0-oIb4V!u2NCSM%xP z-$}{oB6qXVKZOg>z~24;q6|YFGqjy zFtj|F5?9|{%k9RImA1(3#ReV~m4x88YlSDjzj`5daojQI4&5N~G@LxK^bHpO& z;rxG9Mwbni6iRhO?!FRM`$0Q)&axeA7tOGlvf%r^GuHI_B+=ce49>_>RM85Of`7hO7I&b_DY;U_XZ3|HOM-~aRJ*Q!GMdY^~q|Npc% z_ft09TlY6@yL6~)NKIKwhvssL=CY5k&-BV_uDvGU#Idk*#;RF68Z+b8>FG)zH)P>U zlwJPv-@m-brkUGroq9EEog#~w>&3Ho1d>^fq{f}eky4-Yp=$3uzjS%uB~yY=v9+DK zoX6r~%(%IL`}Vi}fBybGoM#^U{O6y4zXcvw?X3BI+P?b#>UgjB&o388hVMW8O0CV% z#hf|+Z)d*30iFXQ7o*A-W@W~@X5C)1#wTHs+5rwGwF?uaA9OV|Iz5{)v!lVeqs4bg zko*KMaR&yajRk$%QbHG~i5)VK{U+CDy0 zB*fmA7Ih%^Lu}PW0Tf!`K7Y6J=U3S-`@!;2@y>a?mO0hTLJFoxz{Xeh5 z4^Mbr`R~(d&=u;SgA&^3{C^`}|Lw<@61&)>p2HS>k0%7%$nY61d;IZd)!u7ajfcKk zoxgtc{=L*#mtQqI|9oIp=lfpXZk`|Cc_Zr7%bJy2-!1$2@ZW-H9=-VW+yZgyuV4Q8 zWcCR~{_^(2dE1#cq(=H~U0I={q6E4QdFh(8SJN8G^W%QL4{E;rbk7>^wYn_7e!ZUG z^Ee=T_VmcBGj7Okl(W0ERCtos5s40o1;4{;%zgQ;8E%eR-RR19>G#%ccOP!J|328u zuBlt|=$zeOuB}eyGh?+#>iJl-_n5?IgNi;WrDaq8Kd=93|8M91FYIR?{+@I0`RDe2 zc?O4FGJNHlscB)R<}+5k`dYOv_2u5#FD4iZugfm<{q}A{{pVk$)BQv`Ues9SZCq!! 
z+bAKeuX9G#>s7~I{QS~dIeUsxyKWqVLhFh}u~l-LK6CEv)lQAQwrWxPVY{b1X^#z7 z?F^Eyet+p{D}zw7#PXBJr*@dUb5|3(`r1Z0$VZT;>ixe>!V^zD%k?n4>o$w4Nv^iN z;dPXa?AeXm43A#>wQ0?&NkMp$N8|NeeWRp$N&W%ro_Ob^Akm*uIS-*%koP60cE zU4o%`tntb`?vJ?w%xuSASXJ*}JznivvWR`6x$*4U=V~jri<{al=>EFfW9?LdU3B6uC~^UPf_vuvzGYX=RWUxn4_?C#jcw__Plgga^m?{ax{Zs@AYfD+P5m@ zH3tR9SLA#;Hhp>rgR8mFW3xJyG{KJ^2MVR`)qXoytn}{v`|`_6I8JXW%k+B`=^c4` z)uL=o%cV1v7&`Lu95XlR6!#cDKNx)J@{6m1-fvsX_zq8Cf3{$m)~q6{H|1N++&32_ z)z7SYYj^I|-q`iwDl%;c-T(iKPBpD%C#kLN>`*@R=Wcc8G4o|A2hOH4-tK(!LbOmM zAb+c%qIXBlZ70T#PDaM7dTLn=JO|Wf6?-sR3N-UFbgp1kVB#vgI)TG_Vi1ep<9~@( zcg)Y1yNK|wHd$=)?Qrd)j%6}CzfKEYms)ysgZb^PSC@(~eA=URb=6$+Oew4PJ!dY2 z%r*^s;Bev~&s7)`lhuhxCl(xldi19pr&HlV>sL7$U58a8#=0DR2VyE zOh`<46MK4B%(CB=aeVEco}Hb2|KHpDPDc}^1SV>p`TXha_WN~Jf0nHJeYfrozoIKc zMNO5~(m&Vo|6Ys#!Cn8b|NoEv|6l+ASzo@R`t#TD#|AcaRqL`X=lX3;5&ZdVc7BnZ z-S0PV#p7!}etUa6HPUM(Xn#a?_9g3y+&rs0)}`9X`^z+SgiMrZn&~xt%A)pFPi%NB zud7XeZECQ;@^jjDdDmNi?>@Y``g*G7{kq?OU)G3B-Mzbcc^y07@4t@QzqCKg-|{oNPklEQ>9uan=~KAqG4=KqQH2ljcN)?Ak={osWz^JM1di9CusjOK6( zZulPNmAN->%C39S`AVPe|1JMNfB*0A|IFvte!YDB`0>NO4)I>ShD&DrQFcCaBx6yvIkMLlM`x{-mg{1e!lF_Tae9~DYV#na z*qJ-bM0+*w-M#yCj=a45Q@5*ysgatjB2HWl8d}#HXKENIZ@tGkkvaSFEiL2cbN=}0 zG_A7gsocI>f9bT+lbU>%|JUB%`@7xVe&3slKhM^zN@(hm@Hk-2|Fw*Z@gxJst{L8( zZjT?RT)3M~K%)6`R0Py;(CC zuFAF4$ur;a^L5VU%H463XEbepBKPyhySwSX|LQ%@QH!nOV41f(Q;F#u&$N5lD^KrY zx?#{$-tO$P@YlpPz5{uI*#RuODyc|80MK`S?AlMa3s;%wFqpRWn9E;I0;qTXp?x z%OW*~+3rECJWMn6=WpjoFfkO^dT(jx%1KgRZj~iVwKL3&{oA=nCn+OMU}A^zktK@n z8FQg>D2rKc)-FAV2V2iqW%3CY92Kz8ld8S6?nLHp`1-;m$$$F|KjD()s2h^k<*v1X+Jz~Qhy@T&XeUbMdy}& zd;RrR^?l#n+uz@-{r2we#}b+2fxi2GKAZkJO3vQ)>LrT{QDNWJm=A2fU-$jx<>R1* zKC{Z+-`iXL_m?R{&F8b`&o5_2iYb+Ztf+X9rF4tuHC=C`s=TU?+RY*J@?s0zgS%G zV&zP8Q{VT}M>=G7ckSJNE9SG1fWVbjo1TuXQ&J-tF8VzG`ATl(gE`g39!;#z-Yu&* zbnVtIozJCVn@i_bukZMB{@+Xczvus*yI)!Jr^IUBl`k(}-h5l$7#VWbplw3XKGo7h z-KF26ymrRvX|e`HPd9yTv#-k3Z}M}Kxqj-Rnp?f{-&K@WUN`YOY{6ssThr#r!Zo2= ziXR;KSYYvk@%nBhB~}HM?}fXw9M0ZyGwIXgEwX<3w1%<3e$BUg#|l5qo%6mv>a|7& zo8{9)3C52#cJX@C1r9ybn-;a5MK8beKoG|hKCP>}#Nw|9daavM$avbKFOM&C?XEwE zgZ%{(Vz0mc8WjEa^?uX8i|u#*^6PdG6%o9`Bj{s2Gx*8X%8uwet(q1rT!IPR$3s3F zs82VP7Mj_(IWN|7h4c;a3ujy!Bse<~4)e@jB{W;%bJ?kdE1ho5`u$erOLSoIW;I6z zwgkblBex2ey&BJy^zOmO@|;uR<6>m-30~C(v!=}N`#fFmhag)aa*tc zrLw>OzRu)HNJu=9Ig8DBcm0mf39KqyK1yvToc(&9Y-CE?)y2Yh*iu#?oJRGzE-VMR^TZ9xRQU$aGxdfWXdcWp;D#h)uXO&28hf(5?EfZ$%Yf zI#XBgaEIgUzT1=UF&3U+oOtEUzjuk-Z_mu)d|I{l+r4R(AD{g`JKOxym)Y@uKK=cE zfA9Z)zr)01uOH`s@83VaD%QiQ)>s;=_x%1HevThMe*AfO*j=US^M_w=XPd8@6|}3m;>F(g@86#dShz$Y z*^A)=|G$Iw5^aey#aT{UmcQJ(X4R{hg;R>XH2ELjy<0a+?fm4_WxIFpo^#oN=k%v# z-^=#KEZzQU_w5-P4T~>M$edMXnf|h9=N#|c7L#}1PTa}*`KSAQ-M7v6y)^Z?B^pZh zZb_X{R{p(wl~8e1wq^YKRNdcyWgS+Q@-`Ot$t1mh`=jDh>6S7^gNyTmycz|TOiQ}e zuDD#p)zLRv_xa|vuRr~%iJErxa(Q&_l#`uX7!DlKObu_2T|fPm;>)d5OV^gZt=%zO zt|KFG?b_+LyfZzvw6u0Oarm%sWt=)HMgx>xh;XHlQxu4P)Qp0vE2GUG*wWxT$)l0wqu zd)3^EeV0r2#<{92+cCRrt@+zo&TLKXzpIz9hq8X?RG!Ug+|VB?C@|3};<^(@QTI%_ zqIa<&rL)uoUDqs~a&PI@RcVo?PgAyUzOi=i-rFX83E%$w3E9T7Tjvtz&hn3Orv<}i z6m>}ox*VxJ@@AuZ!|p;SCB8MQ6brmA+vxAz{yH^ptM#>ObDyp1YW#QY{rmq9_gl}@ zGFaF3Y~ZyOt=kD{|pQo+DRO@94c`VqiTWEqqY9|j*Pl#Gt184i>NukwuaGb=g-1=$5Pw>ITgxQA~t3OpsrkmR#FQ|i-$e>&cD;% z`|rkxY(K`k?c}4d_^VSIVm@DRl@1gaZocO_YevQgy(nFV6<_r3`b=bIV-D(eJDhUq z^{!P-tIqnLzy7#*=ecXEy$)ZCD4DY?+_}#=>r}fUqXeTsR^$Rf!eP`VMsvY~S&v;wPo?kjefLZBURMy$Y9IL{- zZomEX_wPK*IeyPmXSOFQEcyI%%FJRfmz;S?Qq8N@o;Esf%+PY+a-O{NjraT_74yIG z^YbfhTIVg!c=-Lk+WS?%PhPkG`S$+5x6j*uzpt;Tuc_M?C;oC3*J&TS_+G`T84H%X zeZ4om^sHvpgxg<>cE;&z7Ea%HrdX4gVcqp^uhY(Xx!vxLPOtYBdODxmbMT7b#4`u} zGDL5G)A@$M&?Rk>uIkiyKL>d&tL@j@+0WSY1xH?As_(Mp_S>@EbG@zpzqJ1+|L^qw 
zPwxvCJYTMUylUsqmX5pEs;rlPDB5dN_p790$Nl&Gd)6LqoO$4ok$!fhx8~Zd?d`uM zg4dr7|Fm^lWt_kPS&_6`QTc1HhwZ&ya<*1#amm}*TAgRd*O!G(W^g>~(mLZ<;)}JM zTnrs2o)(op3QAH-GmiFNxs}Pm?a5;j6OqczczNymU8y^EOa8pJZNdF>g61bh6&mtS)i6DG zGv4FrHfPC)*S(TTRj&eKE9b4B*Z)Q~DEq8#n_=6W;tvXC{Ht=;Udvm?=huJnzOwtl zsGYW#TvW7_xDuX(9XZ?d_JH%P85bBDJPxGo>NIBjQ(AVDPoYhKdCi8WZ3orX+>4O@ zd?`zEd8SNT)a^|=3^Rh9mUvv`SvQ^gr~uRUsNz$W>EF$++&c5hCSLI4!-t_9j#IZO zzRx}X*uw(ZTbvUETHT(e4An3HjWk0_^4;{*}WuVJg|eCrNQf3h%F6Kfb3o60q-Wtscttd=S^xoD(+ zx-{>#&oZsO*V{U7Fnc6F6gkwlc-Aq7jIvq!>zUpzH$2whqhvel%&nu$3TFL#N=_Z? zl0GXp;oz0)4bQ?iMLn&Ubk{(tq?yg3>sB^v0N*~Vy&u_ryK8M|J@?r)Erw~EJowXul5hA23p+BrXR2O%#>OwXXW@4v{m(Y% zJAVGCv738dv1RL)P1ELFvhe$DyL^VLYTJdX@X%!f!DlS1E7CMy+vw|GO_Fdc_uqES zCLuNQS?av2naf@DJ_c=`5oDmvz+mvMjIFGyEvTzM|61xxrT>rS|C_x3)Aj#sxxc>& z1TXPh$5zt7de1{lktHzl)U4Mlv~pye@4PKjt9|F4`?Y9ahnMctkC%hxCY5|Ve9K+G z?j18z@dUlKTT>&CCH83UTI02AmSd0Nv$J^>v$ZrE*QV~(ldY0(H#~B|?`)%c*0$Np zB${5g*cLpfvHN~4*voL4#G>>2{$#yM-_|iDDAe?6%Y!b9kEa{8SAU$gYK{47Q;93r z7HpkUeB&Iiqk?hv)@xDB*47ED`CKe`4qNn;O?qw9%&7P;z5d_B|G(GET|KgVN}Znc zjIIUULr`5T-OQ&6{W>;+ zCD(3W%+lN8?DBHol$&k0Wo{hFd&K9G_P@BJ;)JroiVP3Uz182(oyl667qL)x8dH(q zs=eE)qSj8XPWo&WK&weA#5%wfXbs_Z?5>_;`2sc0Ml; z&9`ON_icVJy1nbmz0*+wh8YhIFt{Clka^nh$n0mw{ya|alNO7d7O6Vp<i90S)#@KdC{@!k3WBWdHK(xS8MOiQg<$ioWASB^`k3!)-2J_z1Hux);!(j zz}u(|7f)_Gd$TOw;dr)|pp->!GmlHO@WgX@mMLn+nVK#x8}e4UXfW=aJ&~jI;by5w ziz1&K7KTewm1``RDn(XztSM&q7dbXTLt~-Gm9pa}errBC;+Y#$rg`%{bM|&NP7Rvgl%Z=hyL95c*q2GVM<)a|9FUnYqhVV`RMQECEFq_J zl@ZTYJC^!RlVljx(C+FE1T^f1)-n+#Ry<^h}d^V7SJuV2w6>2vu;^#3nE zB$D^+-(COpRcPdO3FhKYD?b_VJhs^KnN@%(SmD&}Sg(o9+n@d0cCPWvhjxB>Ir;el z0lOp|PFXIK(72J3_F(?>TUo2OZoQY4xwc*4S*2d8>E_o@qoRE^pU>Houbmy(`m<{5 zZ3mkp7Ej*D%e(Ax^~&;FnRm1L4U>KDQtdxW?f;zKI_2|ElecQ@?9+q1boT`rF{gcE z_2kHS7VMd`w~#}R|MX#%B$qeId#8Mk3frw(zhlLrneBVc-KSZ{pR!!KJ9B!_r{%T< zU$(Q=2uk13&08Cy;HDOus-k@E@B;=*18&2nn_o*-E&X+1f`ILt-CxtvmQE{;WUTqd zu-24cI6W<-;4818@Pv+%Ov9E$89|1L+dfYh;Q6yvRn7f=G*9xJwnTyQ;#q7g&Tsbq z+>>?d?WYMdZ-f+|GJJO6&;Gx!-`Kh~S zHzhuq(5`qa!|hC?^P5G6EPRT`WRmAqYNnRnnKMf<>b-48*?FhEJ5S4Uz51(kH*4c} z%QKosRhDSTNk5B7G})Xe5pQd=X6?NGY3g<4~n1Yems38gk?g4 zv$>UWUvs_A)f}1QnX?iuUzxP!-^a)9>i?f5dKgx&l`P_G^!LAhoZsH#{r&&1Wq(m_b%)zJA3s;U`du2If6hxT zxym@LSkI8`u8EAk915)o3a1t*F@-$+X|-OYxWjVkoT4<9=}wB91K9 z%!@wP*^wQey|SZ6Ve9d0$>%yHI!<&|K+6yd#s}aJ+GW6D|Y6L;)mDQ*YmeGch!9VS8cpwj{jkcJNNHyy*9nHSLU<9 zu_?;aHTf0Uf_Lw>?s>c-?Q>1u+P=$Eg4S)7>3<$ITWae&rMY!h^8^pn-al=m&wcK+ zkBhUM%NL{hr+00;$JwZHfH#z#!LY3~*1qOXLHYLw;;QvAu_Y5?H->t7X&Pux?79;6 z)y?kD<@x`A-~YGwe);_+KW~ZVfR5DqT_|<_d2x1Zs7p;?|GuT)&rVq7@%pG3d%euJ z-*YBeZ@nI5^Mair$Y<-+*I!@FpEtdKr@4k@dhqJC>r%gRhAwzf%PeSJ9uc~G)s|~n zp`r|*4rCZev_1Uy?qAOio!6Hi2YY4Cd%j65bnV(Aj>85kGS^-`wCylkfcVYr*QQnO z^o?+E5m0Qs63VL+B-K`9A>*Z)8fhL=l;9-Q{_x+wd!MW4HZNUfcx=w5Sxu3#-g0eI zHg$cyruV%2{Kjv)V|gZ8{4o3+|99*BKY#1L^?Gl-l3Q!Odgf{0J%PJqt+$0LR#;Xp zn4NP{D`A)9)Qiz#f{rCs+;^oS??hQMKi~8Ct^A5euS|)yDN4m5SA)FH{e1T>Z|3u$ zFow^rLWKdrciOV|ew}^0=gtE|qx`0ba>ic=j?cDT8||=7zin4Z<|aj*GaZ)8rUZT4 z<{ENG>a<6uN2P?w!mG;`B}PsU3QPMIer&%WU!n}Vx_s5!k3O3Pk~lP%E?A^=t2%nx z(yemIKAK znJI!%+KE~2;?};7g3~=)qu+k-Km6m*%O8L41ijk(Yj5H#H6;(jDW$Q`ZZ7uE%ANjt z&pK}o&F3~ek4<_F)f_+-TE*8{6aFj_etFrX&TMb5Vogm=*4EhPGdpG+ws=w*$Dr|4 zCRzEBp}W$z4}FL4Chos)xVgUi^Vdyld|i{ge*b0U)W1HV*r+*5Td8G6$dih_$0U|# z&Ux;u_Tb_EN{MNe`{LG}-gGUf?z4IQ=eM`F->>=XyINIX^@*>$0v6oO%h@Yz%pP}* zRWSI}`naQq^YmlaeNJN2d>S-+yZY9U>ED)2IiATfAysqw=~e-GIk~dkwH<$Y+E~w? 
z5s~R`=J>ovVbRP(FXwzV@UxbWyBZ|m;38x2ag!9|)k~|&Z90Cu|9^J>zt8nA`K#XV z-Fiz(G0j$Qt^w}Bp<`5Y`Mn7*SlJk<@u_Wif+ukU*p{I*y6dR2**gor%NpV@_pbJOUt}ZCSt;T6YypRh$B|`KR`c|(b2A;0XfNbA z64$mobJbcq`Tox}dw1_XuetqN&FmLbrb#%Rigd{-U7vSClXLUtb^*iZPXwp`+sC7L zB-eY@hF-~VrF(D78=L2!TmD(I?Zw{R`nPM|U&#sGB$<9W*rk8OxkHhlN(ccIKxkp|9%(?YL0aVW{w z9?N#~=(zZH`6_|rTW0HzJT@>Yd|#V9Yqx%o>+DOt?oao-DJm$HguFIvNtC$I9KBxr zB~zB-j$e~c7=Cc+Xqm^yFyZg1m8%qwT+7Oy+ZkFJ_pCJbdTH)j;iR5Mj@YAlY;3Q7 zs&Pm-y@>r&%@w_(XUjd$wcekvd2JVoEuOZ||3;@~%Zn|SQzL`?R_&VhDBmYRZu!Z( zpR^twZ5BxG@Rzs$^T_}IkHh8nuSK2OHA7QqLS@+OdeK+MUfu8N{r=o8S@7rALm#fc zojxmD*`)!IAPH)(*@^M{|R7aVq;bplSI}1JTwXr*?m|oOUx@MK? z%lg93F-A|m=MwCqY!H~skQU&YJ-tmsalys1$@lcS%zvtBJmIh3#vK@PZo9_A zpsv+10t?n|T^H&bnkvy&IOj--#Bl*9@x5{Dr=Na$cX#>jyZ_$(dv|wtxhCJgf^T}K zcg^ujH&$FC;q*RsmCos+8}e~`>uSr@uUF=+Wo$8g-u~N~YvDDczQeQ4^Nak}ZvFgm z$C>M?Gxe{BhTPjz`TBh_kKmbWQLDbBM*hhcvwg1Bk|+`V`g0Ah;y1nJk3UxJJ@@(Q znF$5kXBYX^6|Y}=?)9cMukR-MccghK&N-hR|7>sN=U)XDN9I&tKV50I^}xo7&*p5b zvI$tZv@POHi3EGQ{nV{<{L}^a*6*nQ{p;?}B0IVMbqf84yoVQP@pS|@SI%30_uk>| zi|H1-TElm$o|8{zdU9a-aew~&`+x7v|KGjN_>O`gOT&sqYqwTL7WrM?a$0lk(u(-! ztqZRmlF8Ctdd-@z#o)=ogPYchI-EH?ft|zf%x6)Cs_eDq#&_p9q%r-yre+nj&$xJX zZe+O9w*@_gw@TkC9!Zoa`z?D{Yvb!qrgjIN3%pD!JNEB?zjuD0JHrX*dnMmo43{2I zd~VZsc#qCJ1A`||+UD)2Hh40pBST`jO`oM&Qq1+QRXbOB$sLn8Y~rWbvUN(- z>8hx|PMaTkA1a%vwp^}pR+NwC(unNUZ)3bpR=(JKHHvqF!1YTplRw7)Ie7d3xA>X@ z)p>{0-j+SHc;OOqRwQ=i4$eP+wUR$93O7keQVTO++2VV}@5SNUZ(E8UE{vb!c(&mL zXXkY=}IV#G|SDGB$Ve`W)x| z_h!UbbZZ4YPwm_LbD|B;VtY@&X`fr4*X{2&T$7!7$$5bQ+w?8F{MZ?y-tRS4GGpdr zbg7y8BwMBY{Eo2SuFg;6LX3q&<3)QOOC)P7t(wu=vEulGb+70DJt<%R{dD}CdGdP| zOB>blE{oi*DU$wiX;01F8>-uHJ-%qQz46W4<^N1)a~=I(Fkez=)69jr`+saw-~D%C zv}y2Gw!16!CU!C?83%S4E}fIq({ShNgru)|a~_D*rTNZgWSp^5FoT&lrE^byid(`H ziNDeZ)^Zztc3@SRB%q-%Lvr<%YqqD3KfJOk?E;U(_WPWRwlS{VIOR~wnKLbyj#g!- zzAR-8PTJ=oi2@=7P>U}z~O%i zj}8l{aP2K^|5W??Zuxq-xW8}yr8~>rQcGDXkv#q1$H&J{Kjle_)y|tM(=sh3rOI1J zXUwRaRTf?2-0@FBw*PtMzA3wmE*docVk#)m^HpYUZpj_Q~A7 z+T-&cowZ*tf81HxYuM}IpsO~W?fWKw{6LTMy0x)-&v(rX_0m0iM&_Zh zueDN1T~O}b;%!Mio9_C$hVJfg-gG_4f+b$2?)T^V|4;qv4|6iKSnxF4A2=gp*&B_kx&F^(+6A1vG>`GF-?Og!aCfkm9;3pdd(q)*w^l8F z_M0imNxy69uO;(3LS(sCT|XV^!q>h#QKqKAoagYab!$WC&;2HCD=N6WaLe`ep2Jcc zD?TLi=+9lpb^dhK&bao6bBbrJ7k7XA?Os&&(slbH*KeK0s3>sp{%`+#|IGjYY1iE; zShI}1!y#cuY-jwfHPYo-o0%Dzxfm7*ZETt;H9>LF<9iQ}FTZixkLB+jr{dhjvzm5> zb~ns0+uPJwf31@Pbf|t<^!uxZ^KM`B^mP?zaQo1exy>=w*5PJAu#cW*(j$qcSzhA* z_h+#=)LdSFST;*7r*bF;8U_MMt^y6{?nI_m-oNlKYP9Sq1!KcpX@z$ zO@F15l9bfFV+S_e)Op|<=fS}6&!IZ>?vpC>iRahf)_1A$T(cm$Y5U(@k1nqL|7n-{ zqJ$M|7Ns?;+uLi%XL;0yf%WC2%~!9n3Ro!Zlw@()>KV=)s(QgefWvCl2J5RAosUaQ zd-U_y$*2e9uyO}f^n3%J-|J?Pi%yOy8^=oT{ zj)p~RXZsd&RWG@%H}m8CfA|0CTRsmuwMj>@<@);jTdqwjl`Pt|yZ-Bjd*`RGUKtu( zR{aE@wTz};sw_v_VZ{qo;0KYmf5c;g07Xm+{=IX$A^OXpDrw&e=eQn!<~m} zx~)f!%nN+^DZJdeB6aTb)w#c!oflk7{WN>6rqY?ud#;t0fB*3AZk^QfYhj^JrE?u! z=Jy{zzWn)Zb}xaOZ@Nz0-X&|$B9gprX_d;oP3yE~72kXRE$&;?ZVOKHV=7v8ef)@bVbpAEWoH^h0QgyDb!T*c<|8GzDHviw-{f`Cv{=NLB|84&73~_;^ zu6spG=OpuVFu1fIe)#a=L(r1a(XgZ@M7=Rdwa7c6=biy zGRt=9y{9s1%c^42TzoVc9mP3&ocm%!mahKh@Z^mKgKAO9s_qRP+b8^9%P0Ho_iLNq zCM%{@?TlkMaJ~Go1>fNf_upPm&0H&X^}wCz>Gzn~=SR-0ykNY_BQtyN)Gd0Owne(8 z&kZfzK6^%`1e;3x^QOv2N)7#SpA8ruJbrw6`SJeWcNcCKa}H?OB<3LSa_zTk(#FrH z_^jPNP3iyp|Cjau&Hw)*JVE;Z1=~2KN1kWiS+P2p-u0T8_lYMy|HsYbMR}fOAFXo? 
zXCyStOW@qK*7Jp#+m{?MmKo12jf4f-a~*|eTy?36pZ>MT^7jru-#N-}jO*_U{A9YV zZ?2e@zhRHS`qaI?ryW9^IoJXgPF%S^FZ!*)X-odcFXqmFKQ(G?)}o|))lI9)4hl4F zU$`}_*rfJG#*8yL3IEnvh_IY-;?R_2*s;vnbt;F`hTI?5Lah{(426s)d$1QPbmjZ* zzGJ7}k&(5xe0%KndrY4#o=!1Vnw7=T=o~OLHFDkbUDGZ_$+lhGe=;@JTa&?|(WNwY z+T+i`k<<9^|9w?uIXiEM?D`n#Qw6U#cX-_0_xjGW>GMwJ9T)oZqP?Ge`&Pb1s~+}S zZJKWMEXw--&Gq{aY`g#L==uM}vyGE@438RAD1~+ys_}U)mW&VzJ&?EHz_!}GogSIK zE)xv|jGWXjdoZM(IF-gTQ&B>SsoF(#lHdwO$(fx}YeHF8tm2+KfoT!jWw#esR`3e5 zE@NnF=(!RV`D|X$C;ydM;ww*AnQdtQXxH|^|KIEX#cP+A8#cfGdZ~*4%>3GKHxD1a z`}@1S&Sx3sM3xG~sdXP&BNrH9gz+W=C8)gTkWHwQHxA#@fCplelET zvsRSB!p{Et-ITOdLNhoXynOkwsE&^@`t{N+uT$f$i#Aq;s}vP&ndK259jua6_N{ll z{G;{rZHw=|cQ|v|;ugc!t=DdeEq3#XzV;z3*nH~rRp&Wlj8oI>POu)4cr5?-cKzq* z{hzBVh1sLB_ih(UayfE9CRt?Sl%PIOpJj!g?8BBEpV5~1WwZ6MCv#tYtrFD_Q(7x# zpP#+DESgIoFk6(N!E&j|Ck1oUTWSmMz1#FeB=h~N85Rbm+0N(EZp^+Cl&!gTYpHI_ z4(m69E4F!gOpH8upknRn<@#*TPJfN^u08mn{N>Vy4uu4-Od0oGjyqkByl`FGn`U<2 zSV(E(%$-&fYuulR{12MD?)kFVU~kLHlj4_yx{BW<6&UAdZQXXK?6<98>BP!NnTTti zP8^oc=LEf9J8jdNY}@(&KlwkH|Nm6p=?W9p+dc>0{b!v1HrKI9T0?zDCwpfcn|)T+ zgUx@}eoT_LP;Ag-+*^{rjLYNP)*!*;BGx)xUQV55rF-M%_S?nZU1qk#MWjJv>5O7G zg+SIut*zUnoJvfb-^W_bez{2}VOrapSh0pe4#6{*w;bFiyE*S$Onn?vVDZV;E`=nY zz+EL1uW1>#XU;kIHzaH7I>TQKBGuWqgPJR4mKV-R%I}+|Qsi9tq1*&~eP46x@TmGlN&*lc?p1nR}?bfYRN@GKG+XS2puBOH^ zCe+_B4qBO8XJ>CO+wXt=^Uob;Ht+kvRh6@?@c>Ue= z@L%cjZ^_|5t_A;;yBc?_>=}>Ojd@$n2K~!sp822A$3>=y%h@kkE~wAtjEL;HWoZ{L zaO`$`dFkqjo+}q>(pPl29BFira0%7nSTTt~(?~&ZfuMxIVYSqv`Q3{WO=s|mESz1+ z!j@(hV*GY5=LNP6hJxqQO3%OEwB??eY_FGaWOV*V#%K7X}|pTH?#`=hVZ^2pzZ6M~Ey&fnYn`%(A# zPR@7l5Bz%P(uRmcA+5ooTKl;Gz9`m6GBH$sZFFk11~G-eX_)r($oM z6NjS9_xbliID0%k+w2ZkSyEA7&(5}%Q~2Y*0=xOPPuV*=J(jF}T_rtrLI2u4XA~0U zj0FFG`1rW)=61EqTh1ST!P)K;a{7_hv3pyuWN|S~_$+XP`NyWNEow{6`p+*olHlO6 zFW{u5m4KMQw91bkjQhDe6{{k4R)_z&R=@Y$=}j#X(Q9?{w0Okl{ZN?R@$c^bf8q9j z-_D;JR5R~^g|!lw;*vSr-f~BDFiNhKGA!hHy=%_->4h>$J@=-rTE+OLEZA%34)@uT zL1I%=BN+~`^X%=;D&DZ9ai!NeX5*Ynf8Klsg_a3PNqvfL&qE)4EBQ4^RM7C)lA>>(j}7|Dzn9lF?PRoMId{0A*{)%h z*ZT(xFL5aSEtzw$A^CzzcMS6MnNA%PC5MW*eNFVvKci<1xg z9!pF~S>dol)tI?~Q-PK1kv^k_=3S91ZZ3^Gn>#ma+}$gnWZayYeb!q2ysdet!vm4x zp2G%gX{Og_FKDdFYtC^@y%x1{>o27+mk`wg15*`&P(j5<5|c7bHwZks#wBF=eEYOr zQH};lZpOJ%NeU{Aig(WZc66Oy>8hZVw|2YU+OU_oLEL-p`d-^qHGfgs36YM89%n2z zb$c#Z@oN3tPCK)=YiDLAq$Me2oMmE{Z~J!7NB5XSFvI50KksBIyEN+tEMIeUadM-F zYjTHWdVZj5Ye$mLi925N*FP<>k)QAP`RS$~Zn837dqds)<3bFYZm)T7#-ZYHH%H)h z`ps=yU#!1v;8LKo$$#$Ep151^mxSa(3gNhU}lXjWUI zOmWx6k2d>4-rL2cDDnS&xh!_h+l#JIZ@pZWG_EY4+`y#K*41;xnI|tcNWo-p?n5(2 zYlf1=SD7Y0OsfzUbagQ`WEC(~37vXRfbpc~Szg`)hq~Q2O!=CA<>9XiBgsbvH~i)6 z1Om=J-f{o!x81Qf*sgsou-I~~Gb8fa-Mp&L&vND1td1Tqd9&>G(?zmQDa;+3RWkEE z=1zb8VnY_ovkO_XuOB^gZ%<|LwMC2M?dyCrl^*QTQLzcr`O(KKX!rlm z=H0t@fBsoxW5=A(W61K2Wz+KI&o5Ud`mZx&X!x6_^yKx^ThaI5e_zy4!|LkdW%%aX z?nDpE=huGO=zlKN+q&zTXXu#_m)f1{vXquy+a{T8`(kzZ%i72*Z;h4y7VTvE;o;2i zrlh9rcF;B7#jAcDF5my_W&Ync-~auIw~*_3+}7j*Iw zWUMth^KSdwM|mw0mo3`N_+Iat7S?fNy788)b-spg%Pw2+`qb>3ldajin}hK{-W|iD zwY?Mm76kZj6bRiKbgHaOdWB|3l=(%W#mSy-iyDp`5%l?U`C|3jwOw4wjE^R4-agO6 zqVKUuU#!aV7%$DOe3tBeTf;MZ1sXlqc;+WaF&!53Uzj>mP((0hgPY>3+(hYQmSDjT zfzzHJ5S-Y#=yyxr>{=zJ4XLp)&*mn7(9p?TJI!jl+NJG#S9|zn${hdyb@hk)e@^r3 zPgXQ`&EF!(*pQbkz+7DUwfp4p@aPAJ)$gy}X(HcpgEuZK;obc&#?c|X2lHMz?myKc z!L#Ms*DAZ~VT?L#e2EH7d~<@`4yhe-(Pt5hzdd`x^lP_H#e}hNDE@eTVQaLp!{>mn z5<-Hz=Uw*TwfOVmPifrp?>iU-l2z31F0$sFu*<9B|LT1G({nz1D0)oZIwk1Y-@Nr% zTfeE5?TQm9*na!%vXiM_Is&r3iujon{3A*es;pk(Uw(@V>5zWrvIe(~8& z7KVm|wOcD^o{nP7*>|q0c<1u9GHiZl8kyTAl3&Z7nc+7zmDM_z7 zlG@mp!^HIYNyF@_4@);Ixu~j`3-3P>VE**@X`bg#!}DGF;=X*YYOZ;$=KCdcF~8u+ z11DrZaJXeT+^b`KHs{iW%QGf#KHqubfU%pX%uY78BRv8qtTblLC=t$F=dSj;$!Ec_ 
zMSC?h42t_#wx4*Sc$Z6WqWWza$I!J5$3&diomnsIExJ|l``q3Bu~tCeP7M?`|lYZ6xsb$DB6C1Z{=q;hIjAftEQL6UXN-><*EGrCkb{q^3h&raAGcPeJRZ?tF?=x9_Z@UT=|*wuG= zO0e6N#1redveyc>{|@Khy>0io&+j%=fBkTwDu3<%>hGZO*wdSA?CpgEHB-0Ge)o=V z_uYq8d;fhne0=$Gb-srW3(R|%4PT`_pJ>DL{@z~31&?3uSyvkC?WO6argYzF$|k85 z!ppB*ypy%oJhD2H+t6_0s;g09y44I0Uk*e}eJ9mX+A2`xrTIKj#p%H3pEf*~AO6!j zS`n7J_213;e=f@ZdAi>!etr0Ni`j|}95&~dPAj_c{;&SjV!7p<3>F5Av!Xf<@+5UV zyJ@v9UX`mc^rn188dF25Ze6!*yJFi7SA|CwJne@QPv0=rxSA<`^QVK}#!rlG=MM`^ zWOf!m|EEWj?{Gj?>XhBTfB$a(J%!zX$3>*6i8Y0Bv+nia8I>|_q0UnZZwY?R+t1v5 zKyld=fzXnD=jJI_?6@6&Zu#X+U#=ZEkZ8IrYgNMuLxbYT>(`>P_j*p(x}4{w_+<)1 zz>@sQR-#I`w!G))cp+)1_-XC#r}ut^>ecP@z3k$%bxP{KYf;tWJC445Hs{-JS%x#8 zpXUEB`*(c*i>$VP*Ogi4?`OExuVcO4SJH~Zi}BCACx`vNaJkLS<`CSo>-DpweWgni zv{tP;!ojvRL@nsaz1hNve+(;D#<4xSYPC%9*n{+amlsC8*V|eUyM403cHNxY7Psmi z=kFEldj0!P!nN6a=T`4Lcx-9;Y!)evyrkB9tS(OZU#~v2d$woY>&l&7CoJB*doS;_ zASx?TY<=Uz=G}%hV#gc2*IxIQZcChX>_DT@N{)%gy*kSn)6|4E-grqUHZ7@QWdQ<10LoXy033Vy??Y*Jz|zwpuroi*}=0p zIvo>&^BZ<=I>4W>WR_I{)h?%gyqd*4sk0 zX562|kf&9hl zh;^_D!vP74>l1=_eC{?}2@vJw5j}gCOM$Itip9*XtaM{Vv$wasf{x^>J&S*zzQKK` zz`yl%@3)^jp8U2pE-ct~p5IoJ79-!w)8lJ9I~pG^_wVmN?|=UM`SSAe{QCdrc`c8M zw7rhWihOqUH%EfiT)(xiOXJQhpS((jul&-|ud5aym$6po~+wFXJ$7{<2cKqXk;wN&UNPfy}h^J zfA>pRy>)G>r={RSoAc+7AK&zCO4}S^1>VOWUR`zek`$=_b@~2{w`KQhzu#T%KY!iz z?6qGXK0Nr{;q|V%A0HkT?YtJX`S@eT9Umnd86W)nm$&`))LU7)(@(n|P2%~yM_}#a z6|Z;A^Gi4W{Kl(9tHr{!t?Ki>JA|5v?U!l@v#c+XGk>a}~#u50W%ktm+E20r~d`7-&Qy0K1^ie;@~)U(8#$VuT8?qr1_mXvmnz7t_k(ebyn32 z{$CMRIP0KEPsf(qZ=DiCqo=uwN;rhnFirgYAVEAn{%6kOYf-1Bosr95P&S{%g3r^% zg5}ZK*=mrtM6hGq^}JV*>`^Sov!HWU*@ir|OIf*#B%BhGKQ(5FyKX)+ z=Wtp$tMfP0f8RL6O_T!y*6zBR!Mxak;h>}9Kjsr0GdZflgFgs#-Jc!w;KRS)-ycu7 zw)JYzv!B2IdF*cQjqY@6`VLnVx@tU+lBf zA1h*>?VR?j;Qxoi{PuJD+>dW+IbgWV=1BYDyC!{xNB`FCzy1D?UsUwpFF!J4!~I{+ zTlVmK{r~b~nHg&ZE*Zt2zV*wZEVJ?D<97e={NL?=-+V7{@qGQi&-wfRmPJl`{k5vR z-2KS&&rdHeKYsb;mmfb^7%rRiJ)RMqvvi){bH(L_v%dX)d37~=+N|f{Xk1pN3oz=`6dvW&iE|$1e`NzsJg1 zeCqYCbDv|fcgL+~`O*FV!}7;@|8I)N|Kxm>zVWsoWae{*B}mLHIdU#Yh`Y--qSkLmZEITX{=%8U=^ZU5dOAgEY0 zX_MtWVWSe6ETe^Qt%G~Xf z@BP^A8V&}R z2(Q-O#Mj#l&S+Lk%r*~wd+b%W;V;G)aoq9KnHutTWu)=FSyrMnU7SHd@#i+{mJOcl z1&d8R-!n9bOuWFo^VPBgJo|rdz8`gM>(;9J{oB{}b1r|KI#1l#c*~SaW(Qdw9AJ1L zE~dnH(c*&Hr-1N}AJ$4Urm?ZSbaT=C#nB#S6L3{Jw&hl=~Gk*1UyFvcEw%mIEgoM4^43lQK1}C%VCp?Jh z%U!i1MQOpJy`5XGxvcemE@$DDuuP9@RgVPIZ(F%!jts^#7p~WJEeH)+D9q*@5Wr+~ ztL@eHU#YVX%l8OQ$@%`{$E&Na3#CF*W}Z)d8kCjGdNJK7BD?a>7Y`SXFaO%*&wbuH z&C2g`-uB&d^vi!MvbePzuG@eA{ftBW@9ksOO%Gpx@6OxzOzpQ$?ONw!^W@>x*Vk7` zIcY3Cb2=+4ZyD2On+@Ho+5PuizCQf-aP|6qHD6!d zb9weU^z60N*y~?^zu#{^&+p%nZ};x+tNm@g|9ABIxcz+-KTo(?qRHJj;XBhr``>>) zuiyXgSB>5Olk@+atl2xM-Cof6_~)O0{(yFHYArn`@%{JRJafLsE{zNcCw6Z2T59n5 zW5t|fhAh8ctzQ4EvbN6dn1tZlqMbH&wnCTQr5Qy}JNneyaQPG^LnZ;Hcl_1rnU+DZ zA>Vi3eQloq?f2Z#L*_pI`p*|M$`U*Wvwt!td4pkBjXI0E4VPpz59MviIwv^q^rfiqfS0~0(xSh--tvS+C8sHfOpNo^40RPye7@)Q z`?EQ*VxcpiuU_@G+*)Z@+rFkf@p~u`96ih8w6)On#*|@w!iJ%x@~4~jX}qD z`Eybx!PlD=k~p5|X?}~4Ps?x3U+TAJWqZSyd!gGnm^S++PAM*9_3gNoB`nCw&G@v= z{@&l#`>uRzzkGiB>*dd#w;2-7O!&O4q=@Bon9A;+!#AqdvtHZUAgL5>687%K-Rx(d zYuO8C-YnhM_3H1#bL{sLu2u6JE}7!gv1-%0?ANxfE40of1sX3>@JiBRQ(ENq?}+rK z_cOM6&gGx&tiY#s*F8bc?v7ji_UYBP>zPih{qN-Rb*iX@VwYr9$>e=KzqiM|PCfNv zFZZ36c^@Y2-o)u3^PKhep##zi1p+2}I2>}M=jI1q=vc$ncvWC){)M{fGgeqycvWZb zy&C4d`Vwy^b9TU1fg4AgIONuIwJX3*NThPWf6%LKO%mCKg zF6s@xw%7fd{r}U?AAf3OijtN^-QG0E;={uNiMEB)qWX@f#)VuI(JY!8=Pf$*_ik&! 
[GIT binary patch data (base85-encoded binary file contents) omitted; not human-readable]
zoT2voZ=1r)w#Z$8+kMOaa;Ar|0tTXkGigBh+@6JrQjof-| zD-QjbGMS;dKwPrC(7b1NAyZ50UV)Qm6aWADQ)L%>tnkaZi03R;#o4NN@4S1L_nvXZ z*EF8HcVs#~R9JnD3w~vp{`}7A*V%K9eV4bB$>ZNS?bzAl5C67*-<@eS_uPE@dabQ& zudDKxZZqI{Y_iJD<7!M^!DW-8d@jbLidpMkZ_?9Ty8QWa)Aei17)(-*oI1R_eYW)4 zCL2x9bG_N?Kc~h`V-@^s7?E84-YekgoVAOm6u-{B<9omE_t{;~5+x#79IlntR@U0h zJ-;;MuJ!d))&_~>j)Rif9WG10U&*evnH%i&>)N+#BGIqcO1AV$=s*4T&Ft92?fZYn z>du@H6d51i-z&Q2-JM;J&Tln)fB5X(SU=0>kM25Z48bdxhPw0^_C4Nl+n*t0ZSL%S^$rK7d^S3N zP0DoRi+_%7dy2IabA26M)7U2NE4?;h)};`|rWQj!FZWE3dJe{Qrz!+rNy&`EjK`|0c7Jb{6F#O95w-5_5}l-u9mNl`e7@hae!f9?f~3=wdw$0^Z%pew z+1}ui!6WIU^foGYt>%B178f^`FN@r+ad@l^RgK+x_28|$dH1vSpP4?#NW-;_qvM{H z?shIi4F(^NEv2WrqnKF}@4T)1#$9eTFK&MP^HjEG{`dF$AGf49N#wtm^IJYW#;B1a z=icXYvW~2IFV?tTdZ8-9`24N`kBi#$OOY?Ky6>y){Sw5Tz1jGLL65v$&6az6|9`j` zb^K0i_3O1;x2^g7vu2&Asxe1Hibq72uHmuyfB&TK|MT6*E7`>boF zv6sR$zm&bbc)R=5XOVBAVuCDYEvygKzXW^FpZndAZ?Qbvc1`}5uLLKX=U*4gIUIZR z=j^$@by7B6S28QIp7PoMwYiQ)5;&I;*agTJ_M`2No@ZO(be$RzN0Y07Go@TdFtt-JR-X!EpW&Vmo? z_%2$Bv-P(ueR6O0<`eTL-}k<2q0b8pM%+1FHvC$HK1_w)Wg z*Y*4T4UZ&t%${UfyQG5R|)z z{mPaw=d{@CThzSMIzB!tU~cO7kDp?{W9LrEw!~S-^yB6Hk3W7|B=@MIxLCS<#l&yr z+uM&XUp}SyTa>mR_DHvERl{ z|FnGPo!6&7zWh0pqrI@^|GQd_x|!2sKK=ds*Q8hD%$Zpi`!o8VPu}|M@9*!2EwZE@ zJ=`GQzVO-|NtY)^=Z|0Bxb53n^ZR%1-nFf(w3^#zIrZM|bu;#UFO9ukdChx~!kj*J zh8bnnp8eQEjd~wU|+#G>oua#fr%sZI8^fc3RPX=k-tKL5`tHonU<&M+46cjRf zgp$|#2H#&+Xw-bM+A}IxFLGOg!KLCgVHbZ)5;*j-L~NSkvAcQ8Th*@Jx;D$qOmh3u z0IhU3hF`Bb8>W@6lnA~Uc(+1iZJ=n~LnoJmtbcEx+q>=Btz~yFMST-Ev|y#`-tLa_ zz{CHh&NOM8^LeY7i$U5YHiqoCCVrp;p4nJprm;?R`rxsI%WB^uYmOayr@tLmj!M2L zvF~5YUdeZN8!|0U z=VQ_9*DFR|j*gxlSM6?Ou>M-4ipjZ2TlHAddCh$vIKSWf{a}=^lY)^FkJH@h)w*BT zv};UcRCxa8?%TV!f3PTJ^GuR_WYGKg@87=y?boVbhuWqvyqf&;5mA ze?C3=<@EZm;f8!(v+l{{wJXNy9sm6E#?I)+f)ic0Wt2F-x_vw9-2{fh`eKPT!|6&o z*Q(yzNN%z#c^9+OJN!&)hRY_iFxpI2a+^e)5r+!$N}^+5e%%dJU?-cgtS-#@T-^-Qni z+ZQe09};NSMF@DJ)3V|;7aaUTzIeYy>r0*A8H1QOe(%^CpGm? z7um?^Pv80dT!pJ~!SA_GZXMS7blLBB{eS!a@y|{#dUhlv>#QV0)Y<2!Gv_&PI2a*) z>|*ui*~k9Yah*G=_$Gf@=9kY;AIa4Hc_l0OG^nOS-mdOxZOpTuFMmEgU;nH4T&9e> zlb7bxIo>Ciyj^f``?cknR(I|QTzpwlS6R6;=G^bU{~jOj|NWPb;Yz|5o#N+qeaY`_ z*Pd(_cswDH7WgQCHjLcD>KnFMZzU^84?@4f_l4H9DNroPOla)pP8_wL=h_kX_E?C^Zq zw$yb?r|k;;J}rgg_}{$p9h{0S5AyQ(4*m|y7R*>XHHtAI_WC`$>p`X_8AZDjMOb$T zIR16JY*F=|H6=E9wQk?V36^vH_HGwg6vfJr5IODMu42El{qn z$6kK>=GM+NF)+!qW$s5sHU^EQ=RVt2nL24sl$wa(c5>P{RG2uJK)|?7wPJjEuUj+~=&H_uTE;5xVDMb^cnx zq}6977WY1}tLRN&J5V|AYme7TzKP2YKU}?j|GvF@zn(KJf2(rIMx@mJHUEO!aSp{h z_Ld&ZdS@osSg0g(SfaM0&LXaFZ@F{INsf-d&OJGub+-&N*X*v|vAcRl==Y6V+c*S! 
zH~TS!F44Ut$r8|GTK=8?SkT(moy;G?E;YTFBYkY)Q#GZzb3M*meovdTRq;q_oY%^G zR@~v8*V`U*mOl*g_FFma)3PaYTXTk|RBg1U~v4~Yf$qTjE5RwbDCh%Y;9<(a+HF715F-+p`d z?&HUg+s}`?Z(_D%V%@I(TtB8A+y2!4`(tzDpv3+)uRove=GbzML8`IML#TQ7vV&H| z<<&d(?ti_t^+mhIm9S@(nQJ|!C(6jny}Q5n|Bhu}K&Kn}*Z+5SiJ^aw@*bVD!gX1qt{IaE%7n;9yO^LjIZFXHl z_S)C8u87=RC(y;F_hP?n`@?6?(l|aE>F-=J=bC`CpQcN_xB2wS$Z4uA2|T5F@9*!e z{{HXo^z-p{wZB4&%#PiA^F6*6boEEIywkbVIc>9N&tAX(=dLGQP7TMR%;WX4{m!31 z|JdSH>Wc4S#gWr1_r)*-ma@E;dv`o@`;yW~y|y0yecz{T&A1rgw{`2WOVO{->#x^b zdu&I0`TM-Q_m^h%9e-?dey!>AH(O@U`TYFz%$c=c;%om{@Eo3gzJKX8iQ4SD5mJwK ztm}5mndATbZ(V(Tef_>U*()`dN-X9L`>fWXvDTA$5yQpk^(trCGPfPmxl^5&J4?-X zwrShsYk8L2&pIikU7gM(xbl0R47)=}mdi<-*K4*$?GBx3xjZJ@Rpq%x%Yj2xIa6c2 z?s}}zR$ErF=MY2ePHj>7Wm8In1sU%anXO=6cvVY@V>V~_{~Pi5lsi3Giq1)`SzP(& z-Kj>&TSw9&t50MFzLFE<4fNVM#i%d&!#(E>ZC+~?+XRv{MFrPxJ@<1`an$4mPNyb% z8D3j7%~beG&{_$$gv7LsDF-U~d{)$EHhEl=cy>>kd7&KJEapXbnjS4xUCQokhgH{w_b=cEM`7`RsFR#2d~ft&#p7EWu_F-M@MB ztq%|O7MI*TG1e?P>qw zEQakSMmL>OES_JgW}kR&u9aS9r-8-ubyDVAjfgzW!fvnkk!bQE!-z+%F zd%z`mEqCFZWtp?gco*7fWKChY!S&dxe639By`KV)UK|!^zqm7Dj_I{UW<`g$O1Y~% zf3n>o+nDdLiLa8<{o3s_ZwrKE-IsI_3B7;i0K*4!`F(%?{XYKv{e8uU^8bIhr_Ee; zQEK;X*%^{7#|_^syJ&g4>a3=HZDri`qrVHP%M~Vl&WwDPYVKEFUViSkeEpAyyYK)1 zxcmNJ#*1&hw{My9xbWOsi`R@Q$1L~`?=I*6{+++0!fNjG&rgfu{GNaQeEIR`&)R}^ z_WLE0ojKNSoin-X@!syW+oQ5|qn-V|c3%1Pq2T}L`Tu1E>;F7#pFUmu?}M$kIs&@A zg1k1Mxt0fWKP$Sp7+q^Bc>ng_zju$1^S|C@XJ`9Mpl9LTyzP;xS8u1h zvFPi{mi<NuUfonZDFpR zMRv^Ez0;h|`BdBYDRWyq7no2Tp0IPuqTlXoLyt;zsOTJj9Qb$7>s=Aqd)nLEcW2)E z`}2JLHOcr*LiQ;gM&)T6uk}3MarWZzaPC5TQ(|y9g<^A4Q zH**T^Rc5bQyX_iRgBJIfd(qk1b4{NYNU$+PTuYrPF1mI_!0Fi1hxe-GYjnS^(q1^H zo6jt{=aQ~VMy`UAXqHk_xm}w;`@yK?FCSZ`hpuOAD2~1{DJzq%&Kd%wz`$Go_!j$ykCnaN?XbH z`K73rd)HfD4QiK2lHqwcYf=|ycUTDbE1rbBUF@5=d>vViM6MRgRMFY{?cPNNMXBty z-)BD#;c$pN&^LES@mf}?Nv0WVt+;!?WqG}ieY;^n>J8SotB|K(tjzSdMJp+jKd)@j#bZmCs$y0`sW^lGlWe3jJqr(C77ryTDp zlxTnX^oNapw}(4}NQz)?!}ZV=+w<1^{B=A3k--~&_Pw!+Nvp3;VC3#Pn`OZ&l)Y&G zE9t2-6jid{2(UYC*2~VHdnRmCZ_6(W%hzj+pHF|h%<0H#)fv_6w>}LLxovvviyA}h zWS;=vK*b9A?X45n6rQVoKZmtt&LoCSiAs+T8@&B~*y5+e+RrI(t}3$W^j2q^UY}GP z^?BF2*K@A#oU)`SREcS^&zg&|{_h30Uc0w@+A76`tgF2z@F(7RTmJj7f#12xMQbG+ z*T0#5Z%I`)!wV;lLJnu1!!Mk#xYXR4#Pz_J%b)FfsUyR4TkrL2r(Sz@`pur-`~SB8 z|M35Z{C{_ci((21LJj8_E^)q@z2(Wzmpm_m{}s+V|9MkS@|(TK1$sB0i&EHfPJz=~ z{m^L}d;9-8r@UITzj|-!y=$B9$S+&J|7TkyXnXN#oApb-{QW2J_<(F&zx(Syb@kRq zCS2OJZgZ2p)Ur5l>wR*;e#b9s=CkU}>SnfFe(z`R7p<+jpM$b;w=!y(@kkZDpCj8g zka^Z5%%&V)n_^rRU z+b)0a_RFBNb(;IzXEz(N2|9J0-V~!({9d=kfM;=$-1VX`rURFMUa7t>|BbqF*=6 z+_U}Io_%|sly1EyneON^zy6v;Wtx(4Uz}|0ucHSO#9!$Ax8L+{wf82Sljm4mpKLfC za`nudHun^j)u+xli5v9Ji_DgLzWn^*J!fWJeZjV?(P`om$2n4gMK=?bJ}ZRlGOf0i zH@e)pbLXq8%I+@T{+Ue?aw<4C(XdcgXN`((`dp3|9kY9GuBqEP!7zo_^0m{AhQ_Ip z4FTC(r_4G2d1mK@gpAvN>)08#itv71b;~hNjs1%D=L_F`FPr3EH#xZC+|>yiuSxvQ z$=GzQ>U~e))T0t5(s?HznDShUd_OmQsU5fQwuYcCvyal!JQE&h*{PcxI>8WYtswY$ zdZpw<4wesHc><13nr8wOTR!|coBcfI_SaR_1}SU4x-1b+W0et1e7fk6(puiM$w3+*LiZkhWzk-6(Z=!=MKPs4TIU8i@QdA;dK>AAwd zBb(~x|C%--g-sJJ+r3DbBrKdU)Y$_Px(?a;3r_b}2BO(XkZu?f$Uv zYwqomwfRx6Z6p_OPEg&{wL*>$z*}LPCornAk4V zC@>zm{*Zy8*51T-^ZoCq_Z->5b^Sr^{qHU(4xQUE#n^k@P8%CLzsos2o*_j~A22K3 zm*cy;VgH=-hHmejIF7Z4vCR1%U;8)AA>BMnO z;<>Gog!9%ZJR27BIe$=mVdeE&@`dgZ2`8?Ffp%eDq3M=~f^@I1Dsx+0*AO!0^!}{F z%Z1#NuOuF`d_HOQ=A<=Ob5sN3jl~7GesuDboFgzx)!48d~L>-W2v!UoSHjY zq}2?c>bN+}@R}Ag

AE>s?{b)LQn*-`cj-ZL#Hwu$5b@(ixXLX=!NP`6PyGbyJ8L z*X|Why~P`MitKpzt>Z*Q^i1wzFV7isi!*v|Ha)gnpRJi{H}CS(M|;d?-;3=jJ|@9) zZ~Ly79Q!zPZ&y9vRx7ul!Q!>^V@ABrN5`?A|-&lZoZ4@hZMUtea#t#F>p znfF%U_Z^?_DNI|F{I1tqV%D~|zqY-eo8PlFqi5@^ix027iDd2h%J5=O_AI}tvNW!|%h2Ccc`i0u3I9dPp03}vaR1KguND6_T8qzomfHXC*WLH^ z|I3d*e)<2$`u|@(UUttut6%r=?e_b1t6%H+Y8BqQnKEnH&9`Mz?|L|LZwp-f`}gqk z^Yeqd7@o|%|3Y(zs@L}1#mUFEul?cw|DpYG zUAQ;zy|-m|g{E(lW_VI#w>`JIy!`o`+0Q$942!RSkG(%#_vfN#K}B9~>)!9F-?86* z{`o$i+uJOkSI%S+wPIfyBCz4yi}w2#EN%=}5}W(tv(Da{mGir_?R2Wn)~e@OYq_KY zE!K0ypPrX-sV1XonFFUs*XmQwE=#6Z`A*JyUUpbuPpa5a(Y#B=D(_=%o7P_MJn*t4 zh(9bND$&o7@9>3fzH3<-fuApJ}m0fzP4a#G$ zm#*d8daZQro};T$uRgw>wI=YFAXAq^*UBral6jubF})_ieTeOA$?DH0j1SMgeVcb< zsf0hn^v|y55wlcQ`aO&BUTL~Cr$^9thD%Lb`nuOvy~$cjr!2DKlid@Y+)=Q*$Y}W- zzV_z3M-EF*4=R75BO|=}ycC1Ux-(MVF4BPl3BUI*4cm7uydkE0Uxw=RIj8oVxn}kL zufc?^W&7_(KA&=KLQ=`O==Z8iqjp|n(&>Es%&1Q=9?dIfuvMZVWj!DL`N#*yfmVdp<{{P$C+wuQ? zh5x?mSO5O%`eg}ixuKcs4!!sNS2xwJ;&so$sN$zgP1hm>_m~+)YBjqGD90R>Fml_$ z(7k!zYc{dS>#5lc0w#Q&+6>)_$tNdWJS(fjQ0%*P)x`d*hEuOR)(_QM$W}4!b(n`Q zckAmHWKVf4;r{|M1vdG5Odggu60?a)8jej*H&mgxfPtAW_V4SQR#~0^jTYKF1IY>TxfbMvtILzPUw{9 z2eTgcFBLONasH6|xOT&}$jzA%0_KbC6gvwZdPSC8ez`{X+LmQo?-_nReAszqsYKiV z>;M1$|M~yt{r}-Rqxj99KD^3OzMyUCy*qdIRQ!C@E?-|$vt#|#xy4-4ysw3BC3M`Y z|FLCVX5_TOS=lRfe6}i0S~b;6^Yx~E_5UBf|Nn3M?!+H|K7O{*P4T&2dfDdh$Nu_| z{G7`s=OUG&radjOs{Q$?GInv+f^5h)0 z_1dAFuTA>}zZ^GQey8;A!vf_X?dj3g*8at(SS5ZfV{A!{jI5aDwza-A_xYaPyZ5f0 z>V0zh$4^iH-4l@IV;8$NW76lJYt~;Anm*&2|9t!Z&&>Dt6`r+x-}d*ld3^8Zntd@r zXI8EKbno@yo27U2_CG$mPA}W*^sXn9u1lN@h`!CcV_vD=!Q9(Sv!)n}U+Ycfa13_! zy2@)Ezxd4o%Usi*XAK<=53_m#JzHljb^4&RN$2#(A6wR~zSr)xaE|esq`8~VX+1Z1 zuF`O50l$z?)$>^bJ%+08drc~y&$wBA|La^o?`4%?j0?ZnT0Tv-3R|aW;u7Lz8Gr6q z@=UAjWhdHxmKb5#1wfa_SmadMqJk#or-IZ5Y&)6D!`)Saz!Xm$|Mblb?{Z@&F z)JAa}v|7DeK*`{mgtOL8kpqQuL|E3fiKK58zhiT2>okdAuTFuz<-WHqUYD3oD4sX{ zX^q{|87zSLwNbxhsuHe}u zCY$-|LFt}I@5H5s3inLdvtyrHo&RjKcFQDFGn2-h-)hZyn#I&j1(g?ZIi@NXUWv-? zdA-Xm?JM7*X|25>GdG4M_v9UN5U-l`U}|yK$DO*5qq?o?dbYYen9H&HPWbF7A*QxN ztS%sxx^q5=EZ06IzCNz*^H=lViRCx=mx?fG-C~#)rK%lzx-$LDw6%(xPVP6~ zt*68#Ahmhn(F0P!JDv%xRn}X&X~iCmfo{UUwbh%Uw7$}kkuiKCfA~Ff1O)C zuO#5@oYHw~Os{3=UaGh0;xO>IT0W(9ZL!p^lr^eGRnspof1{NgSf8pT$um=}B`W)V zseatPoZWph`;RX_FK>S``=&hKuS+kBrgpTfp4YR#_N(5~E%)EY-?#KD3-TD8%9DiT`Bl`BY6cL92IXSs$sq>zvMuzbQ$6fzgaKjK(<$7tlJbU)cK!0;p z$XC6Xol)uS3(L>V^;<47*)T2kvWf5MO(u#r65{t&aWXxwnE8C=>)r8xKZS3-#xHMI z_p|2zmoHBaeRGVRwl(bC?{k&uy@}IOXL_$Zy=%>Bqc>$86_)XG^74AqyE!!4Pqtr+ zx~md$-$e08)a$23`_7zx{MUKq(`V1zejg6%s$-g7dGR%9R%}Vs{NCy<$HLdg7e3wD z^Lp1SKK|alJ&$)CS(clZmp7-lu=n(?u$61`eKsteHm92-BVs#;LHJuU7Zy%)aX}^S z?gP8laeZZUcxL4re9H3qot6d1-}CH~Jfb(3OqsUy*p62VYG zsx6rj_IM53yv`N+mV$|Eb2^+ZZ;4$#m+|4T+40j{7Y6Vw_)zN1a*ff!=VXYp%PF-nXVIx^M-e%!Tr zU#9u$nB&k_yLX!rQMlw+zs>#+r7O-{$ER#k{r&3f>+9>|_E&#@_jUDlhYSwK`|omJI9;=vTCAMZ z^U-Mj@t>R42{TOIc&>F@rsTv%7yZ8QZV{#_GL5RAzb)MLdaCZJHrgu3tnP1#z5agIHq&#_$rmPw?VDsgDUrcs8C%)4t+9)P z`IMU5LJqQq3va!ae3W^5khgA?w^7R6`%OF#mYYd!*W+0*O(uCuTU#B6@ApqrrJq0j z_%LzSvCmJ1_Lv{LrsVJMe>);{+sD~Cb2lj@-Y?I;zt8ZPjja5!mqA|l-j^RVx9TuI zrjuP`B_}U8?^j*;&M-lx9>aO(rx(sTc3Snh7Q?yUvCsFK-~a#N@%8unq}t{#Jj=3J zo$q>-R(C_6uio@Pm$t+mJ9la>-Ba_kXurLUo!#FbkNKC1D9wAGI_ue-%bTBGi%Qmw z-C4FK=1Ti-cHi~p$F?|odwkv$wr=Yu6TkX@pXc{3oT9~W=5x>MOW&QZmwRkE^Le@d z;*HDR?~3U%c>N;i`mOi(_g|OXYIZaCdX`Ru`L4!@wtf3|?zEhLTtEJue82m;8Vi}W zM1y0`=JXiuzI#tXczUR!+nFAY&>cqE$KF}^Jx;xHJ8!M(bcx_C=UyunNVKV$Y`r$6 zxa(xz&QnrzI%XXWz1GxnuiCbxqo*Un`&a~n#r$*PHm|c78C-8vhifaZFuE+6yzP9@ zSq1~1xA}FyXRmvEtZ>hq^HCg;Mdn4=%!-@I>By#+M`uQ|tT8=irF3bBk)R~o`l-&p>&_M!woMWIaOd2! 
zpf5^S&3gZ(=pC(Gee-aerDpd{spMGE#D^gjan~v%xBPnbd!7H3ERz|E)1$brFIr@q z#+H5N0~z39K0XTLQwNzIAARC>AJQW~%n` zm>q}Mgf1nA3UEmJ_51&SUjI|y>TUS()meruGdVcV9-e-_{_U^n`f>mM@z?+O`~Kep z=in^WAa?V=XZ|(3TK)UW&&TrrzpS5rTA5+;*4=miefyU8qI`Y)-g|HJd$w-9^3iDR z>s8Nh*X_^V)wlkQ3X6k6s;h!f_N&5k9a7KT&IIU2XM5glY|z=^YV=C-)T1?#)1IEM z|93k6|F`Emj-__GoV$6<>iU+qdFB=Q@8#|Py*&Rv#Wg-nOtm8CjzO}>%+xvAJzLMQ z-v4pom%y6u(q|_0tb4z0b#GTfg7Bf3LYdpQxU<$;9!;HhZh5B7`p*%?lX#yfGdLV; z+r}Gvzoho^Hjg$leen$&jKwZ;ESRT!?)QVaikJ30etUl{m*BD24*u&tUFN;FBA9P; zV3p~1$@RNDR8(BurcLn?UY8`Mw&?cl-7!Tmlk=GbWlz7XvGRYMGl9i4xp=n`!-4ug zZ{OeACR?^!Af^KsHPwuky|?yH?0jwF>+j#+wJ1=$nd8V5 znQyt9{WWFh`{~E;`=Y0FdY1RCx8=L2#e=;e;#wabnCRsFRyI#J-74PrrU3w zI9|zkX;!XZ5%kn9r1z7Pifm`W)67{xr+)vv7g1efrF(i-uw)x!%lvtMhUe~8C(l{o z>Da=td)w>X|B~K5ElL!79izMSefzdtlV9%dpQ%pYmRq)4);L~oy1=a2hYw0>Y&|B? zzVN)Iz0Do#+ad-j! zYsqTOEE#>v>3{1oX7#lO`@EJ~yLQ!=x4-*#uG;eJ+q-YizCBxC|MT?s`rmc4maWVF zz3%ePX~fCh>1BIeyQS75@%U#+n|A+{nWlygSFN^& zy*^{WbFh8JBtDO=p%*9RuX%WC)%HHktm7xA3NAESEg08+$*O0f64N5rxnEjT&sZ)G z3F^D|JIIJ3@9^!Y>8YNYud*D&rB-RJ*~O$7BJ1eIDInT5sd(OV)=rs;g<+*iXLNL) zf97=AwQhG+siWw(Go|y4L*Ct57shAz=6m~;MO*zm<*!9-$}pMrIVv_wQ&aUVQ}8KO z@ifkZ)$+IYIXm3b+ZW&!cU?n4v1Q-;9>dc%+zp1;q}Daun$Otri))Lczonp}Yj75$ z%8sHYhQPv;(Yr+QLMmfhUwz|X5Lm!9J<8Q$=@c&|C*~!^vB?6R+sr3!J1Z|Vb;h$p z3_&vlC%AU7PMEXNC8cx66Z`*9^}qA;Z~gGT?mzqUy+&Hwd6KV$xTUlSINj%0Fn!Ju zQmpm-^yTH}?YH@s|9K|9|Ig9&P5k`){`J34f0wu4Q(sZB<=nq-<@^6Wo1I^CIYWL` zL}~H~spXp;cf?&N){7GGI(cPF+lFbUm|99)e=7@4JSy|}c)z@y-29?dg=@DmL~YSC zS;(un;mYHe{~q_>-&_BG?%uBF*`Zmc4Ti@eg1I>~=HC7FU~m4~&&Mp47QemjU!!?- z3-ce9v$t=h-MppvFzD1SpUV~Vj(@iDw^V(wXtmbYS(f*IEX&@lXfn63?rezEKOEsdyOd|@{?El=INZ0SDv z6E}h^O%6`jxzqAV(EPrBF1M^%-@^6p{rh_Q`1~IUTmLHbx38q=h)sN{`{BUpM2IZVx`{b)c*}FFAx#Z{NM?N<= zrV$YvyJhx)9v&gx&x@|DUiVg@K)2KDuF!VDpf3IReN~0uwWdYwtlRnj)4#vfN>!hq z-olmALDZ1UZd$+ZqMSrm0&EEmpd*__5N_^+wZeLlpQvz*>@=5kC?Aj_Of;k_HD zPW4pdSimv0a-Mehu4mn^q|BBsF_d>n(e0RE*e2<{($FpRg+OAA_|@W9=_f3&Z(Epa zEqHF{lSwWMraW46^_`b)!|7c*pBITSO<1B6`+S?H?4xV_Z2}Ah^Oh}|buQ}aq7CZ; zUAA0Y@|xczWOMPl(#Y#UT1p)SJ}+x_KACj;f{AZ&@6JaL{!Cex8k@bgYlhRIb+4U1 z9E-RX=dNB9Q>?e@O~B&q>#k+#G9}Did+4)~baT~|rE|Q`I;6}!tEuQ<&$IQ)#yO4x zofjrB1{`adm!P!Z=4C5>4@*(AmL00|4VTX-*sY(h+pjly+h^Ip)7#E@1pB?-bxQPo zbQYWQF$L|9;fC%9u37Rvo}e|ATT;1I+t^0EwnI%pcww%Sb=AxZ{KW4EbIR{YKkncW zur=4a?zGi3ZcC^8zvJ?UVzNA68~LBEh!9Op3nQ<{#qwrb6e{DNePxm z*;{7t`3g2CE`Mj)ta2b`^{g$7J@bqXt@*TcZOO~(#hrQ{4Ne}>$sCKmear3=pE5;% z`D2^74jxJ7GfPtfbB<{}-Xbuu!RPUk`98TN3C-fl41YeoQ_w=Rjhu@yf)!h1oi794vl)u`X>yhX4Zdd$&bhLZ-Z1L=_#}-k~ z1qynzb&p1PAA8~OtHw?`xkT^%oXclZ&h4BQ<(0R4-8HMeWWB(wUG)_=7$*QaKe%a>G|Iv-Rd+o9Ly^XDkd-KL?zb6zg z%Crh|TJKIsJeyQ>$#uWsvC8=E47%4#=U&nac_HYOytdEKaXkl9kk?&<-Im7TPHS41 zGKD)D+>_4E)h&&^mO6LsR&(?ApjW%xvU|((l1^C0>xrAH=^H+i;Z(5b58t_Gn$oH# zd)1q_hA9PkrmrFT9vrSUBN3iAR31J6^fJ$tKvu6p}R_i6hF ztBzZKSu6?*{{H*$?sa+myZh%~yBe!;8w)*|B`0TZ{W|bO<>f8=M8bmFqaFuNXyoZI zOgTAe6>n>?Uef`dgWGd27wWE_KiBl0l#!gE>qezlv)rC&O+UT!$(y;`%|6KbT(|e2|uPx&I{P}|_x7)AaP@0~l!+F?f{nja4QyjLO zi+bK%zKX^8`hgsmKfCw;znfV9va?mSpv!@80j9613;DN5Zh|r6f-e8ACuaAn>Y7%N$uuyrz<3PatJM(ax8P!G0o>9P9N?mPb`nqzg2sGZ^h3| z*G_GH#W*)u?)d4ye`|GB*JV!Y2$(yya$c6Ee*C_qWkEkaFW0yK^TT=Fn>q4ramSbV zUfz-${py;d*}?6x3WtKU1*V?rdhvGmw|C#}o}Rwl<=N@yza^9peB1Qqdv*5PhtEE| z-F|;vp{1Sd@|eA!Tb3MOWM}p&!|d3VsD#t2g#-iFz0NCFI8r%pdTFBG>s_xhw2y9C z!yWF(^4#Y#o3}{u%B5LPdLLyUuMD$I)ZH1DuySGMoZ=0Itd+6hwOg)uZPZ(RtE;mo zD(2R*iwD-`R`V(yIA?xrk$bvMX|A+WGB0D|Nz12;%C_uqdwNS_?cr*1r^9ESSsl9c z@`Xw7Ws~J1t2dZNb8#}5_$IFrZJa%OcKP3i4lcdU*G7JeJ96ADC7a!{dR1Sa?(pCV zW$#qFv^Dqoccq}y%gWOy`reWY7rdBZ=o0cb0Wsh}dRm zoo9St{x)?6h6VRTUG6Sr$TDc1wA#bOVv)|#7j11kLP|j^wHg^5Ba*`UiZf=_|0w_e 
zPX6CR`M5oGFZaqH|9HOs-{+Sve|$B+zo+i+GS0@<$GhbEm$&QlfB*h{zI}aNRmGa6 zudL>?boks|;M>h>{xzre$H|3l44d9cvog-${w&s{y}~l_ddK4VTjPIS|9_?DH^_6c}_&W6v#r{P^!Kowgj!nc1$-_m-#BL|%*Wx?Z|LQixYFY1*92udlEF|L=SK zkNW@r|Ls@!v`Cg`W~S8txBd0I_U|u0R~EbetNF4kCJ(=^zJBfI=a*a7t?r09W;p#; z>0E{>yA<8GnjMc=vvp~h*iGKEKa0+3b{1SNV+h>Q(ZEr0pU3+Bp38s#fOorp{CF`c zx##V#KJE8qzqjT3FaP}f{QQ|me=jilc=%y~#hc`w_xI}qo^Jjg_tE&X&FWu%;eQwA z26?Go-}ZX0GSiBsPbOVYovB|uEvv^;CNFMZ&Bis_3_OP$7!2Ls9}bV-dv(I^ZSU8X z=HJ`1Z|~mS^*jE*+x>n`an^#>XEaKs&fC}g|GRzvkG2c*{QcwK$ch9mDW~S#U8sDb=oj8 zoB#LU4YzeqfBNuYgZ-OdiQCsroG_=HAz{tOW7YCrCoRR5SQZHE*?Ldnx9}EU2TqRI z+o5_(uRZ=4lHF@~ElS`dyE@bBO;yj6dMrcV?`3#0$F%sMl=b;ypN-F6&2d#p&sA}8 z$=?1j`dnzgD`Q~(w0FzaaTYK{Untg8HTS)2BFE4ZI^RQh)`gY}rMXXctutKrvS!~A zk*)3*d#!Ub%r{=MOrN<_7+OB(4P5KG&hJ96^ZvIpdl;rf&6vqky6*Y~oxPF<;YuEw zM`ujA>e42Xoz)}VHgD-NncbJ9n=Oyc3EDfaiep7&z@ePE2eUtWv|WkLT;trd%Q9F{ zo6A^OR|I_s~zpsXS_ZW)b-vTNDCJU`wyICvF{Mx@) z=KH413bLBpcmMC#{Q35^^QP~dvM90oR%6BU+1s*rKfL;Sd;H$t&yJp+e$7fn@o3h^ zv_Cg}HZYlrPV{ogz5MO&h9^6pbfrz!;p|YWV_eApub}wz)$sRx+u2`-$fTFHZ7f*3 z)oY^G(kn5NP4DOAJ3rp7#*=&FeKeC_CS#&+*SctPr#6r4^RGp@=!vo{e7*ZZ+vS&E zex9Blq_m-P+ByH{+vE3cJG1r58L7ofqh|R|+FUnL(645_vP(>8+D#e(b^pM z&WVF<<&;g!cD&6=j`_66dC8pa(z034KEJFfF5Ym>@9~*KzqaU}779*!(ZLf~TT@{% z&u{tVjZ=O-J3D*d`#D?BUH}!x*}LWI|7?73&HM7CW#+qc)Ai%^reBQw{pjiG?fG%- z%Vs{mS^fXRhXYgd^YS?|eGQLU=y!MU)RtG5e=ey1|D~Yh-rl;uzlwI=vA3~&zVGih z@!x;{t*`%k`|;z$&mQ!Adb|DppNI1QKK%V||L1%CulEcIYo(XJeEIUnj~@noi$7KJ zMxLG{RPmm%m*MKE_;>R6_Wy5Kz!0=zYee$Q*A{&Y7fNeAHJ9zk|8{O?YOm(8DPC7@ z1?+UNlRZD*t}f=8X3H^l`917QqJ)a)PIsN``|s29{WYJv*YDoeTf3gwwl<{KF!yp# zu&Chs=B-cWe2&ws5oPdNIsNoZ$)@sSQOOg8*S}unuxH!bTia}9zA5d|F}}{lbFCm% zQ}b$&#LWh-~{e-IhGq&ScF7`@`i@o?C!Up8Z5rgZj(B}-4a&GGs(`CzE-rM&xtLyR>AIsy1mR^bJdh}Df!|>dlcV*_=%~L%1 zm#)Zo7L~nntHB0_&adZoZp+<$_usZ`tNh&imCj#xzyJ5;aC!afvb(R%^XC+|oM=!g zowe+x5!?L;lh2#DI#&5cym`2xKhsurKEq85@!-0Onk_L}@rJXiE_CN~+O?#7xyZI- zTO7;v(Cgne->N-T{CcbJ4~LEKr7{D$H)v^c%;CP*E?~BNx=`utV^RJorE;rIuYYax zDeH7t`jOBArD!RmyxaPF9b>p(l$KPSeYM2*hQ9~bZs)Hn7?;RAe`(a4yfUY^@Sa4| zq{Vy}uBVC~i27`_Zs!zn{rEdz5czyw-*Nx>_V#~&JYM~*VCJ$N>->*T4%++G^J?p& z;%9GXhhN*-e!N=tzIFEQb;^q*V$V$t|MtLJug%A6=aj6qtO?0Entwh#-B(9&2^_M@sS^52Icz${AWwS<(z1!Ev?KL`PV`HOpx~RIi+hrH0_SSj&$(pJ2mPuST z@pG>{quF?%Zh!nV)#rQGa2ub$e53U3hSxJL$N&FzJk|4B(9=&JK0LkrykGTtX>9i0 z8--T?Iq&VQ`S|YJGd1;nyLZ=qe;2E1dh`@i?Db5KWX+izx3@{Rt(&s!Wz@PFtN3M= zE4NA*#a`cA_cbf_TIsCdQ*AkyORhyF*A~R_Zs6M2RT%pEugSTVq8nd1GMeAddRJn3 z{&UT~Inz@WlRH*qMlF2++CzQX==G`{nYXsRc2Rk5I$h-PrEP1Cly7Z&y{l~A^mVW2 zez%hx2itge!ktsQ&#*T?Z5fBfXkFx76}%4c43QGQ239@ZEK%QnXn@B z*1GJg`y;fYvu6Z8pM0xY_w*vO-Ieo>6^0o0brk5{n;Dg@tEtX3p)}G<^W4v6xl>NH z?{(aE3?3O$Vl%I^GI*WLj{3dt_3wKNosG6Wo1@HS@m?@+$C4e_CY`LzHaM!fY)!G` zx}Ju6tS%ZsEi(mlXD-W~mCF=ZGBu*8Z{?9v-K_;{BLWTQU7MxL!fYD1#E83jMKCX8 z%h70ekK)K(`~IJO|L@(=)6>rg)s$D4f4>?YZyK9E_gstP)l9Yt0udJ`XfInO#CuV| z)|_+0Ny}9kb;~xMTkzw-?R8f*#q!tbJ__2oM0aCrq&@dmF#!)vEnnx-onAk7?W~oY zbuI4QT+`3H)@aUp!n5kr8mEzeDN`&UF zh-;o)TCx1a6g8InEYJ6BJoocKjlr$zi<=Et5~PFj>&v3oXI?)Yv-8!u+~&56VY*ub znHfZcZu~H5Sg~&X+IkhIIyDfmMXd2b^D9l>-&Dc zdb`?u{jF`>YtJt|ce-ZZoa3iY=wzJ;YG1T(hUN1ZuluF#hxsNfD?DX6Tl;fcPws5C zi7s49iiU!g&%d`j`(;Kxy6QPU+w(jB_v4Q*uiy9g+tt<6FWcDOsn;}Rew4cBPW?|K zKXH}Eb8f%=wR^Ytz29?;uixUYz0bdO+MJ-v+ddohC#$YHct0d9yLaQ0IkTVtbnfau zo_u|4?tRw#$4uv)Pv0vbk$tsq)(*YTTh0ZaymePvPiR&`kFRB{Ui$Oc4{^J;U9<9g zeD|%cf~MQG%5uB-DT=FCU6}IeXNliVEAQWTKYTiR_riCV71BHEUd%1l{B-y1!u$N6 zjr3nPb)36dIZto;=5Nl1%O-IvRlQz&f79YMTc4~E3{#u9@mhO>$JP+nCwK4MdC|N4 zwN=`@=RdEW-krEVSu8te<<}BB?(VI6OH1~}B*n}v=6ZWqV_gbEAY&EF79G=dA9r~% 
z_y*2+Rv9~OStgI9*&0p-7gzn-=cgmP&sIq4KW^!m#S!NvdAv|Ye`{Ft7QNzAM%`CV_;+MQxbm%q<_b=|e)iohYC7P}vH5^D?_W)z>Zd^+daRMSPl z{MROJ_E>z~)s$81k*wg=i-|pf{Kq!w-0EuJn%ZmVcD#JS6&{yMGcv!*MI_gJa0$s` z^)Nj4{r&y=Utctz%LH#ZS1CVZ^-iO|4hg;4x_7q53OnhkPM1kGNeK~LwfXy;?q$bY zFIYS;y|d(Sw35d~riRUY={=gSek@zpy{|!Y=_wg0C1+F3pG{NvmZ{oIS8;U`&eGhe z5^d_L|6efSowT2?$6#5@j*Bz52McH^O>k^yu@{z(srCB$^Ni@URZQQW+iNxLVC5@j zUAJe+u63`Ce3yUzSYze;Ic3e;-FNHiD}KIOx^bD&agoxQ&-ZShw(RSYWg_0wYywp` z#>iwpDOsE9yLo1Bsb9$)#S-7^rLouF-da|CuUTMXsF3jVA3q--|8D;;;MK3E zrO&uG`jmBrGss{>HbaDz}y>|0;@O^a|I1?=QVPH*uMH&W)XFmJohl{NOf8<%z4dPh2bQa^>R!u9@4)OMYiAmf~ZK zeE##tk5noBSDeOW#ReAH*?K8!zUOV1zqM|!t<0`{{+jhR^&Oi2pKGk*i_Sf^2p3x? zDljE4;q9@njxNvl8q7Jje9ENH=c4*qj!C8MjM1B=r=fJNGBNYkhpW4{pZnSVTmSgu z%vsCkOx(kLPyh3rqF1|~z58b1J6+_n#o7M;e$7RJyoA) zY4<-~v%NR*2IGt%E+N4ac{jG^iaV{ayr#AD$>#|D%QK2CWzT=MiB7k#{inkebF;F% zy!`feYbBfc{gaU^Y^iwB)6XbIn#^T%EJ1Eh@Wb z(aZRb6$f`*%VR0e-CBGyE=vT9ocFWo(Po96^&>86HOut=8Q{Vb?etUWy_$Y<-6ZJBv_e0D9L ze=g~?kCEhFl`waw(elrgljB>rWGaLzI;2c14V-%-^qRg@X0+GJn5&gy$s&xZQ#qV2 zo3&;18XgO6sV!)*@qLwEekfw~1jAjwU6fL%NGc_$F&MdcoH13qw#Ds+(wZX;CwE>s z6@5#ZyBKtr)R+AKpVsHz{`V?;e_33$p3sV85(<~T_$V>0ls@P&Vf|;lxsxw$JHYtx zY^3W3*+o+$Pde|M5*6h4+QRR%$?G}KYV6j(w(@;!xR?{(kx zEMAvi2wRL|KG2tmoMMm9>2ft^D_Ci2j;)i&PBGYHLlqF z`_t3Y$Nl4rde`6I_Y1TS;dK1pMH|Cb>733o)qWVsbzb%Pq*iUK+|Igf^-dLL^(W7t zI#qg}Wryj3Q?qtoKH63H>(9q`8_NBPMOhs7@V6|9&e9B> z@%(4dPNADouP^^qW?*PMSIpPme2VMWy5wh>F&+sLY>VdRWHWfI?3-SEduu~L(ZitM zi2{tz%|6cC=f3mFnQvDMs*9_CKRSB)@LO5q|EKr=0d@WBS9yJ4?$hj>uKBdYZvHjZ zXFpS=kuY3P{`EvE( zO~uX}y=oPm37VC$VuEZ9%QCZdPj8wMBQw!-nbC0x(@oddR*P=97UUI~>|@w^VM}jy zP1USrs?TeBzb{j>NY2fWVtBAhi&??w)-+8Y$IshqJ~-bm&9&C&7OZ^keRBHg8{6L2 z-M4yPQnapLW%CM-j2G{F7GC~&=J~5N!VOVHD@$X=*K+w@=G)EK+2C_7a&`FaZJN&| zu0PMcqqdewpi#{vm)^okC%XO^m9P=556--)7HIB|m(sB7=({3}@ zuVaO^;?z&Zn+?O1c}i{8#kLk(@PI}WrQ06X*xB0I^*vsp=ahTPMANO?i$k$u{U;sS zr5b`J&zDq|_};#A=gzlS@#HJEsA8i99o7xWhD9=N5N3T<3T!U;jh#`5B%4Ki}rB-KvvS<-B51#-g+D zdzVx(@NT?ryVUVgDpw~1XYtiz$+4$R;{2X(zU81ledCk}@5pDV((gE5?AyGgr@`e4 z+jA%71KBa5n>;!SY9pG;*T?OPy!Lf{{cMw4z5*^s1h(dKH@UR1Or1Gpot)G4Z?(6# z=f00sU`S+9obudvGJ})u%Oc6=m7E861bCmbulats{rmUw^78ol&tJpi_uiFd`+AEWdx&z`OhpKo`c`@w7T_51%llXgz?JgH=~p>;Js1LKym;&Tq+y&6mn3#!&8XfK_e z^jN)c{)4TT1oJ#<1C|>o8YxZU<5^pJkNqtF-doEK<^#D>?t9J&XNZ41nNzd_pN)t)n#Q@5oAa#R zN-3c?yZzhQ+1Jlir?1tkKL7ml<;%gh+m`Xh{=X%#)YR72?#}&s#}&@va~5yQ%@TBe z(WB}z?bZ5!vrqrrbnI&7{cU%zvM&7i{NL&4zc2qhqLX`FG3c_{FFqm3vz$6R1*XlJ zHN}|e#@yGx-rWs<&+_2&^z+X@XUz(>*G~h-3`vY{Z`9ixAfEX_jNCJ zy*qo`TyyHmB>@^Vzp$udTiY>Yg_0du-$XO7`9EgAu1RIg4+bU9)mI zW!ZOm$wJ>_Qm!eJ4P7ou?bg#4GFzCKrc`uq-Ro61C4Db%`0%jc#@jMOxA$|lZhKvw z{&v^8=ciNm#9XPowpEv51?vV*NAGYB-`m?>@9KHHBa7C^OkaPjYlg@82r+^taNY?{+4<*H^s>_dQ&7FMoUN z??>-kZv^nL ztm`t{J4kC!;k8Y%(`OW^F|GCaY;-MZ?`MVztGQaQe;#Jjyt>9Q(&fpTt>>gnlwyi* zW_)*fm2s_CdC@9ao((HXjB|pFg0p^pdH8t#|F`$!_Ux$s`%Bl>dV+&Mhh_7Dwy91k zn>}v^NHBQCdFyE&4JzunTp_7H{j|;b$Eh_n6*hDI3wxuhPF`E}aB9ahftc&1|DL&C zFVDTb&H47XyBGN5IK^8T4OwarPHeqZ_WkSbeEsL2``_!T%sW^_68_w=c)5LJ?*2L{ z)kRtsGiR>f-*sN=#M-93?|wooQ_lX$;o;&k2r;ax58R)cx^Iquyj^X_>mYA2m4;PQ zyqmWyEs5s6q{0w0U9=%?e`T=NW(WH>UhA@Zb{3s0ePMZQMb701v6tBx3eUAlB^4-pY5o^Xe@;Lj79KT0W17(L4V1<=^)2UAA@q|JgD~C6^t3 zXCf=OQl9Pg8ymmrAB%Rzcu^BUjm@1w>-gWM?jsImE{jFlG%&!{qBBvGY)H$uC z;F~dJO@V7{MVj8uGnfCQ^v7OjTCr|T*+S-qsMY-Utg?HqMrbDcKHt5JbLWznNm`G0 zuPeP4n)X+~(0gra+_a!myY#je-8MTW5jf-WRK9i0+cQtCO4)p@<-(Py`JbO$E7cA9 zlA-L}VZn3w_4W0?>t-zbS+sM`x#v?%vSt>aw2Jc4T%BY4oJsSe=-L(zL$#w5Gjd<= zTKC#o@WNGbW9jT$%M7nc87+MhmEGf+s(J3_ludj&Z`q!)WuHmoagBK9${K9t#^<}? 
zT*$h6(f)lL!m7?!%4He)9-oM5RbAfXbWS4iT&MQ+9d0h44#u;#Td65JdqtaX?TRZA zNL;*G>edTgrIZtvtAkZr8#wYxc(UKN^UMGLT>tBS{omR5_tgBYdQqJ^@wJYJ_q9!W zR_>vO#V0f8Ej#^m)49)5k8RE#e_SZDyfpGEOB+vvLX%#FKnjb>ZIfJk9_zn`Z}Q&f zegD1p^EruTwkNMvxhfv-BTBa#)A`t9h2vLn_4AbbWZy59Bvo2iu+}6FYq6< zb`n{z#Vvv3p!MrDA6b^f*gHLl>ht$MewkCEbInFKpwBznuB&!@tk*ad-as z|NqgSzrS+2SB&9&e{qv9Z}saxzFWP%cdJ5>wn2#FhUo9BZ?B(J?A6e4?T<-Mb)16j zcc-rDH=ec6?q#Su^nFpzo;ndhN0Vz=f|nN_|L;1dF!xaXs^q=u+Pjp~By1F6f*U8FVHZ?yj&ObIYVG>Ma302(85ufTIvd4evvnh;c zZFC!^cnLO0TsAqo^h(`+ZeN8%L0rX(P7h}Vuf3{t=>l(CkITiQCpYQEUT@tdJTc1h z*_J!{%RjHYw(7!b0jcb}CcXXsH>0e5g+qQmo4vmNV|TfsUjKLg`TJ|?{{K5Xt4R3P z1-9q13-=XXjk11I?tI_g#_r#b$NieimSxTjSzzn~Cwfj-VdZ>5o5(1na4Ob^hP~|DXK-55MnAI)DEj|L^Ji|3Cl!mVdwB z#^>^$Wh@H5mzfzPKiv~wl6ZsnaKc<)&GolG?Vof0`IJk>QXjK2-p+b(e);F0HQ(;N zFRPyVeD1mDrE`n*mR=KJTGW~C;-mAXcE{emSJkJnaBVi+a?JF(NE83Uy?ggcwk-@h zr>g%thHJTq_U2uSYd%=2IyrmHDzHBvoSNyh__+2&$3r`gEz8`pea7>h?|09hJzFAq z%GUSg=XTCmG*4<_=I(8(&QaU9yuQ?t{yg^fwtMaCvZtp;UfX&l%IoXBlg|tf&EXa* zS@^o0kukaF^ro609}cqL-x|wt_U_xciwtMnoS?Q%N?_jfqtTo1zyJ2T_LSza2=B8Y zMS3o7_NQkv>=6?Tv|77$nMnH#fxw7qsh2vU-{1Yr&c!&V_}Z*OnP4wP7TL@U7Y^g= zL4pC+in&LR70!7s$uQ+|pqHPf{_9ymTcRCOuTExW5IwJPNK@_k9R4`n4-5j2SABUX zvH07Ub5Xhfvev%drNwt&LOA)3{I`x3e?G==D@Z9lbq`-R{j;lk4_PvEOR0FXnHWo5WL58Ja=5m)^84BA_iHYfzRlY%-xhn@B!x4O zGa~wgibBL{`2__V08 zP@MHPUt@g^hKzKZEGV3!-TY&O((+g z=ani}c{aR$6{2|J{C@YZi`ImOu3No1>eSC4C6;q{g#BF`ezSkYZ{8i(CON7+f3oVl zVfAx{x1c3s@ewW!A`Q~B(k#BTHSC$bPuDUmEtF^V^-!0fzLXWOcbCQ{-u>mB+*2Ch zExX)sUEur8hRaH0oj<&m>OZcYd}fo*xf|Q#_v%eO|NP_m`oFh-e}DhHvh14DtYurT z-CD-GHO#qezdeuh{$m+)^KXSQB>C<8`1|j<>h}{cMUV^Sh{#Rl1@5kfcQQ1D3GpFzW`*!#HeZOzF^UDW$S*q$WG2~t@jqP`N zociUU*}{clWxMO@Z}1)#xDJ~8Uw-`f@#V|AwU)cOv~-jRi!~)r%-nNDYx{QdjvFSq zUOO|HPD}`TyKCX=+mkL!g$pVi`une-x_a}O)2VybWIMTR=qomC<7*f9TEpIu8aUVc zO3RYO7l$8SFgvT3ymn5|BVDJC_tIlull=!iq} z+WY@M9Ohrhxhh|4iuXz{-VX<_Yn{36b%6cTxz+EQd6rJ`T6;|*JGN9h>x9InzAR0K zC$*2~oY#Ckb6uUwQKiX-k4=`%*tJgiP|zk1Uf*P$P`3B7l7}Z|uk4x=uzGS9lc=<4 zDcgxRbH3hNE$WkddZ|IUn)7R^(1L|pOSa{f++23OKHK-Q3B#c^mCie-6zz;Lx#j!V zfA_lJDFtgwV;?^)N_@EP^{r*8>!d$5Yb!a3 zMV`g%cs(n8eLVm6?fZXRUH|UgJ1z0-IfX9swuR?>{w$<8G5El|226*Kb{bx7q{2+%-4@EFYo_*y8i#;{r|2{NjGUa(UNdt#bK5M2FA?R$?^O4ReyYR zERxBHVdeY3XLGNYl|{Nc1T1(kt;?N(XF<@4Ih&8Iu$Nx7EpuK6!(zTy71>+1?eJi1 z(Jf94)?KAmqOHGT)8lldSH?PWWa?)f-irl(j61y)=C}4&+`ZVDW0s|O z=TC|K&2@|nOJ3D0uAE=BYmT3Pzvc1tgPJj^lQyp{3xI{&TLB9daR z&(Lx{7snrh)?_;=ESh6dX?AqV zzn}B}SKfHb7jyxy*sV;dsf`Yp0(wO@I6In#co@U@t?} zgf#g*&pw#>CI~XO?X-G7TP?yV%UY1-h{WejI-2S(Z+`8&5|b^fzxY`+o4nR$MV(b8 zTaQgRYgzZ}&&nzCm8X@>wC?s8KASW5dn1Qnk)GkYt*aK?Z@UnYy?%*%$co-eb3(tZ z+`r{mL{V7K?t8C`)}8O266aEMPE-AT?fvgo#sPDqHE%|}o+Dl6e0!VJl-WshmUVXg zT9`k(EK=-s%t=>Pe#2#{GZ`;RZ9W#-Z5|VR-)vn=`Sr9u|Kq{eOUw1&xh$CyQ*`cR zM7FoynL8~H=Jx+$c@ElMy3Tz{)Tfe}rc#%ryjv6wnXWy(Yu#%beNEM8QQGO*YfZau z+r8mGQ!2MyD16=3h-{Pno-QJ%SKm_<6EjPi@?Ac-ktt@?p{3hAd<0oescJ84>Ch2U zzOt#&Vxr1A3#m`%jFM9w7@R#^f_fOe8xAO(VK{YFS)o~~(*Mkv-*^Ar|9}7A;qCY9 zZu9QtoHvtkkKr-v^d2?c980#_RvfIH9VdjQDZMV5b-i*HXkO{Var?ig4M*is>``QoDF}Q{a#REsmb}`xywGjeE9M3@&3~`)%U;LeRn&4U-j>2eL1d-M}mUt zYHVu%e0a=n|0i{iO#B|XU2}i_+x@#%@Q+lzB&(c9VcpI2RH z^2jhtJnsL%LxAy8b3$}u?C+JY`&TcjyWAajQ-)zfr{e$P8y^>cZndBMzIj>hi}GNv znafUJ{yV*NX79}-bAuuyE(i#6b(GsxoL1U7#j3aI$?a{|Vv64G`u6SJ)1O6kH8zS1 zi{~xnGH#Gq^6;6OiQ;oBU(lTzFDouZ@8NO|>S6lyX0`TKpUWLBzyJ145aCy5*jnJl zDYCskrI&l{?%jKx7nOR&T%2`bTkl3K@wn%wD^^}UJ$?PIyoAQ(%ac>xlo&5)t-jr{ zeEDM=-z`U39=&~=tJ#{(tJtzEHu#L{^r*+X&Rt%jv+ncHJ;zF8lQpkazu$9qrOs)u z2E~>K(#`i4C~sPRdB?ibPis~#owY6ZR@lv%$7X(h`sFdd{PWZ+wf^PTO??;7+7hlG z_xI8C`|rw`6=eL6Uw(PQwIN{t@zU6~Sa+AgUdyM;GAq0nZMye+&ZWbfj_lZ$TYiIo 
zcjENmsA*R%?|)m%ux!S&)IP(ND|}oF89w~@65;*!_r2e`vNJ9m5z2nLa=qI0@imx}Q=u37p@jD_J^sjG{UH`~nY znCshK=kc)%&R8a>+ZB;E@7YZSfy9+Trh=z-CEj?sdG9gpsxC>!tfx_{H*Rrt7Ypn> zb@}BC?`to(=2|b2nEKkwnJu$H|U7HUK*>-^m)?~k?v>B z!j{`zQ;te_pFDG^H1_tkV-mTOe(k$Hy?oBG<;$0cu1}3!KQ}-#H8O0f?6h5-njISA zTiNoDEZebc#-m;9+A_3Emu0LqO#$tn=~CM{oohDx_O26~))}g%i8ajWcDuRu^d#@F zg^kWteJNb`1P|GqU;a2aCOq+3l!OcGv02w+x{U75*xAHzeae*w?KL|cdNSv!CmOVE zd!W}irTCKPCMO}Yct$}Xi4Etxu1bh}vg;@ad&|#Z9QJUMu^?AN=`;aGo>SKXxEK~& zh&QRS>4s}12&JhcGic>8y~%Z5#CiDlzTe0Hf9U`J`Tr;Ry?oLUDThTHP1)9`Md$7{ zOU~8ZIxXtbu5*^I2Yx;Mce=g*y?5vSpI^=M|cad`RhkF?epzw z|NZ;DKkiuScJ7p(jww-|S0s-9t-JpH9xFq_$*oUxQX?-*1zW#iV?67mn8J8aH@-c4 zrE7n3h1`YA@687ojvQjqIP~gt@WwsQa&mJ`PHg+V%a6I<$sv~eY-!U9YvW1T-pi&I zW#8JObjZx6C42rlSI!SRPamGYtaoQ)I%|2{qOK{dIKr`unlRP8+>$U!E5v-m>uhue)_p&r3UZ zJ@wN(e_5odDZthBVV2|7wC_*5#r5O%t=ak{=H2`E%L|>9-Zwk0S=#eBBHM6TeN|P^ zfyz`vm-6y*{kVMrZx-^+&CAc9EfD0R>87$~X9h==U*@xzI_@ zRGwC~iu*N{KR!ME{QU7{Wd^D2DR-=TA78#K%y94CU0J^NDVO$~@#lAE`EZawmY-?X zbEEU0H@Q9Bb)Q$!V(B8z!xb~<&y(|iF08m!@k_4wtOvCjv(=8ev@I)?I(~YS&+^AR zjii%%B9uI4c6zWZ&vbN>uhHG=py+blClb>}px?3A>to|6@AlV&Z;oE0{i%j;Zr zPnHM!g_)t3x2-h{+p2Epmb_+`%hgYN4As^x4148Te9o}DtlDz=4-tB7C#zerARU?lZrX zR%*$CqbHP0u`M$=I<|?dEw+EnPWAlNGVSv92vH zk9V(lX0&``0juEDL)M~;pHF(6HuZAq?Xr5ah}NB(wu;GSp1jfaoTDgoF{97~zIQ$! z7G9Ie7GhYr>y*Q~4!OC-9wA=qN~MGZ?)UqDfBAR3|Me!B`F2k~U3qUaQ!D6%)bkjxqpWVumc^$o_sjn?Iv&jNu*lNx zo_)>FA13}Aa}S-()!wnJD(L?7jH~H@3gcTl?ohjzrb% z(z`FW$en-w`RPjOZ?V+|PnI-o)L;_Z_BHA0q}1Y*nLjx4f<(4n>f|smSZJsJ%7tO4 z(PPFsv!1r+f=`OgTW1>;>^8|bc`*IwU;m&ZYfTt4RfA<$^yM&e2L9&~nz)+zaeDpC zKX1GrEL(r&x!TbROS|6Z$tMF@o}5ZnbPlSVZDypsA>qaB!*%zos^y;VIkWJ!^X)IS z=>kPjf(N9W-ZSh~SYvohLVmuVhu`CsvmPYYXWU+vZOwFUX-IF35JQ?0*SYPvzrUTm zJ$pBE!<_SvO?qG3=$~dfv-_?>3}?k0{nI8g)FVb-zD7($};@8j+qE#29ZzVx!+ z4H0--p-tN{ImvB?(KVb_uhD0Zuz`4_L`|t@dr`qtl41_ zElV?J%?Li}>JZeU$g;QZS9Ve>7kfuQ#IL#w|K1l_+ROG^KHrwvt0=H;XWHJEd4Vox z)_i=C(8KF1@oQmGZB5lB>&g47+Iq?t8bxRK>ZTYOGk273n>H&bX8P644~?a-cBv+R zS(Vvy_bs1Rq}>~S!)2deZaMa1P42B@?F+gyr^-&7bG4zhEks5z`HbelviawaNj;BX zc~)sD*RPoycy!jerR*gf3*T4E9$)$3Uya>s^%D}l-_L!vVNSSga_;A()0=dDt@tdu zF}LH^vWvA>Qe}!y`erZ`1|Lwic5N}+2=`Ry17(YU){-+ zb*sx%AaQmvQ_3mJ<(YftoIjo_wf?4J(c39Jic-mY6^yH&@BO~&*gX?I28EynQceli zBo(gDX;4hoRejFEaQl8!HJ)`)xNX1{=x{pZ+QT)SX}d`PDPM3F3)7Z!diN@yK<@ z{{8XSKfXKq?&<5=p6Q>T?l>})XELkUhGP-=JD(N)OPRgZc27??J*BXc)9 z?%B2H$#dxyZ_^WQm*#pM*4^sx|IRZ**E@$ix63?veWd8J(?631drtjho#C3ba{A|& zKYpm{b?5XibqjB}wQ^^efeMr6(;1ue7CCe1J&Ou{{P*9xb8TkFEY5Y?RaL3`E-jdP z`sbvedvD7=+r;ZlKlNtr-S^*PZ%>m^TzAiU+S$!Xb00@_zNo0N>Q{fg#SZ_VFV&z`l-yk2T6=l}lx{{KH71`9Wp>Q&SiS8FpFE}fyI^y|VF-^s-% zH9;e&k@l`yYx8D3ySh63vdQroK~bG2CTu*ns$}!~Te0_Lzx|%|`SIh&moH!b_;Jbg zDeKWbHAFWaFOaGT;_ACq$3OQjp|NQaP zIFld-^C#|(DzPOm1@fqOXOd7TzK6ldj{ekf96!;`mM z`(w|ze(vnq+O;R>RTk@-(C5}`Y%)q)uSb>Iue}uWwKMtPsx`l|-KKVAIC-SHwM0oW z%-QV1_C+puwNRXALGw3hW8csF|DE1HE$G^m3}KN)a;YwCSC&Vw-etO_fYC|!%u*3C zmeZp0^8Ww7{+E&Oldt=F`+ENVo29jz_jCWA{qJ@Bmeksf%Z^57MFm%w$CcIp{`dE} zzWB3-7i+ejySOj^HeY%Bv$Ns8(jVTw4li&2X8(Wr{+b^jFW%_eIj`^dbp5zLudc@* z-Qo8+*z4Y}>4z0}sCA$5_KVq^-2SL|(VR7k>>2IPuFkra8pC5Vi@71Yw@@h|TT?av zw!Uq4hI248NArYptbq;@4Ry1Glx+N^*Y7?5HFo;jeYJCC;_CnIob^!jNcGy*g3|n0 z>21$z#gsTsf0pz-x`i#!jDXDV$$B(}(QgTXu5p`Iiaotiu zi6fHTY~CxMpI-9N`PhtguOpv)Uheo3dt&Kz7c`%6Z9} z7bm=ISA72GLxKHSVWw#@x3=xvUH$RXQ*C)0hAA<}GJT474%K>QmQ7 zC)a*Bd_kMBw)f-9mn(C3-=6J!eT$Xzw|ngJCj!n{-hMOZaVC%A3!`PZ(SM(t`(FOu z-0>pS$t5e7d3*QnU5mcy zr&W)cE|oL z)wE4nzFrw?SDra-v-;}w8GbXo^=55s=%`Wc$w=F>)r`fSr+I7G?!<aTZh46GGsoXD~FvtQ%QV_I=5maN(7-Fh?f{T_8C21O@h6K4;TxUBz{R3?cs)^;;RAPc>}2wJpG z;?$*6a$9dss92fi`y$ssEnuZAtE~a;+YtMiyMEQe>zWzhbP1A 
z=G4BY0xv6lmM_k|koCQI^^I%nx$B~*d0J%1cRmR|YnmpeHn z#=ighx%W3GezR$slDj>8dgU$|`Rl2>&heN{+#6@SJ-chxJ2Bw%+#|b%WO`rR8oZhvraP8JNw-4XFE4z74pZ~_pw#1Ap3$AQC zcBB1w`;?-GujYKUi%6cn=UM6AIr^5*PMI$K@#Dh+pFM{Y`x}nUn(}G6zr6nRz?nBa z66QP&nR(OF{!N>C{yh`l&9~q0ulYHth-sq#d^_2G|KEQdnHgqnV~xD>(y}jEQ*)Wf zEVtsjrr(l!S8v|N>3G)U`RAV%HgV@3pPsH?awu%)w4m#$o|aRCyftg@TfbkHsk8OX z_uu>8x9`OLb1anG>wDejBUrpJM!~F&AzpMAoU}kaATFT_KTXKt_U~td!&oi^HE(pAvW}ClFAn?AzT2DEVhgoYw z4=FMo7A#pbEdshA(N~*@+_x^sHe~)w9N`{FRyPM{4ig8Q2@N{ioyYc7j z{Ib~pvu7M>UG}oZ@AOX@?IWig%&rSb8YPHnrtowLXa4%~YTMS1_fXj+ymUcD& zZkYX>zW>TfFWq}>%{0?2)m`5TifYweiq1WMeE8=Nk;h)) z7iRi#emHd}M6ZL3Jxeq>V8XLZwu##~mEP1;7Z=Y+_R`!Xv6ek~jo6u(Q&QI}=j{v= zczNsFw{PDHEG%B@O+RhXcly)9x4Z7DUX=#*u`^`7&tDV9VyXJlW{ny}2>}|!1Pirdw|FdNZITo~Om7Ksv&fd*x z>*NHA&+Wc@E=s)S!tP)WIl(KlKL0u^Es*eV&yqQtg=BXYoys(Sb0Xl{mjW@RqHAYA z&nbS_UT41Iy>95U(%9*$LK8RMvDP(^Nebwd@h=Q?VQN;?6s#iwOr`%_^>cj zDtoRV=fcEkrLmRd6Hp_B!@|o#nLI>_a5@;M7*AT%q(~DV|^E?XFP<-W>C0KCg_s7OF4bv1GI6T8&d5 zOkFOUOpOxpZnQeMEzD)g4=ts0QIGe=iFEdG*$D=|p5wK;XXE)pu5W|CPJX@Sv!ccG zbr;p8-yMls{;*~hlkD-#d1}c%=~Zd_*3IW>m=Y5rzrV2MWXFeHZZ0{q99)h?91VZ9 zdW)ulmi6;5a$?C}4z{a(sbyC%Ja+nL&V*TN3WrSmFI%j9^jJ$~`l+JO9UW?MO%uXW zb+vSSI%0lSu^0*EuKAj4ddy4eR*Ndbfk{POmYm)vQ)j00c3gB?F@Ld|NzaKhnr?d? zIyky_6y6GZ*IFGC81qP@IvsQ6XPpAY}Oef#=Qz~R8ON#1LBO%3AyyzK68c~+t8r@N#c ztP5rTn0514WZ|x|pzHQugLnRloL{PIWPf$z{5qlan@_$y)MBMQ8CQ+xBQCm?lqhmRd#sa`(0BmPjCMn<$c-Yw2klM zlwRfo$B!?MDSdS*;<<^*r+4pM*KVC=ar^Da?8j2QOSk3n|7Px9b6SbvUUl!2 z4cD$ll`OlmC6{C2wO>DeM9dUikUM+b`+Iw84{xpe^+j{(m91wVZYVw{c&V=5Lvw%q z|Hl?_vzDE;to`*TQ^x)I-ManzE$^84UWnZ9!P~pPy72eAyWxskG>-*odt1)!n_igZ zblc=$*2ie>gpNGGUvEu^C-PKqB%XsWxdgqpddj@CCm4FigRyUJdd~$v3P6l95oi6g_*Z6 zv^lh#*%8(oF=C|1L>SE@O*SmC1 zuUZ?Vd9-@D#5IZJ+a}w~%WuhBzc2gUY*@A@hGV0V@%7AEL0-O>BeJihhJKq`#$y$x zzG%+rU3&M}Wv9LRcfS3CNn&iWW~AyA%lztP0*B7u(h+3b`*cs`=c?_u{V$vJCa)EB zh=@phvGrP%C+jtagt@|rEDv5c__1^~?U)ms&|w2%3((}%Lq>!STRR`2H5U0t}njVJFu4@ZR2v?=nA*JfB=PmL|=`{fvR zVbP={bM~zU4@_sU@NW3?;qUY1rq%6=g(*=dS=!H4 z_7E&C1y{1rmTIMX;uH@!}a6s=eJ%tKVNRm&&T)wo|EM>pSClv@A2D^ z|DNS~Joe$mG4WMvO~W6`apW=m$X%-${`2?$rT-olFTCy2X|KC?ozRcxjY@f|1pj7# z?Y$qiJo?F|KOw?`5iRTDUA;59KGs;-)%@I{cX^regF`i;b6%(iY>t>>Qxv-5O3+ld zi{Iy3zj>Y+^|)(m)a$v0({k3um@Y4VSlav8t=$qktff`oBJBj&s`f&Qo)GBCNS>^*bYmb=GOSpBg=Pepk$3 z=Muuo#ae#;X8O}p^J{Bs=PpxR%EWNcT+!mSjMCmItEL}(oA!E-dba2^!(%4Cv%OUr zcz;M7zi9b-j_mP`S9H!CZ2$Z2SmgdEXLM%2{Z?YNci+CA$2VFYG|zul8TrWMe3V4L z%eh+udkn9X#;)CZW!0Zk1$Nv_N`8-zY-N&6)b@tZ1Ve|J@h6SmGJ`CD)Q7xXqw5QwSwwclbOnm-F zYUU}OLrY`QLcg7vQylofPdjLCv8Jx2xFFx|z2Ea@xw+iP+cvu{*h_J0ID0>-XY?>Hg(=prHog-Ae1KE^Pi_(RbKjOK!8vwHf=~d!M^(A-0Hnee)Tn15xMJw3rA=8Pr$` zmTY>(nA-7V(aL4sMapaLv`ZLqX}&xf+t?N49V524y))~{RVCg%=MIO8S3dO9`@bSe zcbSHc=JL>6x(gVW?=Z_{P!L$fEh+jw?(7wDCl&^g@7H)m*SZQYtaDu^^1O21^Oa9{ zI7;TuVQ64kF^geoik8C027@b6#p^U5*Z==t^XIL-?L=0k0+qH4mbc56W%3FhiJ0!{ z=WHrg|F63|zP>bjgI#Uazt7w4>;B&@usdYL9KL}~qvx>Bi>BvI1855Mc5YWd!pxZ}TBl1quhi3wPTsYy{G61tW%*^3FSFnM{k@)d=9E(= z=a*m3tl#|q>-zey4h=Z@m9r)jwm|%J=56*WZ;z3re6Bo|bsq6Ub>-YEjD;1Sj|DJvSk1xl&r%x;A9sB-%ztX1Sn5>hhjg-x5Z`c0) zcX#*p_idj){P^-Q;4ihPP$j$IhJ2S-bVxQ3?L`YS7nBBJIYKnoYnYx z&ZPvd^`^cF&m_c^m^?gIEcz2A$(gNVIsN*iXQ_;W$!os)?R{u?EOU<6S>MZBbET74 zEtTnWXQ=r6VcE4=MQTcdVREkLz0&#&LDx;b{k<;tR_44Jk}e_Z($?}W89wo7;bxg^=xl=QqH{e#BT?=z0Y`+ zXI@I1`TXanLgD9gcV1azV{e-oma@|Ip!r_=RevKpUj?n^JCQjjq;Os2Z?8j{l53g{ z?+Ib#Som^R9HX3bTI91R?TM<3N}Cz4h#qf05c51$vTb3e)cZXirft9Wxjvl3of*LA z@pM*_NW$MGledae2g z)sZE;)&Ae>i_wk~Cn>sVMvD8M*6BPNX}He$@ySI8y-r-Ss;xct`Oo8*d+t9q&)?5g zw)yS-JNNc-=Se4T3yb4_Twee0A^-mW-)wpg{(Am#=4<8*DN9<)6xFl{UvD)vi z68oP&_F-9GE)ErUkA{El=BRHJUAM?3w6#v{>WA*cudbVpiPs-^`uw!`|26&Rxy0k* 
z-kAd41>4KYqF6*meW!=RA#FD<>3)eq&&o_)bRY(H#DBUTfP7D&_}U z$@TC4TetuI@+qd}d=mw3PEdRll;oGW|97yIoEPMEdoAa}i`N&vu6C-btgEf7x+cBOYUaDV{QUg%inPf^W^0F*B?LpFkxwPw)qa$RBV@vGqm*R_R4{E&#P@LxEv-MhKT!_lD)l0UzIrJ#nsxm%k zau;11@lOzk3G3#87>y4|uZd+zs?HKykx z7Hi(wDZ;x%;<%vToXz~-`Sb7ZoBQnN<>UQ_4UUNLuD!&T{kCkk>Y|_`&8?YIg8RI# zc6LOnHo58FiuxGCwOUMVsft4Gt4nrH z>fdv!o(sZiExgZ7t=xb6>t8m(>8a1Ci82>WmRTOCZn8S~wv^z?rGA%9c$_br z9o+dq)>I)>;91nAuA3bK3pqGD(q6tc2#g3|-gIr>nb%*X8&5Q>apYZn!YQ6FjIE1d zW0+p)-u5-d?5?j;4`kY_UNm)R(HF1!J7Jl~*HVLH3&bRvQd3yggi2(T9_A8H6JTRV z))fqUHc2@kZ2QE#)#sw#&pGi*`s7lBg0)K5D{B;MLv)tE5{Z>$jbL!?QrpwK;01q! zfQ8;NgZT4lCqp~0bpJcR@StWAE5i=1PpOlWlLQ2$fBZfF?*FUs`#<`)-Jdr*2CnBw ztou_^_sgWs;MyCr+kb72-T(is-}14I|LK~4A0D>*&)?_2vgYqU{afdBKYw}n*ggAf z+Uxp1AEQ?rH)N>J68ISPu5#_heMM2PF9_Vdc#ZXKRpAc4h5BMEPkZdv@w~o(r&jOU z{clS99RyrHdvDlRDQ@^gwDC_9Oe@b~S%0EaU`I=A^x!ef#$9^7r@NmhIl!I3dzhLEgSjXKBgZwB2`2eJ}9t zo-*swA~k8To8Ru%?Vh{r<&UBo-=CL%9{yQS@%P!^+2;JY*9$yDXI!wkb3Z!We|htj zTV`hs=ETJxuG^k}Tkn}ra?CV|%S=JfGmWlEJpQ)(w#4E%(`TR8NFT89oAq_UBj>j} zSM2QQSa$yIy?aa#UY~ybvu4{hw)gMk-nCRpv`s1UFy=do z3>Tg6ORSz9c1p8l!_Ay+x7X%fdTenmX!f?3zwb#!XLUYbQW$XVXHZt;N~sHKEZJ*I z!%cd?7N?{U0OaycdLkb&*3dTj}78}AKt>O-{KJa zeDCV1JFkWQV0-mj%_wB&F_D=|bEEgx>7FjKo4<8!+s-Vv0>8+|H}w*_6`BmyrrY!| zGrTF={p{hs%-gA#W?es}6w00H;`%!DdEY$OlDTnPW8&(UboYC>bo8{bz2H+h!xQ11 zx>F{>YWe;h$*&7lRv0tye5DbU|GQx2yUDBhs#Wg^cy`R}aa{Y$A=OPHp;zRP_Y}4( zT1_219YH(7+*p)Ob)1~h$srPb$>*JoqfbAh;`e%{r_J6yZ`s_`CW5oe(vO8DVX)yNR7d# zu&&aoH{DF((6b*#HNSuTd3pFxk;IJRzT%Un%WstYJKOxTd;gEsb+7x5A2#usefHni ziY+^y<-JG{RZ(!(dgQVrEc|iTy;H5Slg~d(WMcWnbM)2Y%GhiBBG!xa?ekj`I;C)K z`t`qWH|rGoeiRkBwy^T&{$Gc~cmGLM6+2-m%df9L-;PiH>}+w-liE^!D=hi4mq~PN z@nya;WlnOJcgz~oGgD^^%yCX``fmBWP(rY9mKa;j_p9c!_svh9H!ULjtZM&bkzkwo z{!v-?Ong7|T5;drR{8&5?CmYrb_Jcb==1kKzf{cmnx%a5oMnOnYfN)*v%NQ;y>HXE zpkpxyB~lXZ|ERnC`vLcL`T9SFv)U?Oely=Kz_iMCebI-+{r4;1&)q!x>@|s5%U&Ml zx38JK&vrih>o?c(U1v;iIZ}Op+uGu564Qeop31%4<UaG9UjP5S_VrAuJzIH{G>!?SW$#gbePLH&SHJ&xyDeQT^UkHa z@SWQ&kaWtv=EsEM++EwI)L6+Md%48i_vF>e`&;Mo$1%Hk>l!{?66MJ7{_xwgy}`4& zr(2xg8m4>W>a3i*fqTkt-_6^7b6U)?M2U0Hqb#4RD6lL_?0q0JVM)l|@3GgpKiiyt z{`tV7gxC$&bPS)xn1nfA-%)qj`n_50^-|g5ld99_Oi%S(y257LvLY+qt=EDg-nyx# z*S>pRxpJ$)^>y#_^771P^D6Z{ULo*q(;2C6@AjIr9BlW!f9Yw{iEC{FTh8aq+QML^ zlNmYfXyo?0?@aE>b533V>7Me#vfbIY)s7vR8r-G1^lNk1iiNG`EE7F6pT~4){9sV{ zyvOHqPsdUbTW9Z;mdjuNP~2>_GwbObE(QT1V}@qaZBlFpg1DlRWh>v`TE{wZja;7FPprGHObRU+VY%ftfSN%5YmU;nritol@?XFex)$eA2j1C2vLU*B); zDtWSqMPSO-#RA-$3tV?jsW{>Kc-tqX*Q)UnT8$gL{)GFv^9SsDCa%Q9bbXgt{R=nN zTa5vX4Ox5V9dK!B3z)Dzyr$uG-Og2R;Rzuk;m$^D-YZ>NHGT4Ff5xv&+zhcSyh;an zbaFyF_yZVLoml!Hf+c|A=iT3b*Z+I={O`5rHD(Gi4{Byj*Ice~ddg0tqSv{Z&HQ?E zp1pke@nb>H=_%aTQ)jL%)cfKqY*+j1%kKN1^#6UD-Tr%a)#YP0cO*8=I=^JYY6VH> zYgrMhwS7ugI2XQFJI$(KRuH>;ER)HOwx<2KKSK) zG9%NbYfpoY6`oqk_2BgD;*O0WkKei=)U~ywL#r%l2=r(M%~+_;t3rDg?z^4wAZP8?*H*7}I(`TU z@OB(YjhtqAbj9a?&z^ldJDWZG?)v(_t3O-R{J+Z&o)(SB_FB1gTH*}(`EtuEyFTA2 zoy#CJ^MZ-Pv7ZH2bEm7WvTa?h(P-0qIqmke_|uG9Yc_3P6LizjpD79TWU zRVI8uBzdiBi`p}jGs!(CC%7s-k9n#U$y*cEu%L5_8%tmNRFe&746m_27 z)N^>pkrPvQugks_=Y0RUrIv~M8Ku%$&qO90PU~>dlDzyq_O{7o6Tau4P0qKxsJ*=H zt&Q)w-xpYtHB9!swsJP<6Xq6hS{u5VJ-O%FE|o~f2M?On?ylr{QeksUB=pas;L2HQ zf&pSjtX40(RmvD77@O*%vs34@Q1Gl}H!ZJklU3-wJR|s5&vWGlE%6gf4t^^|E^(*F zh^2kBi+CP%=FQI6R?pX%vUNV{nwqFNm1C`|i-=3y$CFp}mO6Jhc4#bJb8g$|%)V8A zM@kC?dFNeEUjAg0&gsjaFTZ}Btv53?hD&4Xv!Izb1%xvBE?3Q2t0=B5_haUa{cO)p?`P!e1@%WKKClC7r32JuCo8m{U0_RO2TV&QX!>82j*AH7akeMVu&sh4`^ z@*fax6``^#h^KV~!VioNq5H)+3z(vdKy~fuBF1i}+&-0r8_4^^OLt2tC`P|}GC9;M* z9^q@sYvtBWpV__RvGM&@@rJ^?tc?PCC92^^3$L4}xyn85a#hesSC4u1`tswCUsd%s 
z8y<_~W|*Vh9QNqa(?}K3;^k9zOgnYQMA4*CsAz3h?z1Tl3A;-@t{Lc`UbL;NP@?zi zi<;|YvwI_h^ri|f;GCyDJ!oEB{H00DD#gT%(jsS_l6h?8AFP=(H+Pm=%Y)b#yW}|e z+piz4YN|f+>C>lQRTe#mSJ=ENko&eD-Fy^XOUb>7UKQ*8Q_64TOE!!QlX9iM6M8rwIH+zJ1ZCbw4fZ3y`GjdM()@zlsyw|#9 z^jn0#LgGR>ooq0UM80R*wCl9~gywl-Rs75hf)|!sJxw|BLNaJ4Q_xD+X1{$l zcNj}<7(7hPj0z6_-)sJb>DPg)+ZO-17?s4K!8EIP!7BN|Ee3cfg z``5*|&ZznPeb+xpZ<7)k4HSOKD<~}z)z?sJNf4Z{upyQVb1sM+kf9~fA@B` z^z#QlUm9NDIYpFJqVMGgBY))=rAabr??3tPyJ=GQ|KH!g`gIq%Z(R5?+c#fEfP>-x z%iH&Be@HME$Xv?*|MmRw%O5_^zyCh(+pc-*%ej<-q;5qOYihGy3ypC3s$XS)^+F4G z&+c8vKWGK+FG!odu>L1wOa1O8u`_i3K410j3Tr8A{Z*wP{eaKEY+f#|`P&?^V@XX# zT~%#m%(IO~*#}y_*G=3xZPCdbmw@X#qz+Fvx@zPb>c!EkcF>@B?P?Xxr8_RA9a2qO z@XBeixl;DZw^pxDRm%7ucV1^&!S7+Y^s@m|!`5xDchy}k*7c3B-eO|t!Mpjik^X6; zo@TLkx~FT3i)VXZ`@rGu=3k&R>v`+WbDxW>jGSY7zxMm{s`Nsk1cV`5x|zv+QesKfnC)$Cp1gy~2`NvyWvRk!k2S zA|ZNZ?NW~qx8`2?{x+|CaLw#nI9DxDo!U!I+ENV?YC<+5YNE*6*5k^5{qoH<^*oSaj% zW~s(;{X^OcDx0brU0<~xHCJF#DLTm(Wj#fbmyzK@m43YIxlK+LR&^XFCy25*q$RNg zRW)a)3akit9p$xkUgatKgQ^O$EE6+*Ij{16U^N$W6j6%3`{mWTSCh{_daeC+(c}L{ zOEW|teVjTo@}cgt(8mX+PZhrIpcM3Pj`rcIUq;;PoA?9gF8Wuf65ygWAx3!P9`^UK zCv^h${d#qFcX|2UV=ZjcEPBreKaFY3KAPF`a7tgh1wVrm)2qzM`TJ{UF8lx0yqH{bs6&)@gw`sr)Uw%vE`e1E%q{hxQ!_y0O2q$I`4 zz$++~-Mh8H#DSd z!X|_1n_L)<9AnQ_Ha%wXdQRyqM}AK{7^tz`d6H+6+RxZr^ z+Q0Ewg?!OD|9<~ps)`fK%l6-I7kHNX^T)%>pHFvh;w%4dUVELfq4eAA_4^GS?$>^| z75w+-x&8lFueZ;)tBsn*!SMWZfz8~#-}(9T^YZhj?T}z`;!@mlPaX zl%ZPe!j3a(n;%BS`?sgcT+bHoc-N=7?D_NbvoFLMoR761i`*|)dB)?0Np8lWO=k`p ztjK-+>{;37n>vLLPidz&F5zN2yUoGivPt;WgU63f_;{~fZOe^SscUniTMJCzm(EHT zP&86K_W5T|M`7f%8C$ZvV}l=?%xmAg{akha+RH6YUk@MbV(cspQ*=Cg;j+C5!|}qD zb#^~Qf2|bl{PHe|C-SvGV0BrTvd-cwvyx^li!7H-=248yp9?zub>q1g%THOeGf34s zxu{MH^15BReY^SZzxS%=7aO`1>sh7?1Ze4*C@!-&U#xeudVPNTuH#ppEI9I3wfu&J z(Y2^nF-C`rfvI_2Nei`F*;O{{`CAz+%gma!fvxX~SKlcq1*Mjj>tCz|Qjah?Fzn>I zn03p^G3nZ|HK%)}*8Nx{mmPXdeIe89RQcX*YOn6Igh(rD9i38ouj5m?_Ma`2lo%SE zm=pp^RgBiyMa15ps23VMJ+UNmg2{mxj_{qzaV-@~jb8P~gl~EsuGFNGHn)sPe4>4< z+ME-tReR)~iGO0{$W9e>)AA7QW#P7)vF@(jo%?y)W6w_Q*|~rB*L!mQ=llK78|qD6 zUChWZeW_gk(~6GMJcr_3n&lgL^osw!+x`6T$NE1%e=ooP=h?^nfA8|&zk8SMTWDGP z=gZyl`~N@we9G+Yw#D#zOiJg|KX*?X*xvSDA^B=?LQkx=^~2ArcWVVDES>yCJcjXz zzL@gQN7X-W?x@+T^1{_qFn8CKir8hmF0W&Eu8VtT*xByMK?NVdJv*zn0~0 zn|(HIGp}OB`;z@K?HAt^Y`<;N`&i5T_uk?>iF3zx+%2;{@9niSclOr)kDnTij&1Rs z?QJRRf7qhxJwt)SmaCm1$t!)i^6%|=xy$dedEJkPoB|w~yWg76<`vw#Z(r7#*PD(k z%j8kuSTZGhrRSxeHT#YkT0ED^H2J_J*nb?fx4vV^nPB0fWv``D!4 zrJ}mLy1e{y>8jn{pTCq?%~>TATGW#gG|Tkk+zyjD-%G6g3p))pBi9wBFch5o8oT@U z``_CV1$y|wM6fqpws<-zTX$*LI#JHjmD#hdNlXsb3OF%i z+gp>X5f2Z}% zcer}-h3B68_urJ=R%2n%*jljmnq~IdIOkI78OJo2hGbnlzidwQ&T5iSj~5}TENrt`IJ{nPjt={E`EBerLDv1vOq#v;XKzRkDk8M zp04si+q5n>B#7O5($A!{wI42ht+;&rk<*#vHdCdTwXIfjz9=v)sk)hI^!e0*wGj#* zQnO9cN*{0H;Nm)TRkw}fKy2*_v~H$f1l2NJJymgxA>e8gO^~kQ{RVA zHNU<*e0q7gy2{I6yc;Bs{rvjW=B%YF!^RX3iB$%Q({#3SNm|OZT|fN)>*4nPm8a_e z>HpuEt-0*y)328e_AyM`#@%)&?eX6G7Wu4dk0-@0ey*RfU|$eZMIz7jHE}Vb`mfLL z_!J+1Zr%CCIkkpUm$aOYDvjKwzjtcEPT81sob%;pew`b!R&vj=)-@&)r;eUV;4uj3 z`}pBciPb)>H`+0dyO}#$BzrP%mvwq;Zk@8m*gtvU`ubfR8#2~wuimLQJxS>2vB`$3 zGZ_w8X*+D4k|{I2GRPYp8UM>cH8c^+t!Cpvsk+P>9==wKPD6h8ZO&>FKV}r_u89M z(O%c2Up##JwZ!I}pv!GXdbd;vfJf1jb`p?1m8CE72d<`mE8TofM@w<%c)5xD&9}_AJglbBoVOWUu|~=iQ}UyjJz8$@y%rhnx23oObERy=`ijal4h{%;zmfGI%wm zayxpAFI(suDlOP*X5xF9rBvif6l)8&PGO(nwp^}-8?P}X#Y~Iosyp$O@%usR_q7Mq zOs?I!n!vKcPn<<(tHJ_B;m1ZVj4!bG=_p;>p|kVauFTu}UiTVa-gHFu>Y=S4raVjS z{1y5v#$>@E3E`T6f{6RGDnqZW^?bf7+4pnE%&DAW4}-3Kig5`s3DR2{Up&`K)6MnQ zxy%4L1wF&1Q&=lXuW?Dgk37PlB*=aJ(7By=^LF2U+q*D`E9Ht(S~8EqlRater=-Fby)qWPV_bZB zi|>bXQOCs?f_!vDS+q~Ha>r=TUf0gUD3u*%$SOHuRf+AoVh0wF2GPurhLzW}l$NeJ 
z?xFc~QK=w9f=I8$O7SAu5{I~H#(Q0iUewBk3%sr>diA>Q;3^lFjlMfx@vok6jB{l| z?%YG#oSXj%xri;@6lObj-Gqh>VOg6xHV6nzx{|H73!#?RR<9c;xZbF&`F-`- zGu{a&m6FeCx-)QmmihnZ_;mY!4?lje+592zx<2>bJ#+k*8y-8vqoi_Zsxjk+kbOOY zM{hGe{QC84fyI@m3A5%*tF&xCcJE&8*ALHfZ|Crso-;Woc~InZlBox$-fWear3Wv; zTxoeGX{LtHnG(lOM%|VwuBeTD7U6Yr>4VnA_wu&?UFLqMP>R3(@bP|m@3nQ?ckkTE zsW>HQ`Qg_eD^jLS`uwuQPqUUozOzT+-S=o-Gm*s9_Z1r_Ps3^VTwUK``hwxBX@-u!rP&0`U)E_+IM->sYXeEIU_mp@iq z5n22A`BkoCv)0{~$X;X0;TG;@dQ;(sNnCYo}6tG7gpP zEfon%l2j)t~zulW@+xZRWdry9w`(pyKivG5I}O)?O>m7_+~9-cqW=Xw|FgviY0$ z!Cehmj}{rK=>>B8p2~WjwwL9OZDUxPj+Sb z6vYV&rmrR3slT%v70tZiDY}*nxZH>F2 z(nQ7!L3{@m<=#COp(M8Bz|@Y8Ls}m$FX4DP>Du8iz3b^aI5w@}Gi0JY|rB7nh zR?NG2rXho8?bNfDr&*JaXwOhxJw2>med+=ZZpF26&(5D_j(eW{^q1s{JsaAXl(w#_ z*u=E>!AqNSg)g!g<$49hqb7X#k@)lO{+jQ{@9*_vzM^#PLLk@vdCx7UMST^$tSz!g zWaWlKYk4}@biEU&oh^GW?LX~Af(|F|zMa*Z_wRgrEBBo0bk%3BY62&!wh6_kUvs_0 z%rQUyo%TQX>({GQ*V~=fj;Id)@Og<-uGU{Bl|6o2^b?MMJ-6YWQOnaJ+lxP!7H!aq zd6<=&9(d$Hgtlvz-|^|sKh|h%&2n13G0dD*shMqQ(MnN0QOmSXtH!t4nD5KZY#|Gz@U#^@pH^1nVM@Y8i{PQUnq=cK+ zx9$wP%4+dE$memUm!{yP?{W4sW3T7@nx`$@9DDugB;LStD+RhvoIZW}XN}cdKk@5X zjt92!E`5?I^*nLanZnLO&-lInPeoY!2lt#b-)FjZ^R25Xyfc5d|88$@Km7W2ft~;5 zJ$LiWW}m(L_wU&{d*90ze3s7z4!u2l_V3}@mp`63bL__1ZTolbJZY(0xO4x0=L3fy zZqPBTSS~ST>$~au|Ez9r|DFH;i#o%H>9c-{ZpfN9V9X*0&b9Wfdp?eY_``{MAHr>5{0&o1RSKUmhIw=d4Y5Gfd4Y&MV#fE%De$Uun1<#TS^jw*(0}dN+T&1GE zT%*Hf{@sxH56Y8_qj$?aIi|op8&3bLo_IHxsH}bghYvVmWa-=$WPU_Uu_@bC&zd zFMGeg_TT0A?azD4K1ma3hUNYhD$Evc=7Pfp8Ef1f47^Lr+755 z?#Ohy{ypa5)2AV-SEB@Y!W~46Qq~{2>*d?{?$C#yrF-wO>gBEDj@eO`aZsH3)n~S4 zIWJ3p#HxP2em}}F;-}5AC!Zo-u1`2Sf2C`w-Y?4)ulGN3O8!(})pI!0)hv{=PW0i6 z+xZO*T;WQO=Ug^sVR#}WwN34Y(AB6NMQu$C35L&mSZ=*{&tTAeZsa?;eBu4PIeR6S zFU@^yv47f(+{9$tDOTsbeFR%}Z_B)W?dO7Bin7mUT|KK~cVk(myxhC(yVq`C_Qc57 zxyREXFE8)jzRHMcPoF+b^)p<$f;IN@o+&|&4xF#1PoIAM`Si~(W3;1Ro7LVg-OF|< zA2dUrJ~!Km^%2#-}_)sQ+Jj#J7xIif4O)oue{^Ip8Q$g{=KtnS+>li$NAjN zsN*rEj78`89?$UhwOqS)hMvypNx`$8b+X>tHkIM9LEmGG-o-}d=PcjJU9?^uWTQmE@^(xE!rtis|mE5J8{`Y6c zi@EG9OFw(BJG1AEcUR@S<&(TQ_PwuGnRIH;GDU^!r*3@+v7BCHHP^-A%q+{}`F&skk&(LZw> zbj<3u+{@c~S6!U*dF!S{EALsmtl4U3Z}0zkO7W!=Jg#4R4X;Hyz1pS662E$x8WY1Y ziQ|Pf^Zd@;+?KnyT&d4>%ju6sg%h)7pDF4Ee!lfKs&RAK^p~oCPJLdx)={0jUo288x@NkNQ!!*+=wcn$ zV(Bwquf!(jhAR}tifI|I2!Oi&D?+`>1nb(cg_a9t=~ww@&wc(S305uG)u2(@LMbe(kI6*YR2)s@_Gf zVbKGt=YskPk4trT*X+)VjZd07UH`QNlSwXta z8*btdl8uY!TYlN3*E0MmgGheqH3o;C!ddA{ZOeYg_7qz1%iGnN9Mf>yaxzE9)0lk>{;L2TepfR-T5+m5@`GF)+tqTZT$BCBD`eVFIHY>ym#;I>}7^7 zr)}0Qm#$>nnDMOA&#<%MlH6sJ<$Dwo^&&sF@A-V~_qI&a)ttRw6$Ks!gKe35MItw<{T(|NOIN)z-Z7+wH%9UocTxbR*&9Ql)JE zV9lu=Yc~3FFfEwe$MbAX5t~EEQLaVDMANc$ITRe$Zk=M(x7cpc(ll3I6LAU7(&C^l zHNB13CQZsZy>m*=+M8y#x4pd*s`gaKqI*hkU2U!H{PV|dicGdT`D)h%>(%S}qr5ev z6wV#G*S?H%)kkgCthLu9M2~AnKc2&_$WeT%Qlc$2(kZ|*Yu5AbWeq$WIpxvWx*X4^ zBu71~+;**1;My5`mJ3XGr$kuXcreGgfhVblvA{)Ty7lhQb}b8b^+;^ySieDf;=YXB z+a-w2Nd27W4FV3GHlr?L?Jjt|mA)#z5BM*0M za(nYwE9APJfvoYNQ~YaxaWDj2U$stas^jDp5iVY>Q&W_Db<_`NIq7JxWOa*8Y;h@z z}>%~rtk&cFT3SV{+jwo{JFYPOpwX7)q6WEvb(n$s;VqrWf-`3 zPshG1zqnV2Wh}bK^`gjQefxi_yw-%Z-vnN-;S$U`dq~OUkcN}Twkdz!%E#H)*%)7+ zk~*`6`zn{qk)ViJ)(AnC29}b*YqD<7TVsy&svK(Ln6PWx&$IXc{90fC_4I`wQC@wI zAAV?R+L!5g!0LJLUarHuK~Gbz$~j5}luB9_EEUv$%q7V9ax+izUZt2rRpDzUusq#V z8T%?S;j2i@PZQbqHCG?RbVUXHFYP>^vTo~(U!{MS39rd|yZwq)@AK4I*V@lWq=xe@ zY};rgy@gG1^#(gVEeqAN32q`H4u#j&N%0z8_wz|qdp~!h$1)Fgdw1TR z9^;~-t0~dnbJ&3Q@e!WCX_vpR__i!a?D<~P7LDbPJxw$&&8qBEY{}if@m$|y8@|g1 zeM*XFLW%m^v`aE53NO384UkQ39;hmV=qtLja ziYK6Uo8?B0s0HfH%1VB7iVN3m>UdXsXp;B2tt&F_S?AAXxnC0dc1+sOQzRwZClN?Rf=5-1$O+GO^&8II%TirICFVUQCi$V&fIJJ?gTVyODE61VEKGc 
z_KdF|+Ov)%scW?y@X~g$&nU?MYsDCX~4*v}~d3ys(jD`UL%ZZ@ZjIXs-?%EB^d^Hwp&fTAlg6E@G` zE{-kDYETGp<=7aa7}2jRo5nNi+$jgPmfmU6>(s?x@wQ#}=;CTP|eWg|1CzrnXw>*7~ zUf=Yo&tLhvIVJN-CeJNjFe6~?)=#02R?Ie0`XTVE&Y8n-nZ)xs-Ve1j&agT(WJ*?A zO+URwADIkr+(Oz=E2-i zaE|jpr$d2X;E(JH?J@HjUe9F~ShMj=X|IKHb4QzwLC6|QW~H48Ygrk--1|N|R`Y9( z)yXn%#V2#7OE_~l%-ET;efGNCtf!|w{rOU3CBLRL|DH|G_S->P#k`6ovF#m($1Z;~ z=~EWGd;k9TTx-6|_J1$`ud1wdnz;LR^z+(Nl`_W*=S-V(eU_f?3RSJ863rB|Fk)l>ZvK2?5(G%YFc{l&Yc{y==`;J->uoKBr`o#=J=K~_gG)o ztV`|O$ocT>-@j+?o;`i}^6S^HmmhZ*VKg~hIioFb{ngydQOQNRDF!VJ3R5Fb`|esd zd%5d%7YB?R!< zGE1i%d)T2Qy_ws0^WAs<9;Msg&)vTK@yE-Lm*35M9RH_M;+f^kFI%szV|3!Gnb4{E zbPn&r?+deMZ+jWXudkZ^ey`!Qo~`GE7Ijs1Y$)nAe0Jtu`>8qWRVVM%n0D3V;Dndv zvw#2oJ^lLh%P(6R&RNdoJ!kn^hGR)YlGo=g8J{N?pJNYdIBt1b=d(?8^4#)+?8#?L z{GDY*1cVvCI!Zcs++tQbG-v0lZA%=FX{t`$=<_+mo3-K8)Dy3d`+8T*OpSb&I3p>B zAz-&v_R6JVN?WcypVR8<%k*lu`D#y*h-dD->PqTExzmb(Uc?$z_x`W$HevFd~n!^YyNO z{WoLzTZMDlGjDwPyX^I^ywAVw|GZdp`)-coH5)ZXHVu!UqOE6oT2$OzPQ*;Gog2ip ze3D+=mUS^J9_|RA^FizMj&(bypKy*$H*Sc@aE!U1{&amx?ePbnZV67jC(+`qyYt$n zpaXlJDHf&jeu;jjc`n)SrT4i{r}9_NIQ24dOA?QQ_Lq%Pw*&(syfm*xrJuO*b!BXR zhD(Z$;D&-zH)C9SUEH+o`PvVgoWFhhw#4FnaZXR1GiHe~_%FZw`0-_vv)6W-=ihtt z-Td}lv!y>QZwF~IFfuTBx;Tb>`V*yfzLwcXP4U8+Gg(+=8(=#>iOkp zyGO2g<=tiZv$t8vCtopFKFTsf(x*#MFsbL*jLR=~=&TCUz9zB%cJbP!A%+33Cl#3; zo8$j{lK1Uxf767e?wk1f?c`J!e0h7_l|?zhJ%-mWeV@CyeEH>YgGVY`-*fqbm9fh&XGTt&J-hbyHp63=Uq(p2%X{x6d`w{1wzHP~j}3f-y_OzH zS^ILA-cpO+;Fz?>wZcxfTTB)g9*hj1*?V-~{+F}Z7#K5WKD$1BdgZ)ns;k^jS#H1m z*4e}I`PsDBbKRNNO+Ddrc}fx2IcX)AjN57Z_y2r#clQ2YtK-jj|N3~i{qaYKV-eZ2 zmVG_fuwgUbiMsF4-=90&Daqm>`nD>bjZq;$QAJT&b4g&1!0j{3qOB7G&AU&Wu{_!&&DaMnp zdKa#{+otAod5ST!#oJiEgDZ0b16_=!6ssKynv#&X)R^(2)#<6lCr&;ms8zeU!Ry{y)2ho34m-CT5}wEqa!xB@$D9*<4m*qljMg^>u421# z?850zbp|h&SiPU&N3ymCbeoplU7hhX+a==Z`&ZW%dB-pHI<2Qw7wXt`=+JA41V%w$ zxe3~jODox~**STa#hlbtx6}QRxgQc_fnb^$dn`H{+C-}qGGedf`6Mrxt9Ae zd{&h#i4FPkn<>c1m}B!f?=9yvzPe`=pF5PZCUDco`2SD&*Dp`p{(Aqq@6BGVb9BpKSiX zG{V{N&}*IO^hq_>$~aDORn_^;ne_V0uA0AAmpCE3Ycl@~~m^atdC5GGh{N+X0mEK01 z9{X0CzGnV&)u~Dh9c;Zi#WLrVBaMBF&+UBwd$&2iZ^p4ov4&Gca>t`ir4|(5|6Y5) z`^WU@&nImzZe0CjQr6SYl@)bWjLp|}Y>N$CFr{d%&H>Y>66tFLVjdj|J=tO6E4=y0 zjI5Qj>t4ssI6Ws=tl@zy$7I8X6%9HJSAw!NeLuWC+`RExy zPoP%+V-|+P222hax6dwPZ-~h{mpL>2Y`=!(^;6fPlvJ8ide)gL1$pPr3e%sbudi?U zbWV|Gq|2-ej5-H@-Yt7Q=i-4om9vhg=H2>Mw%xGp1e>&z*4FI&`!;v-fAKzFv{C%| z=a)}EEV4V7DV4Rd;l-TSyZ2?zniQ^Fd3$T&%)lMFOo6LSF4^>*KfmmKG@Am`^SK=i zAMU<=H#>jdyk#4g2o;-NJ31rDCuPp66vlw+ijxb}vgY|eXKatk{?Kc9tXh80d@spk ziBc>%DQ85FMP^E=-LHyWv-f-K^_BDDL}zaelg<3q za6O8lN3pb%i{aRgFSY4CRx2wbug#6vcsy#g*7qyC$8=WpI2@TJAvjy$-MjqGcW?Xc z?dW2V%583yb0-P`WwR%fp*IJvT8Lix9U5ABx!`BW4j797dE(u>`Y zTX^QSTqllemI9uZ%cku3w2N0-nW;r*g_*RIgDA7I!^){Vv7(tA32UC7Do~uz;n9-V zYZB8hqIC8^Nq9|vwkt!_ec`QhrNV6Yax_d(e1Asa#gt0UvH zsgaZP|JPPtHjxcxxRR&J6y*K1d+&sqkzxJ2L(W;Ay*xuCNyw}%q*!CqzROIoFpwy#y#JkF)gxsF7fe85orBn z^4*-Cxzk<$-`nbS*=+V%wU%ceU%q_$bLq8N-a+=8_Bl+={@{DrWR?5-S<_@TeX!Br z{Wfj?ZEg=g%jsimAKR^7@;_Thzm`NKP>=rmp1s@USY-*&G5XA{jUZXQ!#7sd50PwrZoJNxh1 zvxg5m>mKcKVL2Bqa5KmJ-p?7AWu~jOlz%txEnfEaK-*;li}dDlLtRtx6(2r)c(&aB z_e=TzU*7WHulIM^`6N*yYhCQ^t>p)WQWG8WZk6qRyY;BnautPR5%M)XA1^QWH&kaa zxwdFs;1WHV59jvU?Khb5`}ghC$iC&u47(rjO75MX^=5AnpXKyWv2-4VXPc_#)nx4X zer?8Q1E0$-9Sb>?PKi!6W!^U>D*O6YmnEWHOEVPIpT~rBFZpV264mXrRL$OIYG0Qd%E-}8chkd zk$L5n#JMG#uW0!t#nR2DzB|61);XQCHhcE5%J!75@2%aI%Erq@iq~nnCu(k02)w@i z?=tfg0mJD_=7)-^&dPqVcEYRK9~U`a)AJ1py)Ly;h0Bkr_}tF0eHAI;?sJ85TvB8r zy~BgrXRgjO-MWVe-@f5`|HcY z!~g%x|Nq1P{||lt^X|#B*WauCI{*LW`d{7i>-V0~|NNtHo}I+xZPA=-y)&)m`h1WL zKXu@YLd=@Bg^W(EUaMD@-Q9X&jnUNeQa|-Sy$t2pbxv!C&YIT`*JNLOTK`~aPwG;U 
[... GIT binary patch data (base85-encoded contents of ldm/modules/image_degradation/utils/test.png) omitted ...]
zt7F@8fA2fL^h(guNq4$Gq#pYo!!hx;>C%$hzZT}^E>6{38j^c%l|iCl?T%+ zC$70{&=sZD#E5X`<+F7bwrk#rv%X>2C zS!EJ)Pw~1ZP99sQOcGU`8j#F$u2R$0k8|RbpgF46=ZifQkDYkr@a=26K*tHG>f+*N z(Y2=j^X+^!W%<|@T~D&M*z_*9|NoOc`AgdFiI?7${}yOAJod9qAo$dsx1OpoueV%L zy}qWX>tx2#DNiI%R9J-_T9!E_%!_NK%UXt}8QiQFu0%1Ml!y)RI{0?_jKea{J9h5e zS@$V~Yvb-SY{yqh;Q=<>^o z`ts>TzfStsJ9+T$j^&uB{UpZg;^CHqW&w)ne?jSkYeTLPnG`PlPLS&GcI6j~~h9eaJ7lhz@r z(2{FXx!Y!UtNX9ZpJ&H<=P_S&tcOn%E3Z+!Ot|HJqH|H=OofBx3O_q55`Vu440xC9)} zRnB~V+2Yf8#*Q_`vDx0bE} zQ9L$#wzSd;*6gFp3Y|`6+O$saTATaWfy;5xluhO`@B8@n>ea7Dzb%>~l(ge_ zjePKm&p}zT%Xcn0^n17J8N+keI!>Q$W0|u|_1K@|_I3Zi-DO`XvOVvyN#A7_jWveP z4}>l4Z$Eta@ZX0!-v5r9kic}-#CMO&+O6Aie?R%77F;r8&DW^gk3VLt&6~4-@7}+6-`C9#(y7#)BKG|9{9EhtZ|7~k8S&ch_~pBA z{Wt`JtYrA-+tuED?{6voe)iWY<27d2Zbof<-*)vEllIx-6%qfR?*ISnX!qUg>-T|| zc>TX!|9g75{JpQW->-)2d+9EJoVj;r#jiiBV_wZRUoZdeeanPvmYS(Ooo^3+Hp<^7 z?sYQps`gTg7-x=)rFXNop0f1RbnekSHpPfx1OKG?+cRf9tK>9%7PCYkclFXKZ@#CT zUUcnN#Np(bQGr^WEvnCFT(a@Mm64F(s~O}m;Rs`OKc66jz_&udpzOE1>NXqRl34p? zlTMAow4k3?GH#dc-n(M!k}1zp`xLnpf9-q!t8DYVjKzY6M@{rkZ#vV{kQBDC^j4)f z6Sur@_+PJ;%W`*Z+k7mePeyIl^U4?1PQN}q_3pVCxjkxaLqgQELY8}FpUc&z`?%Qa zRd+DhT$!RBz~Sk6BuL`2Xz&@;*w{F!wb>`1&)hfR`9u9MH~9it0c8zd-`BU7DJrN) zET4Ss*RNkEC##z)wwbTLf9L-Fr6CheL><>$R?KmF(~}8Hf?Beqj@P}||MS`M*I(24 zKEF24e^w~M@cQekpI1M}yuQ6XpTGUDjs6>thf+6w9_pL@w(Ry+XWbQB<}AOQX;b-r z*S6n*EV``**FdYh+42&7gmu$s5A7^h%KDQDqIY@S`H%cHgz|vlLnS)I^tMMlh(v`{e)sOaJY+ zW;&lu=JM^9RmxgBJ@w>HsqBgaejTdMqq;uJ%`97{GQmOV+6N_fJ&y}I6BHWv_*otI z<8mq7w&bAXVaFqm7A`u=o(RUT^$59Pf5I$UfU8qQ|8NUKQ_#(g>(@?vnseXVK2Kza z-O~z#WiPxmXYw3MwyXHVE-0$45E&B0C8YULxhT##mpG{d)Ckx43@Gl&#k?=RL1nW_&=T!(eIZcJ6Dp zqPQjtZNL3i=+bZg`DOgy8-6@Fc{Z)OQS|q^^4{6M|DB$$zx?ov*z6vaN#DPNF1D2W z#-9A<@7=>kkEY%)6`wwRbzP`mTLw+hnw0R=rHn%Jp$J z@@;lYO>{SJUibD_!FPpLvsrr%uk!Nh44w1%Wx;-Hj{vVziC6!BxxIetHGY16-^(`D z-*PtJ^ZC5#$kKf4rFZkT|NiqccWwPKuccxP8m!5F5r#){dT!^(a?O%$v%0v)}s@;r%=&cFk#{33F=xZvNrVg7Mtcx*D_KWJan8m0yk={+_f$Hwiv^nd@tS8d)|~=t4=knicEV} z|7)4Arrzg0I^AJjUI+D}7?WQ7UiV&Qe`AB2G^c~*aaGM$h7QrS#x1U%scW;O%ZrO= z7TNY1D{AFhRBA8pX*quRqGfhZ-mY!$qmpwUUY37TwR`W5Jv;X9uKxS$?DO;T+U{Nq z8+z(ysk<#$DiOZ=>(Qg%UY?!2{QNvK&=DMO-~az`Tt9wa#!|D{yu0UpZ$JF^uWh@+ z^*Mqo!(7&Ax+$_KDKaj2qB6njDyO@G*IFi*QGQ^C1ywd=R8ioQP&)yu#l)`0hTv+R24#kICTX;tG#{~ zZNLBa@uVF;4X%MUKG^?V|NqCIpPPd|+4!EenR@(mjK|UL(zT_xx7l}nohI>KUT)1+ zP+LWFg;es?^f%K~bfV@QFI?klcU;Lsz)j2Mxxsu-ze$l2md49C8762I-|O_yS~(?b ziI$SLV#WKT|F=F-Qkg58zemqW#U$|Y`JxDsBbTfIN_pzv_W+^x?GlQbFZ zzr~%<;Ik7{>9;+<%PE2V#>}0HD*V=wp7Hf6YV+JXT0E|;JXSq*%C%|PK@3)Me2-0v zZY;fcOKNg4U!td>r>ZXdZjOs4XP5iW=l^{;b5_Tz3qRM%|5|r`=Mjni<<93UTPv(& zz1P0}niaK7KR(ZYd0|{r#3u{A$C@U-pP!yympM!PeEyoyYdhDyE^<@xSUF`<`0T6x z`zlL6KT*>CJpccnXQ!v@Z{>P^D=)(G`C7vu&*w7JWv|Uxmb-f!GfPWxj>O^BPCQHv z9Ny=Gxb78AofN6w77^^><*}Ma;L5DIrA!*$ty>v7y+j$pYTwshH~IJL*QtzZDgX2H zY}p0k_ubiB_cu|df9aK|O@-U0%_{0rG`u{?TsTN#VdB#&hKb&DmpMFk37B;)du7(q z-|fFYnk21l)p*+BB5zmsrvCfx{XcG=ulv{?|E8=weqT+P*Ei>Si@Euw(6ba3#}oVRmoP?6X2iOo`4*&Xt|>$+Ba=nKnSxs{nA`R#@8_ijrl z)h{@k_H?cGRvqsrH*^&rK1#Y>x_9fd0t=b`tK!tJiGJlzM(gCTZ=DcW-|`E)4Oqt^SrIwfyqO69vs1&Ui;W zE7aLq68nGie9+$A+SfgI<@xjP?fLoe>+IXNtC=!pnYH|QdDvY*VTpsv!WJgoB^o=G z(iB}Zr&^zu2zpg~+sy0azSnavItVzavS>$S+Z4iCj!+pgbY;$WJy+_J5sW6p*%M>ZWy)upx_>_^t$f$!|IQN# znlfkhzQ4Ese_B2N|J8Ud&cu1yo_oHg3r&5px8Ewi^2c%6WFF1JQ{O5NgatYj^62Pk zE;}GrwQ}p9jjEG^ES}%cmJN0}^117EU|hvDQ_cxa7bD;6adIyFwDz<8(&CSOoN}y3 zzW&jCR2kFZ!7$Z3HYtWZQ151zbNX6^*Y8*bK61^q?E1qR?y^S1gF|&{ki(KKcG(&~ zrzM%soRsb-$r$wXG?$>Gz+VqWHivzOXZ*}wn=6$byY}bT*Y@X6FWQnd>zd{CVm7C> zOTs4of0QZneJR7lFstP;T1uA;)-b+0`}OGM<>s2t1egRQv$J)VKAZFT@b2>Xyxs|m zvUhjSk9}8u{QUfVbNrN>A0(Z6RdD0&yIuQCj~RSX(c$E1+ZVs~*TUCZuJJ$Ka{GP% 
z@x|Bn<+kt3cI?QU73`;~zBu>#&9hnFEsrgPCSG|}amSNWf1bVZu}?QcvR$?IOnPeV z#&vVsS`nq^8EeHR`8{^zD!6BK$vKFv2{k6aA{%^3C`^L0nv)=Qn z8}elLDwxf>aEWQ!JhiVsCM}sGcb{qHlu!Fo8+92pmhN76 zKWF=~S(&qrRT@UEZHv>K8+67}P{{xD^31v$wk-nxjz=tVy1d0r(}kt*+Dq<*4Cj_* z3mBYtWXRbj9^~O-;rn<~OJ&@&)NSWVbJN$FMhQEVd5#xIkIu31GGQ){y zYuIAn>!rT0w}dey%}ZCDweIzg=!wN*3R5oH-;(z}XUMX2O3e#sA(pOoAaSC3G^3?or?*EtN_j9hz ze*E#nySKN0x3{OBJY|s{bMB_~dG`s1)1oH?W$_p@NIaf)AWCw^^OYh~g0}6O>{azP z>@I^)(b?a3>t^{KUiJTG{GT^hSBLM*Tz2_odH%o01s2On!EaxE7hrre<_K%6sK$A<#tv>6%vzc!Ew8N>zFvaS&rzvGp3) zxtYr{mrQYqDC#s++7$k4&e^m1bAqz=M$WI??B=aFy>jOBo0iY_KHfBM`M!QB*QHaG zT#m^K2yn|U5z7d0(pa1`EAp?; zzM3VG)b-e6+N>a(`*URsm+Z~2$oSQ<-APHHX^KsB(T}s|m(F>wbkc>VN=A3y_7EG+ zCyWbHijG$byFGUhwpebH?7DR71g8g;cAX3?Az4f1tIbw^&{;Eq!9FcWNMPzD&M!Zi zk`Ho*DLQ;OBIMwq=+Wt}x&Ke$ZnahcChlob77r&Cr#$$>Xt8DHk`RxGte10A7l-N| zt-k)f`C8SSWxIDrJH7g~E;~B=ZC1qB+Q%PKdWn>K0xoqL>1=X?!1^z~`0NYSik z+w<>#t1U0BUAfh5$+FCzQvTh)m%Y6vKK*s#)$(j7gKM9rYWV6bK?`8rCf=b(eL-Zj56oRz5cIsjY@7i_$b=pt;dJ zrCVR_z94+iME_jn(l>i_6kPO5WtPv`6k@e>Q{4Kw`CG0is0cEIYAzADWbysWv$U;J z%QAm`dRk^PH*2l$<%rimEH+$Iy7aJO*7H_|Usqr0*L~akHO+Wl-}mk1-Vr(hnyH+@ zJrkG9ufAHfHEQ#bi5x*)%P+6|c+V=k=cHw;fOhIilQxs{m-n1;zP-2R>#EFImGkHL%n;v_y?seSWs*Q2$1z*3uzW)D* zcKbVZJ6VNt@8_ENc`Y@SNdCRgT}iRQ@WZ*NxH+5Mow5(F7I&Nw%;kMT^Qc6{6}N!v ztN-cOe|tMw-Cs{%f4acE{k5Mzd?>J)GfnkL)3r$MrVg)_PFhP-^inw;=Lt_|U}%|A z$!U0G3b&w#;@qz}yJk(VoR`M%a4-L9BmZKbjUROdpUk?cv%*yW)Y{^CZ$B`|R7VGU zeHOZL`1oQcxhDlLEQQ!3k4Pj(9C}%?Ms%&|u?)o{Cg&e#_84a6wlt(h&RX{Io_v4u z$*Bz4x{5k3UvGX+H}{{%;L!84!a`3`WQ~(Y_LhrnCmUB^eK1G5E%dfo&C-vY@m?20 ztMkoYf3-P3->%l=-X5WN>GWflOJ=z-7#uU%_j1=ciL)oSvj|083S?ACI4JRGxxf7P z-FMR>vL8-^YBvL~mSWv#p0 z&oyzCNQ(K*yDIHZc>e#_-%)&h&*86p?U(PqVdK)6I92kM#`S04o885pA3JoY$zy?* z^5Y#lXTJTL6O?}N|H?NF|7&8ck4m}!RVrWOd-b|#_|)1foln1h50u`(^h3#ECsSkN zl`r}gniZzOi*s+EHP~`YB9|le^sYI_y?N)n*}blA_3K^hR`0&~{{6f6zxI`u?cKRM z_tvwIr>D=atE-nh9&7&Q`)+|t?fmj~wZCk5?*9FI_wHR;#V<8h!iUxrA3t0f1LyuAa*hch+p>QuKKo<8?E~jMd}A!CPB1 zrOvS`uHL-PPVe=uqI12O*HX7$_xAUo8ZYA zT*QzX8J4Zf=*98+RwIL_q$aE04l^mi^Uu4#`xVp`R|k7>DxUfL>Cc~->Gyg*26YwZ zZS3~{IDdEjufOl>>nrNc8BXSQImgN9cG93kzp{d9W6$FXtGR-KnyO0^rf4qxy-qve z-15(rdwf1i1^XDbiG-*7Kc8H@y4>!0%=&)Q_w$PP-OcMY>^NGz|0Yk*#?;85Ls?&+ zI%#P3W=;vSd|Dbgr}UiFW+~HEb3X4{_ciOjneSzjbGs!LfA(`)Iqk-j;13Hg-H*P@ zaqw*xP)UB*Ubp-9w^;dL zPzT@N{_mF`Kc2*@v=}RLT{``QuC53CG3X9vZjvwzW)}Bb59mtv=Y)ed>v* z`+8CN|Ca_F<8tslc{CuKoqX~B0Jj*0C3er?_At>>Z?Rgz1#Zrc`fdQ*!;RM^`k z;fx;VG9$yB8VVO}&5PbF$#$$#Q?>1DiQRnt=d13D&ymi(wQVI|xXYfHbB`+w-87f3 z<2cYX#c<`6@9)mn|GR8+ww!-^{=L0ddnL_g=NX^KQgCj_-e$1Ab@eU@<0n&$S(E0@ zJuVr%?!qz&=iBE0znuU7B%$zlyTXpS1tHWb457HKZyh*S z@AaPBbuHWH*q_o456!*Pb~3Rv1O!E9wS;uv2tCELb*|plhO67wFtn~`T@&NDFJOB5 z_5T~DOo~@&U3!JVC5XA6W1;?9QGp*?ew8!%POj#C`|I|Kx3UaVR;jFe?`PUEg-P?* zgILduo}GL5?%jJ=SmF8QpMUlD&pUs4S53wL)4zZH|7iL5*>?N7?{Bx4e>Xq9$;fM| zz!5te9A9@bIUhR5@J|Ws<}AK%Y!k%K>cQx>QjR)@9*sm-Sx1^ z-dl!_3=lU;%QErOZ}dAe|OYcx@Fmvp!19#vP|4s5|1z2*w+1% zY5M)J{e6CZeuKl)nPMjlUC*d4zfrbqN>GE(K*%VO!-q%kN%V$x@msf;p!< zF4gSuSzT(VrWkpjzvYmB{oicOCpEin995b&>+C7k`rmJN-~apeeM^A6UCp1<@pX%) zOi7$_RU+uMPMXbpJtvL@CeM91^Jgs!iYhww{9{sh zv_bHxvfXQgc4fY*VOw$~_PXLU-N%X+@=U5v^y`0iZ+%y@Q-=RM&w=Xj>GEDzt3^2t zuhp<6Z@Cu46}ezn$#Mzjsf`+n%a1y0PMOqYUQ<_R=2e<|Sz_<)+^uG!Qjz@2WlK8oSz=d;pmYmUT){B=yGnS$Re#1lBoqt z`&1bu%;xUsI;vnHo;)e&>ZcwK%c~BnTsRF5_J&OPG-qPap`BT~*1c1aNDh4%`L)sT zS!t}2r&s7&Q;X+wO2fN#)0<9rtyur;P~}YSX}6~6pMJDgeaeM7&v)wWeWh_Dpj`DK z;{*YwWS{K6ze-nceKsegk8#4LGoQ1re=h7Xd{?%6-Fr^OfG&|B&D5S{Q>F?{4Pj8& z!JsfFSZB{`>-TfY?%^wJ1^Z{eO_u|%KYZyj+>U@ zxr(kGuN>+M6Oz4pf)C|v&^#7&_F+fI35(SS^9|XY6xD;ax&>(}rub^G25DTm#lpg{ 
zzA~1@I7yZ-&TzMf7o)=UNaZUJd4rf*L=-*av`N5~2LeKv~m++kB^7Pu{((sW^KZt21{foYjb0^G_j zdYR3=lehc#?`lQSfR2mv&MlvRZ-0HAmF0Ae_FehKaQTGZFwJGp4}V;5 z|M%+9cJq|4jYc`2SV6kHHgq##gyDT?D5hq#@aqKImU zBr8j!ieq=co>@1z7anN&+-P{Q+>OJq|MMoDb?=N-O7E@9jFq?d|NLZ4c27~>-yg5n zI~6&&_(&Qva2?#ej`yOlQ}LnY`u_KRW=3hGPAylr$d~rwOrD~3^H@jy-`D=_%acz| z;C_>z7q_==Y45b4(q)FrZt~Agoj0F9zU6}N#vSwed?vra>(L?h6m5Z_9;nm6N>Y;n8j|s`^`7|)!cJY)$=shK0Sp_Ne90?{rvp#=ftgH9DbTlW;lH?Z1wLi{s20r?|pBk zWxDhI@Yk*@y<^UNwwd3@{#+)>QG?}x$IjcitpW$Fbwh7om7TD1GLy@kWiM;|qE_F{ z3s;}~*kbMKyE)~*XYakH^)#e9JbQYf)b*QfQ-Zv%t<`j0`E+;w{ePf~_KpY$iOqQ? zrRLru^4iHb>F9nh&8Z>xb_*zr3o!(p`Mf3I)UI`z-*eBZoQ_x`!kTp=QlLqDsYJRL z=hCBR>&pKZoY&lVy2@^T-}21@uYSL;U$a+}q1h>kWBKQwKYv=P=1ZNmTt3Czw|MjZ z%v)wlpWKm3zEiT+tj+1;YTYR@*8L%?!sHCieHU-tcJBXt`@c7yYb^Tu^mL(& zyJE`q@~GJ}ig*-{MI1eB_;gP3jEz5S{s*z4-m@cUc;qzZ&?*zw@n)mx>pZ>{`4 zhnkBN9j)R#XR&;$lCAIL?+bHpTZA7^=DPQM%GQgf1q#Pr%>8WVcRFjAS<8lb;m>8$ z_q>f-YbwAb!sT$1HF({@Z>kSp8p+M?XZ5_DzhugckH_EF|5#rC$6Yx(Kl!$lVkt)> z_d!lpr&Y%sIZjwszBf!i7&$l7|61FF9S?s0`nTFS$za^~vXlhac-o@0IA zDiGk6Yso!j_1v8btZYk-sHse$9C*&Ru+}Q^-k!u_gQ84eRS@Vq#nt7^%z&njb33(UZoS=>d)s5(mIqFOOb_1tdU^S|fb&92ws@m!9~Ljm zeJ3Bs*tk6(bUgI#pijHbC?0zhta;>?Kys2#!MU}?{{8U`r{CUAd?e8ATSRDcq+lGjFvO{`yc* zUSGa?>c7+e@9*#bzx93Fw%5r#nn!mn_*DPTcl3lBFf%Vh96xAWK* z^IpXkzsn^y^BBB%PDV}NXwvKGV|Z+p*{KdW+q8(Vc&}$YO3!v}U)my>+mdk3@_CHc z#wm-YOmZrX-7aZ%wS1x==Y(CiuH51{X!*GP@Wa#7k4sFRW|N*Whfz#(X~LF^QC@rw zL0zw3#dKU%X_+zQQk1mf5u5Xx>8ECdd1W7FT{>rK&u5c(J@IVWWS^piEeu_nOP3km ze!K1UuG2q_;`}s^&Hg(t(5!E|lb6>`D`{twV+H2~E?T~ytK5=nb~tUfB7>y2*T={0 z@y;DfMNUPW;w<*G6!PlHTp|**a$WZCg{(~T?Q5?Ed1(Hw+kN}4XDS!t?i(F9vY1<* zOp)7PbI~iuT>9Kjvt^g~^7Hd8h5O^}>VK4WDeBB{?g`RV487|m#B}af=B#U_u4fF{ z&RGgLNs2msJ#6;$&yOD_zL%#yS6Ld;RkdI5$!kU@%}}qcxrbTB=LoxbGqxYJlz;3a z>91*-zHz~%AnCPBRxSA?5cENgVM78>XFxzumg-J{oFt;j&%%W^V7lS8Fc4=H>BnHItpHW~gSkj8E@%rUvnaz8w<5UE!(jtldW) zI82XSv5Y?T^W*2`Q-YTk^3C4&{`ay`0qJ*issMZ(rqi&~B^zdwX`K zeF+c?UboP=U}~_FrYpxWEtXYNB$!fkB8qx?J}%3YT>3dFbLr;gclg}0Cl`Oa=K8uK zm6u`Z*4ov!r%YQXZngRuoS4W6+(@C^+qO zXh{72`d?QM`Mg@rb>e!+X*01kRC;z&t z9m^hhb&ro`;v8cy%ja{7<(tk+3a+keG@P|@m;4l=Dmnej)4e+s)ep6XMCF#n?%u!m z#l)bc*IT)-mcC6huKE3Td&kQXE9QjT+boZsPAxj8nOau8@BX`6+gc~BjjjLny0U)r z?YCtS3QxD(EO~xfmF2zjvMEZ*rnN__MVhkiIw?;7Y{Bz)mzCEkgDsOjKM-CzC!lR< z#e2T?=S>$(d@oBVJ`sH=oUE5HrQ++Wy6tz*e)iDJoqcS^vc6|q!)E(lFU!3=WBKy* z2BWxTr!5r2TIOqb^n^_jbTWLtRXJl?LhZcT^Li(kG;7(ZUp|_Z{DA2n z)q6AJk2tPl z5^JNkm`yEcwDb)=BRPB9&b#-`e6wSx=8HIS2rDfQ;i}up{`R}|u@&DPJpy(AdwHyM zxjOaZuMWXQrXq(L1TLG*NM?+<)+)>Y*kbt%jzrDnQUOX9v44&FrdQ4r)OMN5V6gB< z-TuU$IeqSD!r9Yrn=u3wJ39ASK3n(p#sPuiSdrMv?=@VHe118nIBr^Ms69Z%Y?gm+i&w7PJ7^F z@%mb6otpd-SvFKxxU9cbiB{<26&al#yhVU;7Z~2yxnOAqMDa{tHF*x?-`|o2W z3<+yv-@cj??&Y;|i3n@gltu5YmzE^=T&$WG{{KzmgkvUA@x{%-UWP|9XNB)J>rtAv z?R9?Mx2bbpuHiKlQ0Os^OX;~7x!UKjCbQ1@_<8nqzhsi${C-$tnZ5M??fSp#^Y8Cf zWatrO;5fK^PWA(5QJEtW(^DVzWUbX~=+|WXZgRH4ReR}F%@&LI$vv7?nI-l-Q-l_q zGA>$r=-7v4nXk%At>z|iM6KMGYc|{W{&%mJ65wmB`<{QkdEPFaN7HL!{1hez76oM` zF^xqb(h5$RSC98Cec~h_xUQ;#Wx*cdcaue++M#uUW;+~)A%)K`sA15e4dyRIJo{IJ5}!mb5s zH@-Jts@i;ee^teSN&3$>7q65zJGDY0`1XspW!C2}TlBp);yze@yHUW(|Fem2$Bm4| zTh|?&eoR8a%fpf5h;@2Q!TUQf&a zxiDp7&@^s~?N;flIYhnZJzFUhzfL*dEnDPbnl+E}{9+}RV}-vy{k;75?%(bA_wL?# zvsJ*p?$3uGUzX-ZGg#?g`}x4R$F68ykfYiT!=l}{|2#V`KU1Lo*Y^K^Yb-byE(y_0 z(^E-h;*eebv&c@qUwwPn?_%p5v!GL{Gw09euf3mJZo63em_^cG#|_u>?@8U2;cFL{ z@jD}#EpB?gxZ~aK_xJWyf6t3L7GL-G!CY+)`+wiegH{}VXmBq1y2<+cdmN9fvdo*6 z7CG(l$2mpo>WaNQu9q|wr_1JQc(1X_-19l6<7`@R{*fs`OSeo}lpA$<#xKUKS7Rj~w+HnE3gSVFjMv1j7Umkpt{e7*| zoad|?wZuJoPH3!6b2#Q=dpgNyY8}gxl3MMSH^2KfM+JK}*6cj9{PIl0byE_j7^>a! 
zdpbvA@f2>wE8(1O=XT!D=`qt>S{j+$Rr<8Cqjc^ylU$C%na}4KJDrSJ;omZ0&Uyt_ zjR`?QPC{JQEOl1&b}GvFtG`}zt!n#i#kBX%sn3GEoH;snE;8R@WVv+4CLJL`?T$?% zDlIJn&rb-7Y{ zN6wyo%YVQA|KI-mlDZ#{noJ8~(rD2xQr+DAAjW%ckOG6-Y^|mj)+^?$WqL5@^R;6S zI2D&B^fXr1STich!B7NJI8XMTUT) zuUUJ26xZ*m`EzU6)tu8oTuY}gXg;-S{Ifr${i!Qoqz3CTmsG9-x4HXIeKB}m5gRCQ z)g;!QCH}s-JEOqH*RvBhzis1P`sZaq!MUi{r$qf`SU#`49_%zvPQHKnEQ<@#yar8-M@ zJh0N|PJi?4+cuFuUp|I=op}5A@B6y{zpt-<$KU>Z`SWJ~%opt~0m6xEN*~))OG^mH z-k;%if6MOo`+x77^6BH}=b3TE3om_PF-$(Cxa6{Bi-B+PxlVygywc|)uGaqf@wmMH z?`i+~fBp7`d|sVp#yo!y?5y~^i|6pNm$NG4_SqcGw39!7{%Za_p<@!}v(J9#f4@Yi z{@>^MjT3(VzTWgXQ^vloX3zfp{|~>fub-#4`)u9*+goF&2c44OypTC->aP-;d5jGK zT1$nRjtBTE|E#bvICg8><*4mF9E!7+ZCw5Jo^z)Y58n)l4^i&Ffurib7Y$=p~=iY|B`?y7HhCm*&zitD_OCCU7?dO$(l< z7u>jL>Z)AB#3R3Vtn)rwTYhi(lqr*9K1d|7h6bGKG5bCHZ{GIRIjqlZ1vcG&D=;zW zoP>GQ@hM)8$vxMCyc%b4wljNxsKGC z({KF<+kE|N#S$0IQ2(^Gy~Xc7KR^H3MpP%J<8NMhb@k${%ci{Bc3Z$X)V@fM1D$kA*>F&l86U>op*6qX?%tL?kH3BYzc0Z&j+!f{a@p1V`cXM= z`t|kkZ}ZD6A8uN6?x)wiHE9#M^0Q{SX^94VS(cxBS9UvpUv0IWeE;#sX*d3TIK2Jv z2Fv-!m?|T8Ot@ll>eka5tJEWR^V}`@6}j}b-_AX2tkQDD)7)XDWPi&95$8ikEceDN z+xGU~hu`=2*8L6O`uEfRUy+TRrlOm9TkaGto{C<4=+C|CtumJE3>3gMwn^B<)1%F zek8qp^(Uz(C-ZRY1uyN?HKhy>_Wu9#^YSj8V_Pn|yRG>A?&Hg!`~OU@ulW7$?uMM@ zr%n8n8EkBABlq9Cdr$iEpC5<$*T?QrTi?sr;AL+$*T3|Z?fGEMR>P?CYR4oW z`)D5BSut@{dwV-*-FlE4=Y_`)MVz|=X1ST)s(A0I8B|cb+5Fg*o80RgU6zKd&yGzG zGukDx$!jU&0;Oj~#{~Y^%zs`Pc`enLC(7fP#uT9%iRCLd@h)`WIA|Te_LSyPtM^Gg znTzl5sVv@nzy8OMhd=+sJngH!&n)oVCe25)@L19ziDX{KxVMki$S&WwWX)E&HeRQV z+stAGxutSvZM*2D>XLo;r;Xp`iqzj;3zsaJ60^~~gW*8yi(6qHew>wt=Py*w+SA>` z&ABAAN|WVjOz<@BY11`sO>ye!G>R&GSjrUUwQ!1t=I?cDi^8-j&Q;EPK4qd|qEGN* z&ma%e^ZVG&3ZG)|xL=-oTdX0*Aj2(8 z74k`I{;t0?t1wDHd&njG`9bEvd_lc=*4HRNAK8L7J04foi6LD z2Tek=yu2(Ek3D|rqIvUend#lxEdi!;6n|*VTHo&9GcjP-&74*L*Q_gTc@R2p`~CZO z?%plZV|gwq$gy(f%$`E0gjvf9_gqmG7GIR^9(Q@;N1hu7!dGdgN- zKIh%znOR!@|JA#9>-YjSm;bRUSy=iryngB~i!%Wl&wUCQHr#vGvnk|Dd*o@`6)ZNm% zchCBIowO4F{}br%2e%dd3AHz~xN=2ttJxlBj3M^n^+ZH++1 zN^b{EAt%dZ22$&Mm>x4*myV3=dz8h?fTbM<@LL7^9FJC+da>CE0sKJ+0H48wk~WjI2Zf+ z);3|UrBmeW?b`$dPM2MNF7j=6-DcS-muk;#PrSe7R>sQSsAYm4fk9uKkMEfAjFl^~ zC;WudQO1niAg(EsBr4xZ1o5;VHlFsW{r=yFv-9uQ{`&FpeEq-A6_)(7dxI|h5(wf_ zG&t1|q#1I#q_kOf(Roe%&qWqC`kybE$fhXhlaMfInc>zW|v$O7Byv5_(&SBWLEc4pcdFPLR{`qL{ zSxGK~_neEbFE}bOdBP@#z#!9Q7ni-X=r+8zMe6;XpI*PkS|)VmBu-f}XIbVdiM^+v z&M^-Aw>M~2)TgzZw<_;hl)!oC&b^%Nvq8uIUN*_fm3t?@YaOqmOkew)jaqLOPB@+F zqbb|({=DX3&~EcHl{fefUno4b-@Ii6Mlhxd-rZ_D) zB9hEe8X4Qb@?KSZ&gZ%twl;T6d!_uB|1H~m`|b10ZP(6y_TqRLvt-I4k-f!J;}%X? 
z(3E*Mz$v)P|NME&bWN3Xo9Anm1Vsrrz51OzSKRWtO`|}QfQ;bWt%j})D}ui>9nd&y zxxVgfhnCv=nuC|V?!7dNfkQ3m-1E~rvof|`nsIs4n$X4ZTuWMJc-KBk>X0nrSZK9f zL-m+RN9r84wIP~EZ<)4q%*ggFJ-2MmYq^jDN6pm*RSX%kmYvexa*bh$h>zv^*Ps4; zdDA@IDP(V|&%_|5mIDu3r$nu0S}NqQWXjZ=H@3~4ut-b#$!R+Y?~muI zC?erMefja@Z!Z*|2MO)QMx=on8C>f1UpS>-zt%D=gULv#0B{{CIae|NlGw z`t(_9EgTI&ue|1+(VQBzO0wCg>M9S{$|;ZLvurYUuo zt>5b{|NQ2T63diB&ohrOd9ALDJr!JdjejZf95fv1ix!{dGS+ybM04(Nb{F`t>ZG)z?xZb2U}cbvQ!X0;h^E zy}~Wkd_hmY1xxx2lkUIU7VY%n z^TPrg8Ohd#@8#af3w(ZAvS3Qmsr~;x#m#OLcr{1wIXYt%t-RlT)#Ti=BPli+w+=pJJxL$*pz!a z$m{A$rbQv2{{6dW)y-P}=cD=kn$Pd9$J_sV{r}JWdzLFh88=1AzuwF>!La&$vww?2 zqtIf1LqV?Buhwim{pgyh!7=TIRr|6-tsK{DR_;`J^Db{{h+>47k>-wdpY!WKC-c;8 zJ$HNQ#a|B}ew}TeUwl%LE!StM?b>cz89sTt8jE@<+y%)UETkCGnR?*yF8e4^>5kz@1~p`#dhaE|14R@ z)OD)vj3%SP^EpA&imU4C_zqUTmv|m~KUvSPP2%trVa206t(?W0x)Y7>yvggyT(kA{ zrnqC=iYiMLT3790cL@m!dw>09$*g6SkQD+cJaoUhleb)$jA0=I@3f zvQf#i-llzi^-y%7!P(0;-+Nz{?T(#WewB5VK$Ae1rLCQv{^=<0vf2v449!kIW09O$ z(+uOdIyLR({Vtn4ox&nMr!ZlQ$ci7kdn#w8J)iSnkKF5Dt7;Ob2)OVVUg>nzoKSSg z$9SFL(kTW@Lj)urFJ(yIvnlI(={Ln#&VnzqmN{{*+~up&Fr(D^&9>cd(>CAbeO>a< zDPu9Gs+ZSN-p5@*+ah-Bcz;kls?qY`kcVdW1Ve@;bCNrzeuzvrZ#i&*KgiSNi(ZJV zFNflDmH@3%4sNZbJ%*-Jzpwh?;prXfRd{Xf>eMNEpI7xJ_8b*h8_l5@+V334^dxKP z&G-3EA3krIGHG?pX1~rQ6ZTa&t=n*G*TQvt`=joyiOqIh*y5$R#n3l7>v;gP{c3^5 z>n5&=4L)_X%EG^R?@?~WDIw;+Wf^)NKP|B6nm%Q9)fFwx+uKB)j!PWRTvPGlZQ1hO zeT89M8Ef;4m$hiD&EEHT&l$zXKYwo6S9RlV&PI_FEk*05Cbh`Upa1Xi`TCz<|LPxq zoH+}4&W9t(2tiY)3gtbE?`V9v#5 z7x}ku-~L-xwZO{m|A)KZ|G%C8pF3$|ov$IoBB4nld+xul|FiA?zuWosKfk;z*;i7# zeckEj=ldNLj?dw)`}6hj^5e&w!WSwyTvbcbj5qep`Vx4yY@O2)o(BE5*Z;>JUZ?*{ zD9m&9w2$_)jTKC_SiNlJwQZ!igKl}A-1kr3YvqG}?R|_J=bTGleD(0%b-}ZEScLDr zE33V3XD_>T(%se9oj%P;W@Vq_|9scQy*KY?tleKzDWkvm`@Nf6vwOqV7MI0cw>o3E z*KnEXv5d7E%$k*szg~WRzWlg*w(V0N&XrRHnj9O?*12;yHcT-Rc;u|nYIsg_?LT(? 
zC2S1O|Csn*tMWQow)^8f$>cY?s?vkKrWBs5yuH=jeEa)*`}W4@{r~s=f1OqAyS)4v zPnKUkdOVk>?)+tz)`EpC6GDsxf?nG2L|>M8{$|&$#~+Jq=Cw{R zH{Z_B&wu;vvQ0eao(C_L5O!(-9aYh?LEzBOnpHvvmt}G&UR(Fs=K7DRbCef`aQUhR zdp(8-DC zt$Q7PcgvJX0#07%)_p75sq;GY&Ku_mPF|WxJ)dpNdNa2gdQD|w-Ta^_P%L=WmwR)9 zMWup9_7VGat zIAz}dYHq&!Y+5j{Z_nqyb*GPtCW+TDoSHg8Y1wMuc|m$R43C*C^IIC^HO2Az_OjB) z7QY^FPm}sEvG}B9c1X15(dzleNqSR)mM)p#RFEmv+ws|CeVOYKFYlsry_ZFt9;`9C z?(KVQrSam-ISVH&yXbXlPIAvh%Y2`OEzhRZ78u4%mGbh`c(BJ`vrXXD)67|R@p{u| zX@#$^?NB|cvQ%TKhUV^d*|WA~y|`XFH7ID+nysNLE3D=QdF5UXv-I41U%vnRIaA-u z0jJs)Dz`kDQ*@%mNHOH-FA<%fllxxxX1h+%=*om(^V_tk!%Enh#yOJRY4Q^85e z(=*n~ax*CRa7D=;?wP>EYV@qBuC!I)re=?7vRF{isnXREn#-R*uZ&z&esBG?b*HVo zPkJ)yir(j&{i!Ls%hFSIYmjRC?sZexG-B^>-Tn62v$XB^^E4d;nNNGGbo{={wvqQ9 z`*VY%O)p=5ES#mLl{$4kpY!%L-IE@bSOs}WU)H<)<>$v@+o}5wEju-JJ)^#rvqA); zfB4Sh-IMkOc&)p8wY%u`t(w@kC$HOI-4yb$bL(bj7x_P>Ql4HN6N>}IPuqN6T_T~O z+Qa1}xqJ8C+P|+H4MnEpwEw+*{r#j=O{C*oEva?%hCgzbV^g5;^ysjN}L6nS@>E6 zit#C*lXU%);?{iD`xS7~eCZUcT=7ZfYXhBx4heYoy)b0BFy~O|qy;LfM>H;%trZCJ zmTclSG;_0le(a+S&*2S{x31-EbAEbj?deZHe*FAUGG|$-%<|`#3nRl7*S*Fa)wwfp|tZL?psdT91NK4NL?`}x;Z zXI`UaQz~D(dHF5B{Ih1+l(%KOmt}IOhBk$AIqH=DT{q>FWxU?vV!j5ArA|DnzrN1W zTq|Q~#3&ZB6b+N!LraUfyGozPvDMZNFxiWBv6~XZt;se=n5f-aWs3&Z`TZ8@(1xpAy9K zU{3Y>;5Cx7eUqau&zN(edcL>gn%fycYn?Pz^_EQ7bc?5mhhfV(If-Yh=7e~0E4EDd z`g2We_=g#WS66QqX%d(ubWDU>z%6oWq_W7Vj6Too)RM(pN<_Cmk2$4zj3r{ngQyo# zR&BoaIcy&{{r`L~FpXpF7R9IrhJe>KJ*PM6urA2Gz3uhfSk9%Jx>mk;y%;Up=c7ae7nsd&;HHmy6HF?Tm3t_PIP|*I%bI zmQSU!xB_m}r*Wh^trQRvJT>+9X$$egY04Jv4oCK^(_T1*+fnnQ-Y)@*%JNzeV;=>Du?8*mAL9>Jg!u70GVLc3cVabY=8v47tGmey#V#^QVQH zU*F2J&M`Pr8TX@X`%xyt<(YFhrtN$C>)aE8>5*2?%be@$%QxT8o8>0V(1L<%VxjH+BCe;ng)qRj;&^Y&mCr#@BoM?Y;aDA3t8Me_s7q$mNn2hcgEySRBsXJp9-8 z`I)HKYhu@YRTS`QTWIckSn_qd-Sw7=V+PMt=gy5bZ+2(WUr9N5ueXn3sWV(tB{YgcAXnKO}T z=@bzSP4y;&xMPPu3mQL*N#a?4xszKk>4RO>`y>vA0Co=1_}Na+c#8O5Z@PBmhSi+4 znzy%}OFeVV>V0#1OW>j@NmHgZIbW~+{qF7V{Ch>mg1Y4GYpVYLtL@h|~h{@2Y~cKM}E-{YS(nyg7Zmdn?7 z#$R2wYKxeY*3uhStxv2n3@XrL5pGgEWaIz*bY*0ie3MWY$4SXhuMJ*_2V%T9HG`b? zem70fS)L=c`Sx3b=^OXyP2Z*HX=p9~DsV>7C9n72!E+S%!>|6^wWH@|q`AKF(knX^ zmR`CQ(mS)5&*SATH;#+jWEby=<4QhtEiyaTE6^j7OQ^xiyw`Ba6hW@#EykKE(}OrQ zTLh+VpAz)P>am9=bIAMvKh3D@)eQn1A-`plb&g5Y_9!NuJ0|h^P1bbnmx@Ovm>4f@ z(-nAeYn^G;jhuV?suypylIc^dc)#ZfkC&xfpW>0qILqf{rKTtQjaE&`>`825zkfs^ zOMoT0C&0@{S#v5!&*xWbm{>H9T6$|v3<_OZeQsvJC6i?$0YQRXJrh^ANOnvK;wmgI zW)9Gs?(O(ySD44nb>3Y&v(jyTHMw#rP8Es~V6r^=Lit(JmSsJmg)IV$-nVbh-}m!( z{g2|F|9`bSg6o=cdjd;itE;Pb-+dR<^ZfHps{rrb=LdG&s*LsEuzS8{ zO4PHHRxc+vVnk3Q&(Ex@zM4Llk4E0!I@eH&BXvrW=Dpt{ zLK7wh%PsE~c=Ybwx%ID0zMbB6<}yczWb&ai61sDglTKC6oPPM@5zEJVM@l1?S5_6* ze}7zlKcK1O=C2`?(yw z4aZMsp7lO6`CdO<#x%0H!qnJrrWV(UMaKB`Sq@7=bd}^4AkE2$}x}j^6b0$ zVp_4J6Q7{-m!6FRk9OUVm^>v(V%P59vzHlKCi_J^{_%=+k5d2oncP#tyFFFsKb^Do zv*NKiMvvz#edE01^^wxjYn9LMJgxb7(A@X%t90A+^u6CF26b^VPRq1of4*(@yWQ{i zz1{V!v^;;y%ineTdivw#>}#uRWcrssUi$L)@7>{&FLT1z_GI3YePnX(+OLMhInV2V z-~YdE>bu`{n{O5!lw7KLd_vP_If3J+T{PdfpEbW<%gvhP^VnpX_o;|ArCwT25gnQH z(w@sYWX{?)Wy=}wbCox~%zhi^HPi6dhGP@vc5`aa(%?L2@woGdz@kg9RHORJV$XA= zUYAVx`s4B(V@(g%!$DIfED<;+`Mk$SNyT6D{O32dAMS;k_eaNWtvB=CJaLvl+00{? 
zKbBa|=U|w=NW>$;$w$$}$@BKM+Uwc6$vpGz>%~eL|1Cmt$;tm|s&U6ZxdKjyntGr+8^>Ufv zwOwb^PD`k^RK35Ry6$9$VM~vO#q*wErj`Z)reH2r<5Oj~*BVvDJ*!;z(k5-|!;+m} z4nO;S|Br?K>!&4O4jP{_yUpmK*urtJ*h|r(L5XE`{{Emz0+WK0dft^uKi^ZVSHaD2 z_(Jiy*OP9~SoZAYna_K4CigGYV=`89)o>`*o57trAvkG@QRw5%leaGPi{wze)|NSM z>$TZu*%tadmPk(NSuRoRmz}&sARxfgG094P{``6TJ)d^Bhig8&S+!Dy(L+`F$=STm z<^Ml?|NrCs|1CFjlX_Z(yfPC5xD0|Uk}aRkU}6mM%2598t8c-6Xj-bGrjQ@U(w@te zXCGI3-J9#T-2UJ1{qp+LckjM!{{Ego(6Q4T9Z{TqnFnQ@rtErnZEgGGlRpb~-`-dG zIiNQ5{k=W2o<04$D`wfo?2yH&*T2uvmQL!awCbC^W2^?P>ip8fn~jnwheDlW~fa(wD~$D1;3;$JPCEVw+CQ*-K^;)Az$TRZgJ z+q!J!46mi3dIuzOcfWgDjcyI~g=iZ~F0| zsJF#kASmo^*}6iOT+M*2me{EWuB{O`6CoeF)HL}{wR}^4@6Nq9@87?B=kC34vG#qB z+xhD)W#@&bA1{!!~qC8@O- zy6xRAZ(sZ6T${j~S9ATHyc7Zif=+!lYJU*y1KQBGdtb2E^Ep}H)Y8vLT>WmFtaNNo z)%@VnZZU=vlCrm6ttmLy_gEs!eCnjw>r;Ymo0*%N>&y2ToIkzG@9>4vSn=R}J;goR ziW}~~?=@`NY!%9_Sh4@wZ}-1{^0(jq{o7hGb?;hE&1as9cG-(>ZFAH)^7}zet<{{* zr*|=gXvrt8-&mn5Ty-wfefS9MQMDo&fTTtaJ?+(wrC>6I&;JEER(`d)nz`@AT0LEwfI z7pDWi1Md4TRdHBpIB$wkGEe%Q-wiW%uhVC}e)X8dtKARhF7Mgq8+`x3F4ml0!zj;8 z!^Z|ZQzlJeUhJY-TwN{P{Qv#`zyD8Lcp6&mW^(8k1vLErOiW6{aicD>)N^=QSCYf~;gEz(@duv6_;_S(J0pN)P$kQBCjD&TbRlF6>u zR$kekltSM28}8bcZK@Thow~MHvsFS<;9Nvw()F*Wr|ZwRuk|{(y|*nW$wyWBY00eT z69UYAuW#$wSUJ!5(d_N(|NoT#U$|R(&-UxhGntl~9G%g~b&f0PpiWDGQ^@aa#rw8B zpVAO?sC|Q;_jQvs2ioK7e}4L6;`i95|J+)6yFXg2{DvMY@9o_u-xjBR_v_)qtFO;7 zKBaNw!JgpLHm~14o1Gsoo!Zy_Tw-rn(T(@{@8Uhr87BA4)K;8XaycmI$&`YqbEmiX zY>rNE(o}lp)MEH~Pe!iQ+C9bR^kz)rPn6QPTt10=TG3TSrq!mL972b*bN1JKRg8}h zzg^}!L%`|PsVA>jPcw8{U$=#8mQuXuw}4kNn^wJAe^!zqX{GhHsKXjdug;Im-|g=o zzain^5lc=(VK3t(ua)v_$5j^X;ECe=xM%dGlR!>E)LtJ7a=+ zd<@RH^cWst3E1ZR|JU2wiJNWX_0G3{UVePJgcoDhTK5CTkGr3X6ivS^a4l7iafv{A zd3o&ZbC)9+ri$G<@i(vd|1Y~AC6@k8r)QlMD&{*_oxM|FilN3#p@`{~vzC>XM$Mih zlrV9Q{hj;sm!E!G^Kgo=pX6i7&Ef?O6}37gmZpvlOqAtL*=a>((XDHp2tR(ySR z)&B2e`;4X6O6M*={qV=X8jHW5Uh980DXl%dlx4x`Jx`c?FPrB6{&n_sdwaE$r|4SU z(^WQe_k8~S_w4(BAKGuvo-N>fCR674=Z6(m^PD(hFV8x5W&4-EzrV9|m@RwzYuipzxeyM%vr0J>ai{rit_aLxL#y--iskd%%U^2!8`pI2jnTVolby@4wCU=ChS$?(ggui|WcBhhTzbY* zS>#oN@^lvliM5)GOK*5^D=cj>RMS*-5b!>C#NGb9_;aS^Hcw+dKbRV{bisGV;;%e@tU@Bgnae_S{`6g31(b2~e7@5k@2O_y9R>@rD^ zRk35onr@LGj)k+^1T=P5JQwV~Hmh4(LjNd7fW_l`k?h&L@u_oyoiq}CE(9%Dc7sFn z)g=ArQ-aobB^;YkY}PizuvqWcy7V)Qn!TB=BATvl8Ve6dcy|PCsC$vzqh}#sb8e$u z$??aZH{B|fTR0_Kq$AAfQ{}AVg>$B>Eq&Whn>{MEf8Uk&Ergy6rt22~3&J}!K`S0uX8)bJzwRs}Fw%*KI zd)9Q)@(HOM&+QBPv#WQEA}(IZuZlt@A#bl z_QOX0J%;BpuJhh=Kea?)*Zb|aN_vVfo77%kD#73WT&hyoSz`G_fm>y<*S{ZovF-J? zpn!wRPwzN)+Qc^|;Ihd<_SbJ7KYqO2zu)pXlZ)5I?|UmMH)%h~t>65QnB^Ups&^=T-HH9x=E@Y&bHuearDE*AZ) z*s^DTe)(=)&8H=H$;ZNTyII$KR-K+2dnar5wyROPr)~PCr$+Xe{k|K=(a73=Jo&H6 z@+qry)}}cMGJwvM*Dkf1_y5=R{qy|Nmu{WnwFiRw-<^qRRs zIh{da>NdAb$Ame8hqUS%E=F-_Cda6T3QWCHdwuzwY~zCxo6qq%|NnM2TJguvnsb)f zOF0#LGkXlDO!)r({{H&^cFFhh%$+!vhHxa7om-pTvGdl({^g4Wigw@q()+0A`r6sw z=LrN!dR(?xHbY=TR^jSNM7D(iRN zjxkVl@%^>-Y?$ijv?EbcngXXhS|>zCXRX!ch!I$7%3N@et=u!>RajTB+bPp>HxI?7 z=N^j$Pug_z%UrXn-FLU;MsGDUR8x3RyCTKt9BZ=9^IzW8$0U*_1_d44!+&n)zW3kF ztDO$CJ81MO9+|QvAolj#{Ic9+4gsBt);_EBw^deiHJ4u7xh}g^;B!z-|l?fd>LqaPu-7y)lMEC;IlPnQOP^1a~wBUHf{sZu8Tgr4rXlzn)7wGiB0QgFTghUv01dEdBh= z)1u0%Dy#W@*P@QGrrP{E@O#_F*SEH^d7ZrY|Ig=m%an67il;FxWAg8hKX%#j_!ghd zv0~5n#9m+b_TIj_;_B_^B{v4Bs;Vw|;=?f^Xi>T0Q5oA~b(`->Xf{V5znp0`m%&)% zz#Q+T+{{781h@;FR_XF^CGEVFwyq|Ag;>}6S3S4(U++40!`pMW-NdQa8>d`q`q~}8 zDRy18A;SvZ(sL8mN7dHnrpQ#LPf^3ZH~(Y~-NmqFpF$^7SsU*=2? 
z<+@mU_xASu{QUR(YJLW7wLSj0aI5yxJ*HFFy0=WxdA|Mqy?+ni*WKUpR{sB&_xAt) zoKA8+=fJ^wC~HY}kWlf-@C5!Kx83`0zb&_XUOCh2SbKZ>uI#;g_x5V~T((^5WZ~{| ztg>UC(v{P*CO8Rgy#039I{W$huOBTdRI*DsV|XlcR^0x|-|yaCSv(`M#0SKTt+{Bd|doJPV?!U^)k`tb_SiQ zo*$&saj#uKDAC6&u=e7$o2vwWe0bqBK7OJ zqZ(UR&t2wydB*Q{_T*W|k3WyEtF-quPF<7P6BNY}_qF({U-bM1bAnbTy0sX%{WSf| zIUz-7st7B~f`pSZ7$*vx(p(+Y_Unt`F^-9~*RS>+-2GthVJ5|vrX9EQKu3day>{&b z-?^C~wv0x;o4?o1T6X%U($p_{0bX;r9&Gozn>))b(^=Ab?)lT3xD|Q`L)(v^rWP5+5s7*;GpW~b2mG1EY?ZxwN{X9e_NJ4 zYgym>B0mO}i3&-VHO^@|FfI^1RlYCgTI$js56#|7K}Cl$I1c_wSo^%kGN zFRbXo<;Th3;1{T^y3|It-!i#(l6i}&2E&tYY5x!N@3)lgf4{Ht`-fkO%Xt`P1oyNQ z`f(T?+Ee`asgq^rz3DMqu0?5{no+FR!q9PV=cK~@x4DkZ`F!nWri}jb&o;4hzPEJ0 zU6$`Nx#(HUp4#8vZs+e`I>qDMRzuc?|7ZE{AAeB)^Q&z_<>z1j(zbgVs?7Ixbb4`t zH~03vyZ4sP*t>oD>7S>U`yW@9@jt%&^5wkE|DS!m{d>1HlVq?!({q8KgH4vI{oX=7 zQ}$h)G|(dm zQ@i^`cYh4po}pWM{rSWRK@U{Rg->@MSZLSF>dh;ob@8*(jt4CbW@rB%K5W2K`fiR< zoL>5|3XYE-KRjpzRdYJ zqdE4ijlc8#z183IHs8!x?7#eYme7pnJA)!y1p+>Btx8!Vawl(lkmfnnDPFH)j=dCW z`v2wOzw_o73=N-jrEf1Qa)3u*1mOC5Np>^_Yi$;g$^EsQ& zZd+6#$!+Ir;f^^}mGnrq22KV#9^XnQJy*HmQ2=&FHdVN|D~0t%l1k zzjV<&skwB@UVcgA#WGwktJn2vHJm>mo-#jZmENu&TQx&^xSTwC41Y`rx|j3o_wU`O ze|~y+xIJ~=t1PXjE*yV9JbZk4`f~wcraAiR3_I#~?B2b1X^$Y&^Ups^tgd>v>1ebl zOv!2EO+0dQ>lCG_X+8G#wtqjJZm;SnR@)M|Rd=;(_vTrN6D^j9XdJ7&`he@)>C;V1 za;1({o?E#{vgn*?NBZ{LR{}#@4fek2_r6_rwXs2!^JK8s)uybqf4A5F-X7BWdk&+3 zm*kX7CTS9EN0dUZ{`K6wDJlV8OS@H0ONbq3B zoQcOIoEQx9f6bdRDQU$4dwC|IO*h}%N=%p|+t03eEW{Apbz(i6`nu-SYdHw!o7X9+AO!#Ie8zH*nDMY)5@T8(@!65Z;+f@e6P|)r6+I}r^C6Q zMsd$7wN&|?>&wfx-^_{ITP2#bXiDgssawzO{`z%w`?kz!sXu@GER0=asQcW*^TbVO z%{hUmBJR)XiCDQHN>h;~%_jcb*CyqqdweGUe;t2+Pu1TKua5oRqGzW*DTq@skSj|2 z@|N6c*J3)43(1jHJYqFH$*A|`u0F$Q!FmBnJ<~s19$Qg*+`>JGL)Xhw@%#4e+e077 zIWL)F;`Y6E_0&gA^Up75dy%$#XOHr;Zo^}&_Vdo~zWeUsw{=VY{P|L2H$SLrS*{a@ z!-*{2nBoT1L?u%Gp@B$Hub$-M{Yi%bKPjP@A^2 zR(mPSgBHWZzUO{^ejJ#+bPAio{o3DaCuGH5tBf>CbDMd|$!qIf9?`4C6?Omq{OE}I zl_$^?E|&Y%Sk-5a;mVX_S6(f1>s>M>$%w(_6oBAG07;2|35jbb*)D@&P?aiLmYnH8h{cU&o+CZn3vjRLc zzu#khZ(sY%;%xCDjTfQeoe>QJnki@JtbUfH*OGU}NWcC0^VHbe+urV4=Tszk@fPa= zgJai9w_am%xL%rjd!Nq9JDZ-R&OATeFx}+Vk{x=l*G!!dB-b(L^}OE2#PIx%#M?RNEkn=yh?*B2<8Zic!qOGe6u64T z*lXSEoVQV43wx$ZdFZmF$>>jx{`>m+`lTYK`cs6IcJ&!YaXf52`#8#IUsXluf-|3e zI0b{2O)>r@aJZB!*XMev>6bq>R_8DG%-AXNd$z6kB<@BIFHQ#8niHB~#|^~i2uuoo zw*2{VnaK}yp6=1{J}Fpa5*21Nqr5CKM>Azsnwui~anC@nBER}S4{!he9sW9pfBE5; zUzVCWaf$6&y2NDJ&9~cX=TEQPH;3=s?J35`QhGjpTzzpy4ew$dmJJDRYgAXo$o`Js zt^X-B?*G^G@sTnST&^uzYQMRn_X*uzS2UqYcg0uk1mOXo0 z=Cf4B1x?-8OjsuwMtOa!vH0`n^6$#1rA95OE*T9^=j?dU(j*Y`dVQtFvMF!&d_VWJ zXNp7b?_Du^px(>Nk_&Ez%WhvQP^;-!`XuPmrgP6fYpSiuQ`v3x-r4JBl=_Z}ufIf} z*KEK4|Ka<8cE>+F{ps!H=`l5l?Z}*BFHeqrxAVEDO)1{HZSt(Ga}AT$l>OzJBl~OL zYioubF?tS}jfZw-L=~5pZ{B~i^}+GNv?)r@M1&a(jx~KhcQbV+_mpEZ>i;aSpA_`h z{{Q*?`~Lr&9==}gSE58)FMp#^S#j~=B{6qjIakzG&3o3#Am~(@ds=dN&&|@0n~l}P zbPSIQ>tz5NEJskzbW! 
z$u#YOv(HKaort%en;2#+yPLb+bkz)_V-g30{N;s`6_+SXlVDzYqbU`bI!FwnPi`);-xCdTTW#&elqc$GAV1KtBz&rd*+2p*mRdonKH%P+KQ6ugeDv)s~S%;ci=a?anU)4yNaweNlC+%w;87l&laCja{M^kspK&grQ94PGa= z<#K9TJ~j0}_gf%$SfA?>bj+(sKO%|e+D)0@MkmeOp2Qd-r)il* zbzkR6<#H?y$Z(AKJ1clvleQ{nfa=sq4wC9D8Vj2~9@Ef#YU2AkOETH6rKyAUZPxTs zP1Y@!d@mdMc^`b`E7RtyDIxq!!YFC$kt=i5a?e~SpMBOW)kd!G>*wI*$4xV%I3^k& zlVI?ws=Ug;buQwoQ;F~5DGXkgs?Mt=8%txo92H$ImpGjYaQtP+t+@5t_VVpPUMr_e zxx_v{NT*fq0^MloHnH`Q=s#BNr6e>7z=~Xl?>Rq|-$GX?kOP>aLs7j`KX&;;+ zFv;80S6R`4GqGoPDkIyRk8^@QoB%BlDR1rImj~sR-`ld|?%ti)6nlSv)eij@$;(mF zW#;BR-KQV^OnP29*Zl9TA($G#X#cC_J~f{{O#c&!!ojYc04I?6mco(}mx^tyRA+_wSFFo4+hmwZ}MGrKE^_IVi|p@f4bP_S7Is?u`cuuG1izq}+urt=&3@}v zFsmqLPHjz+QM4}%2 z|9t=d@9E2rFO;9P?)9u|J9n(h_F9;G`*-;If8WaY*Y2qQ`|%>9KmBuS;i;V&KIVO&ckPJDU_B&retDrx@yXJY@581ir8Q})ayCrK+APW7z_s)C z`{1tWrzMI{X+9M(pZ)fI{r}&kk!wqXR!aPRS|oSe)9c=ry9Qshql^2Ftq58o&{TZ0 z^z6f*7IAv_e(#g7`~0|nyLol?+vEN6(@QxGmxU~u6kEP{tJ{%R=Pc)+KYsY*&k~)b z4huEa&qZD@H8(f!xistIu65h$&S@_8T3O}ybjpKS)-Cs{(__zhaye=|k8zoKtgx!i z?n+p*kk%3o#YDZH&&MQ=RhVv*2-d7@Wa?Iw<3E0My%Wc|%F7a(N6U7n_RQ^juC;Ns zdEl`bK~b5qNhwq2oqvA#;f{5$A3t}$JY(PcTib5unj0QF{BVcP=btueKCB)mQyY`s zW^EChG=0(%ku>8qt7E5c6{-36iDQPdo6d_hi>Ksmn|;irCFsocJskP9=YE3rn`uUyKdQJ|I1U7Qs!L0e4tw1d4>FjWpA^>rw9i7SgI~fxxBT> z_+rSmpONNkHDdpCC4O=Mfk0J{d?c*9S=0q8I&w8M-`r1I%Q6O z#|y2@8^5*A@7%d_@6MgMx7iG@Cw<;@QN8rT<2~m!ix0Lgn6h!srPNvT=FOXaRO!)$ z@7Zy4HV3V;ieK5cc+TXYSwUV69*VC6cdpCs=?rEHFjkp7Eh$5Fnotw-fn_h3Ogf#l z_H&H1o@w zvFzG*_w9Gqgqdlq2UJ2epX&F`0YbebVREzjx=}H&Q!4{bj+<8cW%IHUCPtzbv0UPiV93uNQ}H zbdNvI+~adBL(k%Z)!eoTkM^A5U|NtCn{@Pj*tvtDQUP3z)BCz7X&rgc6TeU@URG;~ z!Kt`^+ZZ&%dqotd>@nE9{sv#vh9fmwo&G<6?V1?hDt|RJY~`i@AE)o%k}K`}L9syb z*S_a-f{!H~nq%;MLJ*UJ-H*#JcdX0y^VH((Xo)*{Nb~9CmntmBSD9FT;Ws{>*R^5ru-hBJ6-JV_bbyc4f z1rA*+wVkh~6W{X0-2C_1Wy~6paqNP`{^eF>mUUNuJIhrdn)WSGKWC z#{JxmYm+W4TDr#JeALocQqoyt2B(H<5Q-q(rS@ZcIbHL)+buXq~@(M{a3-vlOW!}7bM_bpgzP#(9Pt386 z41BfmTpSC%8)bqy-r4j$wkT3GJDeub#wGJ;PI&4!i#~?b?Kg$ImSyfsds7w8Y~2$U zbgEEBb7|1Z(%9A~zyH?FJ6-}XKA^5e(P zKaHxMTO>2MbS2&D6B0|dkm{_>b&iS&)tgv*iNDfU4 zb_K;BxAd-Cvp)^t5UibWsU+7t$E>G2HInaq`>nf1$7Up5^>mWFng8o1{nYfHUKm6+^)co}@WH(!5xYGkpWqKnf4rCCd+ zgodX21fQA|w4~)4OUh*l%@99MLv7X81kK~KCYqh!JZV<(!`x5jxYnBgo%ZHM>5JGd zp_s0^-S;!s_MK_;l6lW>S}*T?xr;AoljYJWEj!p6`yNNA+C)4KS)wv! 
zn{Rb-@#)qNdOEKsC51$3-VE2#Z)}oeOpvH@WArY&CvQt3rr>5AN*?d9MKTQeBOkwoV z6k=mFth7IGtURZ1n)ki8+gjdCOWkYXBnP50j~nKETkPD!t=m!fi-m&dsyQKx@| z3QrF%I=#my*zf4hf_*-pH+`FZ_riC@q@If=XVWB%(%v%%Ofr}(bw{S@vtQ65&CiQ))=_c=a+OS$sJtQWOihDRqD`E`eSvC8uwzr1OUW=lf&$h!qk z6zIfpG>BvA9HSNck|(`>`jM53HNI44N^J3o1tMMr-e9mLl!7WiIB@$Z%PH`~KE!;Zq!XcKc zLDlcYt3TP9ti9;=efBjIt*K9%&RQ0pb6TL;Hsy1Bf4{#!d*N1%OWSfKl!Go#>byAV z?weIB9XO^5Ci8p_;hHuj*l#BHM#*h+Hd?JZCwcz)PgyQ)nc>_c z`u7a$@|G{NAO8C`jq}-?Uq4=6{`hicq1>Keue*2e+S=^Nx_vCF-SW4@t~(m4_V#vl zfBsbLjIpcvvElyPUE3But$r4n#IyR@7vEB6et$81uZI0jGeaYc06fdP4c=HZtQ#WecAT!<%iQ& z^cWtKnAW4|ebBi>F|^U*;hbD^X{J`D4chx0L-%ttL{59@cE9HT-R;+O$a3=E8e-=J&VjpN|jHiqdqtaUoy(>0Iu(uY9Miw)^hB8E1PXzcJrA>3zqG zMTNuO-nsjBZBUlf@zmJHfHR(+_qN?WTlV5sUtt(yT0}u)t{aP&$n@O%Wx4BnqrzUV zzH#gP^2?Qx;X19xOPA_N_FlCA_v7r{zk5ZDmX%n|x3B#-DJW*`(iR6t%_sk!`FXu` zp7T6Y>Un8oP?x1@{@Rc(L%HBjic3T!AI}KnSZ8`W$Gpp1{LSOX)8`bQ+p(tm`@!n- z7OTyp(q*4r;jcZ*D1Sa(!9ilBg0IGcDZ6F`7PEORF}OM@;_rLSg|W7&TiW{dPxG+0 zJUAv1bYjEH?XweQmNPv)``Y~A?*939jAAZ5hEto)9hXoN*Kty}gk4 z`t#7-zUbJpmz#ctYOLKV7Bs2H_d1)mW~!5E|9N(4197hv2PBHlIjn!M+>2wykF87& zoC|wI7ksF1t=pTy7k_D{w$(fVjh4M@CcVBMq~M|T>~Yua9YH)^nyj3OQ{OX6YA$5u zQVgh!ku+xY($HKQvAgPh_B_p{TNq{@KU(R|A?$Q)&Z{U7E=9}Nzw5T&4|dvTW7|0= zJ3V-DxtFGwLb}e82Q@d|vadYy`10qEC0V=siX$0$o&2(8!ef|HJm!E%sU%#&>*w0gQ>+QEK0gr8r z-S$;}KYRbrx9ts2{pcgF=y?}ZAxW?z27!<%+? zS>Zq3vl)99ZtQj57P+{QP0E2eG;*qz(UFPKdM|Iy6|Ylj?Cjpt(Pbkks{ZZM)BE4| zoiDa~CnvA5lq2ofZ>7^Lk1wk{RWUr28hTXX+;cP639C2V$eTWYZxx^687@Y~l6xL2 zo4Siamansx;ky>a!k~OZW9i)E0@3+x2iqBX%p@LvIo7L?^Xe44L7%dd#_7*nwy4i2 zP+YajCu^yVw&c@evkE18H+r2G$>ct`+$Q=~n&fK61|?yRiHS4X5Dtnz?rKKKyz;Rny0z6FYuJb)?-p@ z`P*Zwiq5C`#!hX!{Q2dZImHh#Z7IKi)A|q|N59GZaUbrmU7xG&Cb^JDut|XTVylGlK}VjVAVsEQkzrFJBMpvi z{rG&_SH+l^xY}<&#k-$AdiU*d-Tujsx7=N4wz~Deo^@K;wbgc7OC`j2J`iXqlqi~* z8@76t&ZPp5V6mi>InOKO!c;pFWsd)=F^h8K;5@{kv^KOwP*`P#fq)Q?(ZSY)TgSe? 
z&pUCP|9AJ>w0#jQ*@i5P6)h(oAAWaS{@y+NH=Fi6P-+&;P-Wzp<=_^$X4Ts&Tm5XG zmHq!7pY2UJ<8J@bGkR`^iGZTlO5b7}^+?eoDF3tk^Lx zsfnXiea12M4e$3KF~6|CVIhM9M*~N&<{Qq%6Vr|uIx#r)tUNtwUA!XO1II1}rIs2W zU$+(kHh=Zlq)0}Nz@~0y0nTG5m3SG8&RM=+Ydj^;PjYEX@x0D8THM)lHy>k<4^54| z{&Jg~@`rm?**&I;6<(cNpY6Cm_k6l#*0iUZ+(OAATB#RopZ65crKq1)`dr6a__&e}Xt4U1dOx4z$ied7z>R^WD=<_%|K1)qj(0jD|K$98NjlTmNEv_xQ;jUj`@&lJt2BEoJnzaO+-K6iOfQj5gm7>;u; zUkW9D{j3QK58V`%ZkUxUVs!7P=c@1T?e|oFKWG^qdiCnnU$0(GDqgtdro0>jOYC*a z(4u!NGZE0Bbr_wyLSFZhGg@sK2@hS6Gt$Ow8mC{=+0hePE@;1-s1U;D}-IiEy z^Y_a;+k;EAyd;>Dd7L~9nlzRkU-(%}vEuKWsFj&A>hexJ>t44BIK^H)eN>2P#gvkB zflfVJ&snle|SL~;GFK2tD-jqp-M^(O2b1_s{$sI5BStM}I z^105`E(52>3cu?vcxkT%3$9&DU;48hg$RTR7+Qu2q|C7Ee)g z*|pVd?va=3x7Tq8CHb7%q}O%ncY$4be}A>5@8UTzMLV3Q#Bg1_oP+Zx0PNKskED|A7o^3zW5;P(kl^{%lC#%GEP|=?A1D9 zR`Eqvc14v5>Z&4YEy2^Cn4YtGsluSZB)Z;xOj1H*wS zPQOj8ANDBw7%c2g`}gdsx4`4${PI&S2|i*-_;9fLKxLc3>|NKQy$w!v@0od4Fp)#> z@h*k~S$ls@w*Nah{rvo^rJkI#%+FeUI~e`GIixwvtcfE@pz`6Iy*qc#JpVkt{;#Rm zFB4N&hGUrVk$|}9-+^nLV5qoPs@;EM;5~Xvk$Cx4DdfBzi7RPfZkCx5l zp0}|lUCDuAm6gTvHEb*q*>n2Ov3nJ?cu1zNEoW3bv*+?+pa0qo0_wftJqAZQ>~)u4 zviNwiNb>LUPho#7OMYaw|I}3Hf3Q5jjYXkKKIx17{YwfhFGZytIFg(^8|!~&ee~SH zB(#Jp@x86X{D0a8X_E8LiYYV&-(D6O_G^W@1(({4;w~kL<92tf5B@CMY<5e)>ywWE z!AG8c*Jep5KNfN6Q7AZfdrxaR(!{)nCr#Uf3YO$e_85f#JFC`4?Zm zing4vw)Z*w^NQe+o-#uQfy+#S4ow10Zr&?x81`wcUZkVB^t0OkKi%f9&pvy5yZo_9 z&*6~JQZ7-WCP{6HpEt|o+Y@IjGd$fQ;2-Lq{MNv$C1Bq3nI|r(tLT4{aGthQo7}Shi`IC_ANpA$=g#R6;<=!SOwf%0}Xm2!%}p- zCtZDz9660c$cb~Q&&ie9?Vf&z4JzJW-p!Y+X>!_$=dy`n3u6aI;yH<9Di!%_y_L%~ zpVpY2|8ys+Jj?&>&e_LKN+eI-Xz6n7qPpSC#Xc1h!ArkYSnW)howhGZ&)u-}xmaR_ zh3tFz`792ufm0&o99a~eRQd^&-Ak)vRJ?FYi6>Hz;~+<(mxf-!hlA|vr}P|5lyLme z)VWAQz-jUn!wM&jBODHIE+-%TT_@4RKPAwoU|y<#lT$5^f9RYo6Xq>5{4q~}hvCF$ zC9&3v$0E2Ld}i9V1b$FFGC|HwV)3RkNi4@KS}qoConh`fkyFiL@e%czJFXSYTIOjb z&eV~qRCPn1W2qh6smOV$Q!Z^WI3{o{bnc?lUMnS%FWCy8l6dMG8kM}~v*q*odn@-w znaz15vDiGV+$QVw+-|cQHmAc^FD>8p-l|1Gg+(!C?q@Y0uiXN1`+vTv|MmU|4 zzvPnNbT=*CZ)3>NqM5GX@?NItAfvtSVU6RLKdQ7S9Ivge|Nr4IzkN*X+I>GBoqc^> z|5)OUJpH=AtJnYfrZd%3VOsZT?gk|%2G{T@sx2Ij42~i(4njRGwpWu2X2l-mAeuf#-vOZ{XFe$i6G;s(h zwHSy=wq$z#)BG-Bc*vmPiD9JPG>*ehp3HsXVNjWMLYYM{WkyB5^M{~Si*)AMS8t2D z`FUq;#hvon{N7GM7WpkkEWsRsQG1e`md;wUwW3pho4D7^%P&ndHJ7{C7@s`wvYA~$ zFHKbV^tRlmD$hSh#$N5|yt+%PYtLJ&mJ3gx9(`HT#*o}{YunN(&r%g#+6024RPQG% z&3BrzY7Xb6S8KL5G-b}}F>RT2eSO^Wyqu&ZQOUQ8U+)d}au-;;F*Q(c`surM`8}RX z81nxnmUl2tNevZqohP0e`Rl>&vJ1D)&5}xvxq8*a*GtoDr-0JaWd?3s3mx`+wwstE z@tkGy{QC*Fw$&z0{k5R?mu2lNi{2>{&ZY&6xqevqzJ1;6q!z;;DgRf!&a#`o{PiaT zp4@6PH&+IugMu!KM?UZLeJ{Tb z&c9xHOz}w3B)@9;MVF36ax0Wv%UarNcG*HL_Tkg3Pmdm*`u*YY`E|SY@AkiZVw29R zWrgSN)%;npg^fYLN5SQ&k-Nlv|MXK5iY*>VTY3$H+$JxyIB$G#XYsd^u)6n`gLQ5i z&D|=YbZ(}gnXu&f3Le7+CN1qz+UbF@SDii-PKdql&Y<8FrTyroiEsO1#k1-L$6Vu< zEGqdhQGi9!XGeQTrdI;T+SL2~vG;o)r)*&r;7~mG{PN+$wf9RoP3Elm#=yYAd~nA) z?_D!A*R~g0ZGA4G%yBWQ)-K9)=G&)FYc^_h--%9NE2*B@+sWWJnde@3s;7ZjP^-kN zRj0a@47UoZO$lgNP;ST4Zpf+Fb6MqSMsKGgQ)Aa8!*d+VIIZ)V8YMh4mu2=aDD}+Q z%D{B$l~0PoQ@P}pDGCg8zcU=UQu5_Mlf-3iz2_oMFZN9LVoGQc=rNk4DlJ%i&SG_K z$3gaN*<&3+V$IT1dpHzW7V&c~Y~YkUrr8n@rn)p|*0O!?6`amlGblAkCQk``Hpf?? 
zf4R=?1h1^*9&Wj#0*V{1Wi4&~zWHX4K#hgW(;Lg4tG5_5rmZm*;go2aG29i zX6YP`rA{fSiV{!tc4S_-Cb4)c|3PIJ&Z|+~I*)rqUHc6A?k#$t_<^NKAco`5*VW>u zdli*Vo1a}~Ex+YktaaU=it`*@3^8$e^?%jl|3Cl#zJ6lSsV=UhBP$k&xbiOznNY=) z*pV1CDe=iZx)5@?cDfgC1GfXws)_iH=em*x$>d}tK ze1;D5?dG9Z&!ide-u&1=;E+bDp_|CwVnM}#^RL9u&zpPua=-n)x4*@h3Q8lJB<}C~ zn>pi2_jCQaudCzl-@9jP_vU!vhI3O>U;e#pR`;U)@WT~T+&D7U`X3LT_HmUKr|Jxa z_ce!DD&C8K{+a*(iU0n8N5jp{x9|V|_x-WNFLnmUG;|HxdM*hpI&tLNqbBaeo`W6B zKIkzRoc}D59DA*2^OtoK(j!?CUR1xgRaIe`pI$jlG&DA~(oeHlK~3|%iSNPoeeajf zQL`89*{pN^Q;pqhV>kcj#rI2RJn)}jz@S)iZFw}OTSUP`yX{fg@->%3*2nFw`S<6h z0Z)HFKj+NC!i}c4e1bw#IR!r|mgJ_d_0;lyF5|+`vSJDogTS0CtP_Mp6z8;gX7>ul zt(}${nRBgs(}T!kWxww}EGW>^c*@{#t~z&0F&|@tAd{e?&!oQxB+75d`{h=b*RNl~ z#89N@#PZnSM5fY=E}Lk*X)B+HT;KX%M8>~zC+Eq`DPCS2^8+}%GUb;Q&N)^X62v#( z;$2hmDTxLNrz5QTr-f8z?YnkN;=N5;%^bHL28IJx>2o(9<8xlW<>R^Mm2p=^-7G#{ z`54S%v$v>G<;bFzl*)OlHnks4zP~-{_Vk~7%E00#a9G1Fq4#2tZu)80(5-JHwH+iUPY$`&AnpjiYa#SS4w*o zLZYNJTN9=vuoQSSK7X**@LZG=$5II=4U6YXrp{55G)m?V-vNGud!O3v9ETOnWmyO!f3JFmkLjvo)2ylqP+mH5Tp zzI*p>o4~47YgQTcE@~05RGblzUGv3%?W^PMzrWRV9Y}2o^m63jlynw+)D&*X9%pSC zwN{xyz$n+}h=^OWLG909SHEt4ek!BI@z!GpkEYFn4o;r4>TRx`;}D$5p(xYm&ZBtj zghZ{yo8^U3I?oPPDsFmO(dplbt`@w)E&eHmnjpgP2_y4Et z*?y3f!BHu#fg`yhOIw+PfrFvp!N=vsFEg{hX3G5dyjENG&Hr68->w}0EO$gbgr!Fy zp@HMsoJ6O+{IS=|V#V!wh3w}}G@Ph+aQSVyLn|`VKHpsPde`TlZ{|GK5sOS)!IGU*Ej@_pHP!K<{4tkBXgpt3E&5d^6|c-R<@=eC+AAEF8*fw%&VR zo_qUg<;J-663Nd$-;{9W5L1fRN#j`H>35KQY3c2+{r&v$^ znD)SE^G_Y&$jFc&?OY$VO*Yz^sy&7R&OM41{RUo_mtXOD|I07?ZO>Ojl_P>pE5EWH zEA**VYZ6pbVSOItZ+LpsCY@)SEf1>fd>b4WqxrN(LE?PcW)a_Q*Ss`0>uf$Y_x85e zyZyhG&3YOcD57c6CwT3c0*hc$=hd_=pA=PEZI*7)Fyfg0(o;&gz|5hiRlw71N{_MK z4v9VA9esmuZ!_GLDPx(g=;m~Bjg|q^oJp%Ro^`7(I@8LK^yar?-{L|&hK!z%*Mgi@ ztupe;R^>eS`s=P$TJtBEofGsiJa+Ha16#!-*4NAb?#zw-&=c4&q4Zx(*4pB8kuxW- zvKSw8`oO{Hm7(SmD%umYSdyhd=#0kabNq)_^lBMRUoz#f0iSdDw4*h4v(KkD zZCcVeEknuR+-ok2r)OE51R~FM8zl)Tw#|sOum5*C{-2gUcdk#59@}w=?u*jtYqv(N z-m8Dj&dF;L%4!9b+Yxc9sHqOAP zp8}VcaVUxO92aPNu)MJ5`^R^W?dAp{y!Zr&-d0KtHDF-?~^8BmhZ<}5)?Zm6B!QuEV|lY`Phc(2Jd3-#GNd+K)uJr4B!cw)h2=)}Be4y3q4K? 
zDm_|Td@e(EZ+~d$wA4ruRgO6ArCYfUMyJo&e((0CHLF&AJXrmnC4G*b#!{B#=Wk8= zS|$|E`Bj(V6L3nrd**|jpj%PtYf3@)7xB#5c(zJ(5BE7|4yO;f)mo>G&U2r0RkB}d z#ID$q&>)a;dzw&D<;L2dk5cD}=HK62|NUKUpvd&qXHER>*M4ulwrk?|#cU%;+rIxn#-F-0SLW|96+$|NZjlsCazM$Jg%`!vL;u!uWJzvXvpGnUf$@1$ai74osNTa9 z(%#&}84~y_cuFIe+l?)2zzc(uth=X5?Qk zI>+8%So`Zkozv?E3I5-IKWP|(dcIFRF0k7(sNj>j=`+CRQaEYFjF zViC1?t#QjmqxEUNZ=DT-BA;7451&50`=*aq?%Gnth{rNtj!7{jXenne-a0F(U}f&x zz4kJEQL9@_7xXx=w4CTxN^UtOVIdqEo7nWJCqC!;>Bz&9wsZTE?SokQ)rFg~U+vQR zR3{-k;kxzoOIH@pkvQrUqQLZgu4$MwFT)ZM>9zKCKR)cJeslX?B|m2)L(k(W&p!7Q z_82;?oL4i?ZRWfpiN?28f1hm*mpHhjC*$|ShyU^<)O#+hS-E2yYTQio-n?6=dxS-SV=ypjsA%RKw} zm3jTA+0*ssAC*`;O_9w@ks(0Z_{(7vp34@Yp+{N$&6JMacvo0nUEVFQ>h-LEs3~8I zcD5Pnh;uIuxil#{TO-kM$I+Uw>%HaoJZdle&U+zvtotb!OIq)xq7PLmESiho$(O&k zza!(PyR&}B>?b=S*8F>OzCOFhQs&$9ZyWBvzqfDqL#6nD9-+>*hu;Pvr3 z8G{yuHY4F~*VxXc&1>dA+4%aM{JS*Qy^(9}|9|q3ZxVw5gMrP~ZS!58F}4WQZep+z_5JtM{gsUR`M4i0&YXfaTldXNl2SV+AY7<^ zM8Mg_g3)5$nQ4qJ0++tNd-;9xyOPTmc1;%Z&ra)hWjPn+t7$!V_de;TOD>&WWB9Db zu34g3an-6*-FK>IGw5EsCE(D+UUZC+qvx}cK1+Iw0>k2Y9(;_it%FAn^(^6y2dkp1ml6%xYcYpVqwa!*kb@sA-yLO2=y(#N& zzuOmN#q*fo{!hd-)#rP1&p)=XN?)~0=J?9o^Vd2Ak}sdx*6di3Y~|3ABvtNqs8mT>A>sL^#bD>T&f`Jt$} z`(#}MPk7$DcbE0OVoT&aU2$(cqt|oHgj@t_4oDOp<5XpESi7}iTKal=2`vH+$vO-UE`FS42Ynx3sAgA6niBOo=0lHh#{x6AY+FxF z7X8zw+D}OAt^2lT-RdosUZK)!j|W%F?Na<2n#>X!8rUSz(p22DfIrbLyK7SeN1K4a z5eA0ESFUVVT0CcR-G$}J+pBHf^6llHQs^jg&LwgR>%tT6=Q+-Am{26smMG!OzGHl>hm3mwOiR3PGqt$ zDD*gWeA8rPS(Z5|%WA$~kfzg#tgR7i!@Hj@I^n9esc_D(#1D2$E`>EJY`a* zL|MPJrSF{0i?ntKD6*~AO)fFhUS;!q&V0Y;_y2$7|GPrqX{qh{*>2fax2I>Tv>c4e z-g51hgMgr@-H*Q?S4`zn+$Hf;Wo~h0phzX-LUT{ewZ4a1gWVJzB$Zjx4cP>ejq}4+ ztDgU{Ca-UIv)ML{u1I6ynJZ3h%GB~&u-z}<%U9Tp1XePwp(oy z91CZ5KemuuY;Y*1!}HW04MV4%O)u_b#9rTeP$~rM{J6n3KTqdG2D8J1Imzb~56U(CKAcx> zS2~%+kZYcI^1lyv%O?l6KYqEvzIQU0V5PjWi=mQ0vez;v2E`-)t2R&NlV)h}KX^%^ zaM7>d3=9nymcNdeWx4D_V%PrW`oeHK#Rlh&bJh>$UFa}u;;4}-S;BZiJ;I^&E{pX(bO>+kPB{y5QW{bnV5xpT)NdkVFf zkG{D4VQvtUp-@lIt}3TWAtT?(Q!f9kIrk}NYgF&!9rZRImd7MkZ(8+gmZ9F&FCRX9 z;Q#-DUq61|0g3QZ+dk)>!o5*}#}rsNkL2~RGDKx}*(~o#uViyN!ZP1aeKBv7ht|uc za%=)# z3`$2=tbBcQ(L*L>mOaK=Zpk7o0!~xfR&V~*$|eWc&SG<*^;bVZqEv5^auNM~i#etkV1zCNxs;p&z5<-6Ct-o5U-ok;4`rweC*UwQhJ<=*}K|1Pip z^Lu{Xua`9z(%BkMEyZU%D3tiVd#|CC;HwyeV+l=bx7J>_eD2Wk@?zk#pJ}^iolTPv ze7@!)n}dRpBgd{iyDI*DvRwS<`~N@f{pRM!tn_YlPd~qU(E^sp#X3_qoM>BTp2c*I zC4CDI!{e0Hq?V}1$M)Hm=`tw1ZQgn<=+uMe6GwQKf8X$;NAt80)0!nN?SFT^PTPDl zX(I!}o%{B4{M}cU^fU-C87fUNa7y3I)^KBg1O`)k>;N42oXnvzI?n zZctdZE~01U0iI;1i@{x-2N_y+X?5o`a;ob5E+}SaIMsFNrh|qf$G6z+_uux+D>^82 z($eDn9PW$WmX+@f8@;pzl6n{&O}@ld+kJJ`cL{rvI*qeo!6mDf84bw-%3hYIjMC>8 zA2#v*{+<8y(M7AY&qan#YYApaU^rkYzSxz6_5I{Yi)WtIGn>6^raa&8bfqceXz_e(a=qw3S1#Kyj0f_kHW?4udGKrLh(@R3uLn~uDYiUK>*TPL^LuRa z_B;DAiG?cXH}4T(2#NWyCHo*pboP`hZqo8hEfWmAINSs{^$K>~H8AVDoL4V#{G^5S zobVt|O+&Y)={-hz5giL(N3|#%lyDJHTr)duYgE+Q<%Jh+F)-}qPQJhWt))fol@Fi3 zel}l!zr|yhUvSvDXWCjTHe2z|No>gE>tR?jBjWY$ZK*wh$1KuIFI&8J;_)$X;%s73 zTG2C$&*=0fmBXn$?hHYJX%fF4IG^%~%2sqzaN}6W-(fgQLQl};oaI4_s`VO80=oom zt?N~ocP{sOX;h|SMc}mVpEklWNhV*L&91TgbLOV;E){Vy`SRC*=U(M=+e0%FY-XF6 z23OV}we+5Ev-HaL`*qLfaE2{gW^-(Y(5%#^K&j-Cb8QdilvYmD7u$UL?J?{6^788X z|2MwA-6^fa5MTH6=IiVE3O(D+w=*=nt+I_?yZ_&(v(NR{TP8C!UO%sAU;XjT_x-=0 z@BiOkpK(67Nr{0$%~EseuZ8Ra&K^lTG8ZzXoIlJlnAmwWYj528XWg!sMGTLa1Qfb4 zGjOtqD6d`iz{h2WmNC=8zYhhS3pU=>N%!q(44hS%X7Tyjsl)|SOhTU~nJ7vuR&Y4G z=A)fO`l?MQHu-2hT{G|e>#Io*_b@XQ%uDPMlI}E|Sk>v2(%Rp0;quERQ(o<|JK|vQ z?Vh3A^Usl?(|QsP&J26C;dX0fuV9a&l=$4Q$0RhLsw|)4eSQ-UV}pV~&(S;V0>uYU zuhMY|yZ*GsuARd|Q_=0}SJke0Nfw&Sii=u!?ylJKc#5fSX=!QXxoO9@b$6sF6`u=k z(^O^>$GS3c$_kS$>YOgDE7^n&| 
z9XwiNmORII@|J5Y0+Omdn>ZL$L;QqX7W96)XZ=2z<(8G+5uMeB$t-R@8fO_Y{C6Ke zemQIB&xg194`0xqcJ}I1HH(<@=KFtj+uxTzZ=P@8#vssktxaOC(Y5OLbKE7Qb2XKf z&NHR__S?7Le#`TFRoXgAEs8D58&;lRy7ZTu$%o^Ziss(`p}VykB=+(f);L`~>~H!+ z;8K_N-=r3S78ZsFwMCDYEtP8fkh$fD<}yachJ*jRnRyO9dC9`z^xoFR;0J@^!5K@+ zd(+RaOY>E>n0q!YR8vA-^8A#ILB8$5x4%Dc`LU$Mr1a;fs+rG!9zE(BX=>=U(&s+^ z!S+On-*r3+DlK~wWRxrH4uAg0q4+^Bspa%06^?7Jj+&$~KO&Ym(2~6F)JAl+G3V#IgD6u2bDv^%ifxvAb|A<56(pRBU0BpYypZ zdfw9+7ZPk8{8y{`%=~`MQrOAFOFCKLSg5HsqrjZ!JI|h0I~E|8df8-3pe1j+t!!JM z_VYE9CwCoG<`7hTY~g#^#o!P}+Y8HN-_04bmaWV_wjw0BL$PXhA)7)_6m!SvO)gk-n7c%w8w{QHv|?<@_+v?H83pn=nM~|1DRg7G2WVo1*DVrlwV{v)|7Xem%jRG zP|Bg7H@??$m}Q*im}Po4Yj2l83&*d*E5|If{f}?n%2v@JXmYHHW6js^{OC}sttY`mQuRG1#Nn=?}(B4;%F9w?u z!!6Fc?Pazaavb=%M)P!*-Rkvv+ZB!#z9>*!RIYf$^VII$?U9#1=$!ua=4toy#}?mW z#g!QjvZnVKPD#+*8l<-Lx5RT6K>^OiJ)e7o`V3EgvSFHVAm^NWi$cp0tNNVkY@h32 z+Ybl&1^4WmdEO%VqEg&5@o@dOCXtq%Ja6yUAHKS7<*Gax8wJbt^KzfR@ZVomV{^~; z-I~&KkqjN0t;S(e>;WzU2VT71v}f0yb>Z4xnoD~E-Za;KKYTm!#nxa8%hWBw!TV!$ zZqBKzsApDUFf`)QH|t*X;Q3>}la2)!zsKG$-+foPb%J1%!Ma(JO&Rq!1SL-Gswmsb z#t^1@IqEQnx}|S0o8ZZ=JG&QbVPrTiv31(DjLwb4b@evBi(gec|;}M>sea zwj8h#?v4xI89EQhuC+FO*o3u+_?QxGlddL7=XQj$xj1C@aT*?T5oh8!=hzW=^@?j?g8*Oq;eQ_;HBVaVzv?pZirMtY(%dt7 zYcXH&o4wt32FJo*o2H(z_HXHEsdz51RAuEYhbzbW9yb`C=}zJ}7a^;-Hr@F7;%mY? z_wH2}QDS+jVtj6$o?hSbZ#7mWcNyKf^{(ySefQ|mZ~wpS|D*qZ_OJgn|KCRQPYAJ6 zyOqbH$iTCR)$o_V)WB;;1=e~e$EVb`X#Cueu;{cq15<;9ViV`GCzJ2m{8(eco8$NsMiO=%&RqmQAPd5oQF}&LSuHw#d3BzO8q~5eiu}Vdb;y!*sg<;;>iY4s|u~=o)>JH zBsS-hz#_9-R?G<<3Kr=-*BBZk7MA;IKcDm1Ds;2qQWO2IJsLs!?1?^{8~=5lW@mKa zIQRT(nytG=)$?OzA;(-A(mvlz+x=|Lw_5uzvJDJJPe|O()8FJH(6c3M_g_9H2T9Xo zZ@#q`&gv<14XpWiRDAc{$Nl#Ee4PX)-LibVWXT~GXAWl;4u{8gEO>6sda!uPsfgqy zn{Hk4-5jI8dY9MhmQ5Ox{)@|+Im^z?yIssOF=1ECxmUZ~Bu;zE^}V&|bITTwIcEIc z+3Q$(rC4%8Pq5eI)ja`H3)7pF56pTyO<_vFyHgo&=Y+92K3EfYnMLV9hZ2Y7(sRrm z9Revc&b+;Q`0LlNSzCARt-W$f!AIeUPsfWDpZgLPP31aMe2yct$@}3_4~?>Gp{W~V zbQyTs51*`V7dXOU!6scn_(A2pF+vopEtigP2c}#X=$0v@^blmd;fmaT{vaa zo>fLHjvV)P9+232uIjmF>VgJFg_gi0g%WHN_>S(%eC5UQZmn#4Tz2Gj-h(dE+=fMZ zbI!RhG&&X$Dtdmt{pWj@kNb{4K6|`h;NTvOwVMxHI5nKwHP5t|p7aiX*UN->Of$G`p4J^~BH5i~alYd;Ome@6_cV zCmC`J+PwSr*o3h`$>G=yj)ZCAiihf6^lUr#lS40Q+jR9$EckixGd1KSVymZa$-7GI2y<`hP;=ZKr=_zCe?inalk%uTifx^q%G8t=D=A zTMBp%RKMToFTMUt2ftxLhrzKaLOv`0Z`tSdJlN^P@)^New~j2Ae7HPV`-s20RGYz) z`Tw^tGk1QOTGYe&UnIHbL;5EX9>yh-;RXL*a@Tq}xiPRLb0#uPHWhT>aK8Pk(8%}h z3Qr}5?X$l=kFPl?VLi`p_g}|8r3wo#Q(q&d38|J(@35bc)b8H1YL$6@oLD8J(~Dc* z`0xLzlVWdplyl9dy6o?tpPy~?%YOg;`*-iwy1h|(X4;2#Y<*T?m1C?a{pzDFzW7>|)qMY`m7v*|zmFE>@U?ZI9{w&W*kHeZuWUc32Z;;b793QQ}fT=L4~HC*$$%5J{)+O3xk8$@I)vPewM zlL#-YD7wA;dB-Z9Gj&Fi zk4J16hoJl(L$f0doXvWCho8)0zSuZ{W6n8=&C@5Bx^Zl~x90USmJ9E{9e)tmu|c}3 zNi%uNs%{%$MHf|7mPWxd7V0L`A7A0Oby~1$))E&bmbd)sEQz7l&sn;09Q5E2t2txr zXux;CytFl8%e|7nJ7=CBHGfAMbN8b&avVz5>BU181O|-bM8@QXleXgxBvG0B#-5>U*8o)hafQ zt-0(BB1+rUBC_{#AB$+(_}wuj=1Hq*n3VO)C2P7jNB({Eb8675ci*o0EZ%7Lc+ZJb zyVe;UoLza@Wb0O`i12_rB9Z0W{;sH)c$N2LS5)f_9Wn9qH*Rl#&$s;W#@A_|eJ7q@ zzixf_>()8D9v7b1yql*#hdWZu;^34d&E=Op-58uXcHe#X?;)e3QT&RwseIRX>FeoX~urH2?g*m~+qX*M9%|-M)^0t#q~N?j8TW&i{M# z66uy7YJJ>50IW!6cr5V)j<*>Ir^#7`nj!mX+qEFabk%tE|t0&g+w%lu< za;c4hiNl#=#sr)5>6>Q?SUMV5D%{$()lHef`OE2}NjsxX7XJD0@bSxX{r~aU zpXD)!Q%i&4H1X7F;m3AKINmiWjZAWK`f_%UV3T<0zo+8ye}2yY7a5l#Z(nC4%byw; z6CZo`{{41=Q>#ASt7d<`xy|s>_sdI`Zk9{VS<9`ckt^BT)Gg6ezHMcfRq`=Q&0e2f zVQaOMojDG=`(@7KTx--J;5MUsPSL+5<&CV*ZDjA>`&~Fmz#+T$yWU#X=I_TcoCF?= zBztIXJ9qEy-M(b|*`}|*ep(%0<11h{@4U19yRz^1Ys>BAzFoG6e6Fvz?WBde`t!y8 z3`$N0iSqgHr%dTF3i>Rfe^2&X)+~meDDCvzN9jRcLX*p7j+^v*9ee&|yV8*bQc;PqpJ5>=sg~P1mEc zm-@`>UbIHzG>4#)`t!>#KYlE+`FeTDXPfx7&jcK@V>>+mz1!@=Ah4#;aMvP%*=M(9 
zrd^v+H^;8-Pl3(6b$aWkb^o;Ri>*!*IiIe5j+Mb7+mj=5?Y8*ZzpQRQ{(XG-?Vld2 zX?l0$#PrZu-=dt`eFAJsZ+2}q-1hd??c28(>1>5{@zHlI#EAGiP4pWoZ>*Z+R|{r7R_<6o=x>Yip|yu0f`xbP( z&1R+Qe{Xi_d7X;*a4zDu$(O6E!&iTO_UhNK?)Cd3be@}3@hCSqc)oPjIOU}I^W(EL zndJhH$~G6e#mC0R#pNC^EOE2lz3*0@`R==nOb>cmTdLkaHON|<#L=VZ<~i;2vQ>|& z7#U9#&EtGHH~8{|^q*;a3)O0Vzby~q^YWc@vFEbHLV?KG>oFzicPoQZqv$klWb|wl4Lx?dUDg5$hWeKr?gJ_G@Z+JLU&KlvzjZ% z;`Z*?wPRn=!R&O&^J_|fx8MH!^TpigwPI^@_VOz-yyoed7dY$r&TDKe=6u2SJRFLO zF7uvNPJ15O$;tPz^Xb2e`g*ImQ?9v&nw`~Q@i*9XtmpBJ^hmXqi^rtco7@EwyQ?Aw>zhL$@`^e+cp@ui7;|9 zICQY4b1W8cx;-!1B{ERVbz$#1i^Bqvnw4t*{+$2!=KlZR_4R+BO?|n0_wVS%y$LxR zXE;jSum4f-`=f0`!NwS~QrVhcY%@K5Ih1@P+-5i}X%jrg;Ha=-U&Q)PF6+P+EDHKwJ(-to7$2I}E&5o;C84&WrsmCV!_%MloG@H!cub)2yIk@a4zWd_?k#<( z%WzRbi$%MOiOGcLQ;k)2YHyjmeT~h0zuVn!#pl<4>yEFhtgiq3z5aW@{htquug(i} zoXIdzW3giAoI+mA>X`FOEXHxm$zg7$2+rl6c9A$GOKr+LkYwDR+wN#WQK2f7+byPMqW^Ysu4W z#-`-Kku*iHLGaMIohdtgjbBYsY!X-|#P8M-G%vlWP^>xJD>8d->GEx%uQ<7?+Z<9@+M>}nS(&(j`8hBd`a z+)k29uSA;r3OUY6CXLQ{TiOp7*tI3-ZJWJpRl4uvHLe^-64HEQ zMHZcN-5lxX`|#vKi`TJDAYtg^k`~R{t ztk~?uCws6%pr*pcLYnK?mkamn_PlQE3shoxzNYf)D_`5}U3DA`PAz`NmD5Xh##~E% zy!UvE-n7$a%Yvs~y~2O|v7_|c=Ho5vrmem@OMzwo?{C)6d7Lj~J>9do#ist-o;6xt zKeKi+GI*}EtK0GO$K%^yt3tz684Ry7b2NR9{JT27=I@_hRrOi?&lMxA*J~Y}V%qlT z$E%xPUx({=*>K-0|65sq!~6N-^4RP5?%VPlUNZGmch1gPrkkHV{kqj~S?=yknf~L= zGODR>RPepmQbHW^N&EAbM9jjL9nI+r{*{o&%=Kb=8 zv)dk4ypfZW?=7EldCe(9RfDD~v-;0B#g$I)3Up&|DsW!^E-!y=*!sBrtJ1F;^ms{g zOx$`c;%3NME&VA$i$gjC_suvjnSLxn)AT@r)bf-|3=ACS44f_-Oi8_HV#*ZYeRccY zj~nz?CD}zYaP*pSFi0r7U69CK+sTlTybN@^RY>tOUWW-*(p&j%IIoSJ=6dSJ?DKZk z^Ug1SEGb-OQ=VU6^Z)hzf6vVK|9$)Y{-45(--|S=EtejV@41lqe9u-r7mlD^{0t6; zM_Y~vIP=)K2{5af9AQxAvOD(T@Z^Wbnj|K>o8Fx_FQWDTs!u%g`qoU4$hOzt^+>%m zwa4HukCXqbwO6=*PrvV*bosalbED!94#tBlN-vy0C`zn#?@(Zf{C35KQ!vSA)1EI; zYH`Ut49Qv&>=!KGTb@q#xF;KQ?P%F%%~PKq7XJVESa#{LEa!~F7F&0nHZc_QO|7H83_x{na%!JqN|to!Ydf zp|zNg>4e1jO*ZF?CUQQ{x&QU}<%^d70`p9J%g;qI3%vOy#x%o7N3rd}-jGYy=OrFH zEipRvv&3%R?tSmqlt#^#P~thz){v-ix=CP8(j$R`N8CFUCoJ)K_Ghxpf(8zar2;IR zGjEj29-8s_PL#iA_~|9$N5|1ImJa{lwOd7V1qP3e_om4zo+%a2t& z-@Q-U#cclhSwA1${r!FWcJtc%*=yGpM`b_XEA#XM|L?nUN(_SMES~$E5$}FlQ(Ibl z*W~OH6Sa&t?VtZm$TjN;a^h5+VccY36!rhd-|xQN{%EbJT` zzQ<)NFV}41Q2b=)6U3>w@6VrZ&r@|00xYQ~)mN_BdM%SBu!Jw1m0@DSj;OQo)AWlb zo-Tal_k7>`Zlm~f*TSbypQF5Z$*igef1j_9cRF=%?|$W${ZacgyJRBf3chOpaCdwD z|9|=aGxyGAmY5D8GF8_qvzA- z8~x7uEW2;$q=za`&AQ!Z+;#Gd)Z(r0+8@4q=l7&3u*^zk>n1%n={*-Ov9DjY(SH8k z&!?aMdH2rl$AvQ6V>@P^Z#{7=bA{wlj-;N?I*aGXg2tJ5*vq+unfMy_U$pcOxRdvc z!<)(%d&Eh+r>{75Z&MNz!z1@kzCowv+Zl>*wIxk*& zu6foQi*3t4-@W^IXYO47&8fE0dQ5JSp>ekLaUoY#rfxGU-RpX6-=9z2FJf2<->|nN z&PzRByn2a`K}({j z4SZ=678}|n_aqqfP7;_l{erdr)@#Ra;4WRSPN*C4U| z_1C9arCTGfl}=7|-L>h6gk`$okIOGN*w5L$?tOb;)>#eJDOKWIg4mP!elL;y()syC zO+b`8!?xMoN0SnJrh3ggpRRh`=6v^}B~y}mx{TCkv^>yMY%JDPUo6AK#l)l7#v#yQ z%XCdC`|t(%-}_&G-+ehf-|oNp|8MtYzAgX$-oEEBhhhsyMzY`X#bL`<&5ATup7Ow=S;j~;;Xi4&xzN&&V7DUVzsyaf1ZrGfaCSDbH8=Y z3&%4n_ArnX3aGdQ5TM;7jr}X zqf;Fuqt}K_xl}08p7`OMqKnX7_4CGx6|v?m0V`7jr}bGiwwe9mk{b#4s&(G7H zep;Y;&auxQKUP>3ADi1BrKx#0%kKBl@cH{|%0izdg}nOq?V8u#I2|!xdP;`?67}x9#lY^IJMZcOBfz2?|)xc^E)@Rds(L5w(ORRKldE( z@L9ZbzxBMn>SGsHPhVg6cXhm}@6-tg&$+LCy7KLs@2{_Fop0I9*1Dm3hxI(`Q$KRd z(iy(gW^YvM(3!qkAZFfkmdW32=Ij;|zH^0l8As#RIM2nRkI!Tj|Nrn}&E7q`^t!X= zEcyQM)y+`jc$Gb`66=5b`z>Gp>*ey}clAzpU(b)rHy4*YeDA>Nx6eMGx93+(@JZsi zSZ(WhP_ku*#;RR2E#Gsda$LT9xRLwc*X!|X&Aa!#K6m_Z&AuJGcFj4T#*DrIsCras8LuOFFl?s4ykqxL;|_D!?fL!TN5%dKT|IsMtsAF2sM&ZUZ~GL5 zwNo#xdS9(6x|ql5ib*lc!yEmNHN?A@X%xM#`LOPF)zXx!XOepQ`G4QsbMBK8(~P*I zEds978ehmh-zhLjkVwZ{XO1a?)92M;OA%i|K=P|0w0mTm`YKk0A 
[git binary patch payload omitted]
diff --git a/ldm/modules/image_degradation/utils_image.py b/ldm/modules/image_degradation/utils_image.py
deleted file mode 100644
index 0175f155a..000000000
--- a/ldm/modules/image_degradation/utils_image.py
+++ /dev/null
@@ -1,916 +0,0 @@
-import os
-import math
-import random
-import numpy as np
-import torch
-import cv2
-from torchvision.utils import make_grid
-from datetime import datetime
-#import matplotlib.pyplot as plt  # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py
-
-
-os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
-
-
-'''
-# --------------------------------------------
-# Kai Zhang (github: https://github.com/cszn)
-# 03/Mar/2019
-# --------------------------------------------
-# https://github.com/twhui/SRGAN-pyTorch
-# https://github.com/xinntao/BasicSR
-# --------------------------------------------
-'''
-
-
-IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif']
-
-
-def is_image_file(filename):
-    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
-
-
-def get_timestamp():
-    return datetime.now().strftime('%y%m%d-%H%M%S')
-
-
-def imshow(x, title=None, cbar=False, figsize=None):
-    plt.figure(figsize=figsize)
-    plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray')
-    if title:
-        plt.title(title)
-    if cbar:
-        plt.colorbar()
-    plt.show()
-
-
-def surf(Z, cmap='rainbow', figsize=None):
-    plt.figure(figsize=figsize)
-    ax3 = plt.axes(projection='3d')
-
-    w, h = Z.shape[:2]
-    xx = np.arange(0, w, 1)
-    yy = np.arange(0, h, 1)
-    X, Y = np.meshgrid(xx, yy)
-    ax3.plot_surface(X, Y, Z, cmap=cmap)
-    #ax3.contour(X, Y, Z, zdim='z', offset=-2, cmap=cmap)
-    plt.show()
-
-
-'''
-# --------------------------------------------
-# get image paths
-# --------------------------------------------
-'''
-
-
-def get_image_paths(dataroot):
-    paths = None  # return None if dataroot is None
-    if dataroot is not None:
-        paths = sorted(_get_paths_from_images(dataroot))
-    return paths
-
-
-def _get_paths_from_images(path):
-    assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
-    images = []
-    for dirpath, _, fnames in sorted(os.walk(path)):
-        for fname in sorted(fnames):
-            if is_image_file(fname):
-                img_path = os.path.join(dirpath, fname)
-                images.append(img_path)
-    assert images, '{:s} has no valid image file'.format(path)
-    return images
-
-
-'''
-# --------------------------------------------
-# split large images into small images
-# --------------------------------------------
-'''
-
-
-def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
-    w, h = img.shape[:2]
-    patches = []
-    if w > p_max and h > p_max:
-        w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=int))
-        h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=int))
-        w1.append(w-p_size)
-        h1.append(h-p_size)
-#        print(w1)
-#        print(h1)
-        for i in w1:
-            for j in h1:
-                patches.append(img[i:i+p_size, j:j+p_size, :])
-    else:
-        patches.append(img)
-
-    return patches
-
-
-def imssave(imgs, img_path):
-    """
-    imgs: list, N images of size WxHxC
-    """
-    img_name, ext = os.path.splitext(os.path.basename(img_path))
-
-    for i, img in enumerate(imgs):
-        if img.ndim == 3:
-            img = img[:, :, [2, 1, 0]]
-        new_path = os.path.join(os.path.dirname(img_path), img_name+str('_s{:04d}'.format(i))+'.png')
-        cv2.imwrite(new_path, img)
-
-
-def split_imageset(original_dataroot, target_dataroot, n_channels=3, p_size=800, p_overlap=96, p_max=1000):
-    """
-    Split the large images from original_dataroot into small overlapped images of size
-    (p_size)x(p_size) and save them into target_dataroot; only images larger than
-    (p_max)x(p_max) are split.
-    Args:
-        original_dataroot: directory of the source images
-        target_dataroot: directory the patches are written to
-        p_size: size of the small images
-        p_overlap: overlap between neighbouring patches; the training patch size is a good choice
-        p_max: images smaller than (p_max)x(p_max) are kept unchanged
-    """
-    paths = get_image_paths(original_dataroot)
-    for img_path in paths:
-        # img_name, ext = os.path.splitext(os.path.basename(img_path))
-        img = imread_uint(img_path, n_channels=n_channels)
-        patches = patches_from_image(img, p_size, p_overlap, p_max)
-        imssave(patches, os.path.join(target_dataroot, os.path.basename(img_path)))
-        #if original_dataroot == target_dataroot:
-        #del img_path
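
For reference, a minimal sketch of how the overlap grid in patches_from_image falls out
(the 1200x900 array is hypothetical, and this assumes the functions above are importable
from this module):

    import numpy as np

    img = np.zeros((1200, 900, 3), dtype=np.uint8)  # rows=1200, cols=900
    patches = patches_from_image(img, p_size=512, p_overlap=64, p_max=800)
    # row starts: arange(0, 688, 448) -> [0, 448], plus the appended tail start 688
    # col starts: arange(0, 388, 448) -> [0], plus the appended tail start 388
    print(len(patches))  # 3 * 2 = 6 overlapping 512x512 patches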
- """ - paths = get_image_paths(original_dataroot) - for img_path in paths: - # img_name, ext = os.path.splitext(os.path.basename(img_path)) - img = imread_uint(img_path, n_channels=n_channels) - patches = patches_from_image(img, p_size, p_overlap, p_max) - imssave(patches, os.path.join(taget_dataroot,os.path.basename(img_path))) - #if original_dataroot == taget_dataroot: - #del img_path - -''' -# -------------------------------------------- -# makedir -# -------------------------------------------- -''' - - -def mkdir(path): - if not os.path.exists(path): - os.makedirs(path) - - -def mkdirs(paths): - if isinstance(paths, str): - mkdir(paths) - else: - for path in paths: - mkdir(path) - - -def mkdir_and_rename(path): - if os.path.exists(path): - new_name = path + '_archived_' + get_timestamp() - print('Path already exists. Rename it to [{:s}]'.format(new_name)) - os.rename(path, new_name) - os.makedirs(path) - - -''' -# -------------------------------------------- -# read image from path -# opencv is fast, but read BGR numpy image -# -------------------------------------------- -''' - - -# -------------------------------------------- -# get uint8 image of size HxWxn_channles (RGB) -# -------------------------------------------- -def imread_uint(path, n_channels=3): - # input: path - # output: HxWx3(RGB or GGG), or HxWx1 (G) - if n_channels == 1: - img = cv2.imread(path, 0) # cv2.IMREAD_GRAYSCALE - img = np.expand_dims(img, axis=2) # HxWx1 - elif n_channels == 3: - img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # BGR or G - if img.ndim == 2: - img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) # GGG - else: - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # RGB - return img - - -# -------------------------------------------- -# matlab's imwrite -# -------------------------------------------- -def imsave(img, img_path): - img = np.squeeze(img) - if img.ndim == 3: - img = img[:, :, [2, 1, 0]] - cv2.imwrite(img_path, img) - -def imwrite(img, img_path): - img = np.squeeze(img) - if img.ndim == 3: - img = img[:, :, [2, 1, 0]] - cv2.imwrite(img_path, img) - - - -# -------------------------------------------- -# get single image of size HxWxn_channles (BGR) -# -------------------------------------------- -def read_img(path): - # read image by cv2 - # return: Numpy float32, HWC, BGR, [0,1] - img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # cv2.IMREAD_GRAYSCALE - img = img.astype(np.float32) / 255. - if img.ndim == 2: - img = np.expand_dims(img, axis=2) - # some images have 4 channels - if img.shape[2] > 3: - img = img[:, :, :3] - return img - - -''' -# -------------------------------------------- -# image format conversion -# -------------------------------------------- -# numpy(single) <---> numpy(unit) -# numpy(single) <---> tensor -# numpy(unit) <---> tensor -# -------------------------------------------- -''' - - -# -------------------------------------------- -# numpy(single) [0, 1] <---> numpy(unit) -# -------------------------------------------- - - -def uint2single(img): - - return np.float32(img/255.) - - -def single2uint(img): - - return np.uint8((img.clip(0, 1)*255.).round()) - - -def uint162single(img): - - return np.float32(img/65535.) 
-
-
-def single2uint16(img):
-    return np.uint16((img.clip(0, 1)*65535.).round())
-
-
-# --------------------------------------------
-# numpy(uint) (HxWxC or HxW) <--->  tensor
-# --------------------------------------------
-
-
-# convert uint to 4-dimensional torch tensor
-def uint2tensor4(img):
-    if img.ndim == 2:
-        img = np.expand_dims(img, axis=2)
-    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0)
-
-
-# convert uint to 3-dimensional torch tensor
-def uint2tensor3(img):
-    if img.ndim == 2:
-        img = np.expand_dims(img, axis=2)
-    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.)
-
-
-# convert 2/3/4-dimensional torch tensor to uint
-def tensor2uint(img):
-    img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
-    if img.ndim == 3:
-        img = np.transpose(img, (1, 2, 0))
-    return np.uint8((img*255.0).round())
-
-
-# --------------------------------------------
-# numpy(single) (HxWxC) <--->  tensor
-# --------------------------------------------
-
-
-# convert single (HxWxC) to 3-dimensional torch tensor
-def single2tensor3(img):
-    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float()
-
-
-# convert single (HxWxC) to 4-dimensional torch tensor
-def single2tensor4(img):
-    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0)
-
-
-# convert torch tensor to single
-def tensor2single(img):
-    img = img.data.squeeze().float().cpu().numpy()
-    if img.ndim == 3:
-        img = np.transpose(img, (1, 2, 0))
-    return img
-
-
-# convert torch tensor to single
-def tensor2single3(img):
-    img = img.data.squeeze().float().cpu().numpy()
-    if img.ndim == 3:
-        img = np.transpose(img, (1, 2, 0))
-    elif img.ndim == 2:
-        img = np.expand_dims(img, axis=2)
-    return img
-
-
-def single2tensor5(img):
-    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0)
-
-
-def single32tensor5(img):
-    return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0)
-
-
-def single42tensor4(img):
-    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float()
-
-
-# from skimage.io import imread, imsave
-def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
-    '''
-    Converts a torch Tensor into an image Numpy array of BGR channel order
-    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
-    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
-    '''
-    tensor = tensor.squeeze().float().cpu().clamp_(*min_max)  # squeeze first, then clamp
-    tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])  # to range [0,1]
-    n_dim = tensor.dim()
-    if n_dim == 4:
-        n_img = len(tensor)
-        img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
-        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
-    elif n_dim == 3:
-        img_np = tensor.numpy()
-        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
-    elif n_dim == 2:
-        img_np = tensor.numpy()
-    else:
-        raise TypeError(
-            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
-    if out_type == np.uint8:
-        img_np = (img_np * 255.0).round()
-        # Important. Unlike matlab, numpy.uint8() WILL NOT round by default.
-    return img_np.astype(out_type)
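
And the analogous round trip through torch (a minimal sketch; it assumes uint2tensor4
and tensor2uint are importable from this module):

    import numpy as np
    import torch

    img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)   # HxWxC uint8
    t = uint2tensor4(img)                                      # 1xCxHxW float in [0, 1]
    assert t.shape == (1, 3, 64, 64) and float(t.max()) <= 1.0
    img2 = tensor2uint(t)
    assert np.array_equal(img, img2)                           # exact for uint8 input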
-
-
-'''
-# --------------------------------------------
-# Augmentation, flip and/or rotate
-# --------------------------------------------
-# The following two are enough.
-# (1) augment_img: numpy image of WxHxC or WxH
-# (2) augment_img_tensor4: tensor image 1xCxWxH
-# --------------------------------------------
-'''
-
-
-def augment_img(img, mode=0):
-    '''Kai Zhang (github: https://github.com/cszn)
-    '''
-    if mode == 0:
-        return img
-    elif mode == 1:
-        return np.flipud(np.rot90(img))
-    elif mode == 2:
-        return np.flipud(img)
-    elif mode == 3:
-        return np.rot90(img, k=3)
-    elif mode == 4:
-        return np.flipud(np.rot90(img, k=2))
-    elif mode == 5:
-        return np.rot90(img)
-    elif mode == 6:
-        return np.rot90(img, k=2)
-    elif mode == 7:
-        return np.flipud(np.rot90(img, k=3))
-
-
-def augment_img_tensor4(img, mode=0):
-    '''Kai Zhang (github: https://github.com/cszn)
-    '''
-    if mode == 0:
-        return img
-    elif mode == 1:
-        return img.rot90(1, [2, 3]).flip([2])
-    elif mode == 2:
-        return img.flip([2])
-    elif mode == 3:
-        return img.rot90(3, [2, 3])
-    elif mode == 4:
-        return img.rot90(2, [2, 3]).flip([2])
-    elif mode == 5:
-        return img.rot90(1, [2, 3])
-    elif mode == 6:
-        return img.rot90(2, [2, 3])
-    elif mode == 7:
-        return img.rot90(3, [2, 3]).flip([2])
-
-
-def augment_img_tensor(img, mode=0):
-    '''Kai Zhang (github: https://github.com/cszn)
-    '''
-    img_size = img.size()
-    img_np = img.data.cpu().numpy()
-    if len(img_size) == 3:
-        img_np = np.transpose(img_np, (1, 2, 0))
-    elif len(img_size) == 4:
-        img_np = np.transpose(img_np, (2, 3, 1, 0))
-    img_np = augment_img(img_np, mode=mode)
-    img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
-    if len(img_size) == 3:
-        img_tensor = img_tensor.permute(2, 0, 1)
-    elif len(img_size) == 4:
-        img_tensor = img_tensor.permute(3, 2, 0, 1)
-    return img_tensor.type_as(img)
-
-
-def augment_img_np3(img, mode=0):
-    if mode == 0:
-        return img
-    elif mode == 1:
-        return img.transpose(1, 0, 2)
-    elif mode == 2:
-        return img[::-1, :, :]
-    elif mode == 3:
-        img = img[::-1, :, :]
-        img = img.transpose(1, 0, 2)
-        return img
-    elif mode == 4:
-        return img[:, ::-1, :]
-    elif mode == 5:
-        img = img[:, ::-1, :]
-        img = img.transpose(1, 0, 2)
-        return img
-    elif mode == 6:
-        img = img[:, ::-1, :]
-        img = img[::-1, :, :]
-        return img
-    elif mode == 7:
-        img = img[:, ::-1, :]
-        img = img[::-1, :, :]
-        img = img.transpose(1, 0, 2)
-        return img
-
-
-def augment_imgs(img_list, hflip=True, rot=True):
-    # horizontal flip OR rotate
-    hflip = hflip and random.random() < 0.5
-    vflip = rot and random.random() < 0.5
-    rot90 = rot and random.random() < 0.5
-
-    def _augment(img):
-        if hflip:
-            img = img[:, ::-1, :]
-        if vflip:
-            img = img[::-1, :, :]
-        if rot90:
-            img = img.transpose(1, 0, 2)
-        return img
-
-    return [_augment(img) for img in img_list]
-
-
-'''
-# --------------------------------------------
-# modcrop and shave
-# --------------------------------------------
-'''
-
-
-def modcrop(img_in, scale):
-    # img_in: Numpy, HWC or HW
-    img = np.copy(img_in)
-    if img.ndim == 2:
-        H, W = img.shape
-        H_r, W_r = H % scale, W % scale
-        img = img[:H - H_r, :W - W_r]
-    elif img.ndim == 3:
-        H, W, C = img.shape
-        H_r, W_r = H % scale, W % scale
-        img = img[:H - H_r, :W - W_r, :]
-    else:
-        raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
-    return img
-
-
-def shave(img_in, border=0):
-    # img_in: Numpy, HWC or HW
-    img = np.copy(img_in)
-    h, w = img.shape[:2]
-    img = img[border:h-border, border:w-border]
-    return img
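
The eight modes of augment_img realize the dihedral group D4 (rotations plus flips), so
every mode has an inverse mode; for example mode 5 (rot90) is undone by mode 3 (rot270).
A minimal check, assuming augment_img is importable from this module:

    import numpy as np

    img = np.arange(12).reshape(3, 4)
    assert np.array_equal(augment_img(augment_img(img, mode=5), mode=3), img)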
-
-
-'''
-# --------------------------------------------
-# image processing process on numpy image
-# channel_convert(in_c, tar_type, img_list):
-# rgb2ycbcr(img, only_y=True):
-# bgr2ycbcr(img, only_y=True):
-# ycbcr2rgb(img):
-# --------------------------------------------
-'''
-
-
-def rgb2ycbcr(img, only_y=True):
-    '''same as matlab rgb2ycbcr
-    only_y: only return Y channel
-    Input:
-        uint8, [0, 255]
-        float, [0, 1]
-    '''
-    in_img_type = img.dtype
-    img = img.astype(np.float32)  # work on a float32 copy; do not modify the caller's array
-    if in_img_type != np.uint8:
-        img *= 255.
-    # convert
-    if only_y:
-        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
-    else:
-        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
-                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
-    if in_img_type == np.uint8:
-        rlt = rlt.round()
-    else:
-        rlt /= 255.
-    return rlt.astype(in_img_type)
-
-
-def ycbcr2rgb(img):
-    '''same as matlab ycbcr2rgb
-    Input:
-        uint8, [0, 255]
-        float, [0, 1]
-    '''
-    in_img_type = img.dtype
-    img = img.astype(np.float32)  # work on a float32 copy; do not modify the caller's array
-    if in_img_type != np.uint8:
-        img *= 255.
-    # convert
-    rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
-                          [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
-    if in_img_type == np.uint8:
-        rlt = rlt.round()
-    else:
-        rlt /= 255.
-    return rlt.astype(in_img_type)
-
-
-def bgr2ycbcr(img, only_y=True):
-    '''bgr version of rgb2ycbcr
-    only_y: only return Y channel
-    Input:
-        uint8, [0, 255]
-        float, [0, 1]
-    '''
-    in_img_type = img.dtype
-    img = img.astype(np.float32)  # work on a float32 copy; do not modify the caller's array
-    if in_img_type != np.uint8:
-        img *= 255.
-    # convert
-    if only_y:
-        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
-    else:
-        rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
-                              [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
-    if in_img_type == np.uint8:
-        rlt = rlt.round()
-    else:
-        rlt /= 255.
-    return rlt.astype(in_img_type)
-
-
-def channel_convert(in_c, tar_type, img_list):
-    # conversion among BGR, gray and y
-    if in_c == 3 and tar_type == 'gray':  # BGR to gray
-        gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
-        return [np.expand_dims(img, axis=2) for img in gray_list]
-    elif in_c == 3 and tar_type == 'y':  # BGR to y
-        y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
-        return [np.expand_dims(img, axis=2) for img in y_list]
-    elif in_c == 1 and tar_type == 'RGB':  # gray/y to 3-channel (BGR order)
-        return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
-    else:
-        return img_list
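
The full-matrix converters are mutual inverses, so a float image should survive the
round trip up to numerical precision (a minimal sketch; it assumes rgb2ycbcr and
ycbcr2rgb are importable from this module; uint8 input would add +/-1 rounding error):

    import numpy as np

    rgb = np.random.rand(8, 8, 3).astype(np.float32)
    ycbcr = rgb2ycbcr(rgb, only_y=False)
    rgb2 = ycbcr2rgb(ycbcr)
    assert np.abs(rgb - rgb2).max() < 1e-3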
-
-
-'''
-# --------------------------------------------
-# metric, PSNR and SSIM
-# --------------------------------------------
-'''
-
-
-# --------------------------------------------
-# PSNR
-# --------------------------------------------
-def calculate_psnr(img1, img2, border=0):
-    # img1 and img2 have range [0, 255]
-    #img1 = img1.squeeze()
-    #img2 = img2.squeeze()
-    if not img1.shape == img2.shape:
-        raise ValueError('Input images must have the same dimensions.')
-    h, w = img1.shape[:2]
-    img1 = img1[border:h-border, border:w-border]
-    img2 = img2[border:h-border, border:w-border]
-
-    img1 = img1.astype(np.float64)
-    img2 = img2.astype(np.float64)
-    mse = np.mean((img1 - img2)**2)
-    if mse == 0:
-        return float('inf')
-    return 20 * math.log10(255.0 / math.sqrt(mse))
-
-
-# --------------------------------------------
-# SSIM
-# --------------------------------------------
-def calculate_ssim(img1, img2, border=0):
-    '''calculate SSIM
-    the same outputs as MATLAB's
-    img1, img2: [0, 255]
-    '''
-    #img1 = img1.squeeze()
-    #img2 = img2.squeeze()
-    if not img1.shape == img2.shape:
-        raise ValueError('Input images must have the same dimensions.')
-    h, w = img1.shape[:2]
-    img1 = img1[border:h-border, border:w-border]
-    img2 = img2[border:h-border, border:w-border]
-
-    if img1.ndim == 2:
-        return ssim(img1, img2)
-    elif img1.ndim == 3:
-        if img1.shape[2] == 3:
-            ssims = []
-            for i in range(3):
-                ssims.append(ssim(img1[:,:,i], img2[:,:,i]))
-            return np.array(ssims).mean()
-        elif img1.shape[2] == 1:
-            return ssim(np.squeeze(img1), np.squeeze(img2))
-    else:
-        raise ValueError('Wrong input image dimensions.')
-
-
-def ssim(img1, img2):
-    C1 = (0.01 * 255)**2
-    C2 = (0.03 * 255)**2
-
-    img1 = img1.astype(np.float64)
-    img2 = img2.astype(np.float64)
-    kernel = cv2.getGaussianKernel(11, 1.5)
-    window = np.outer(kernel, kernel.transpose())
-
-    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
-    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
-    mu1_sq = mu1**2
-    mu2_sq = mu2**2
-    mu1_mu2 = mu1 * mu2
-    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
-    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
-    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
-
-    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
-                                                            (sigma1_sq + sigma2_sq + C2))
-    return ssim_map.mean()
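
A worked example for PSNR: a uniform error of 5 gray levels on a [0, 255] image gives
MSE = 25 and therefore 20*log10(255/5), about 34.15 dB (assuming calculate_psnr is
importable from this module):

    import numpy as np

    img1 = np.full((16, 16), 100, dtype=np.uint8)
    img2 = np.full((16, 16), 105, dtype=np.uint8)
    print(calculate_psnr(img1, img2))  # ~34.15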
-
-
-'''
-# --------------------------------------------
-# matlab's bicubic imresize (numpy and torch) [0, 1]
-# --------------------------------------------
-'''
-
-
-# matlab 'imresize' function, now only support 'bicubic'
-def cubic(x):
-    absx = torch.abs(x)
-    absx2 = absx**2
-    absx3 = absx**3
-    return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
-        (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
-
-
-def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
-    if (scale < 1) and (antialiasing):
-        # Use a modified kernel (larger kernel width) to simultaneously interpolate and antialias
-        kernel_width = kernel_width / scale
-
-    # Output-space coordinates
-    x = torch.linspace(1, out_length, out_length)
-
-    # Input-space coordinates. Calculate the inverse mapping such that 0.5
-    # in output space maps to 0.5 in input space, and 0.5+scale in output
-    # space maps to 1.5 in input space.
-    u = x / scale + 0.5 * (1 - 1 / scale)
-
-    # What is the left-most pixel that can be involved in the computation?
-    left = torch.floor(u - kernel_width / 2)
-
-    # What is the maximum number of pixels that can be involved in the
-    # computation?  Note: it's OK to use an extra pixel here; if the
-    # corresponding weights are all zero, it will be eliminated at the end
-    # of this function.
-    P = math.ceil(kernel_width) + 2
-
-    # The indices of the input pixels involved in computing the k-th output
-    # pixel are in row k of the indices matrix.
-    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
-        1, P).expand(out_length, P)
-
-    # The weights used to compute the k-th output pixel are in row k of the
-    # weights matrix.
-    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
-    # apply cubic kernel
-    if (scale < 1) and (antialiasing):
-        weights = scale * cubic(distance_to_center * scale)
-    else:
-        weights = cubic(distance_to_center)
-    # Normalize the weights matrix so that each row sums to 1.
-    weights_sum = torch.sum(weights, 1).view(out_length, 1)
-    weights = weights / weights_sum.expand(out_length, P)
-
-    # If a column in weights is all zero, get rid of it. Only consider the first and last column.
-    weights_zero_tmp = torch.sum((weights == 0), 0)
-    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
-        indices = indices.narrow(1, 1, P - 2)
-        weights = weights.narrow(1, 1, P - 2)
-    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
-        indices = indices.narrow(1, 0, P - 2)
-        weights = weights.narrow(1, 0, P - 2)
-    weights = weights.contiguous()
-    indices = indices.contiguous()
-    sym_len_s = -indices.min() + 1
-    sym_len_e = indices.max() - in_length
-    indices = indices + sym_len_s - 1
-    return weights, indices, int(sym_len_s), int(sym_len_e)
-
-
-# --------------------------------------------
-# imresize for tensor image [0, 1]
-# --------------------------------------------
-def imresize(img, scale, antialiasing=True):
-    # Now the scale should be the same for H and W
-    # input: img: pytorch tensor, CHW or HW [0,1]
-    # output: CHW or HW [0,1] w/o round
-    need_squeeze = True if img.dim() == 2 else False
-    if need_squeeze:
-        img.unsqueeze_(0)
-    in_C, in_H, in_W = img.size()
-    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
-    kernel_width = 4
-    kernel = 'cubic'
-
-    # Ideally the resize would be performed first along the dimension with the
-    # smallest scale factor; that ordering is not implemented here.
-
-    # get weights and indices
-    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
-        in_H, out_H, scale, kernel, kernel_width, antialiasing)
-    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
-        in_W, out_W, scale, kernel, kernel_width, antialiasing)
-    # process H dimension
-    # symmetric copying
-    img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
-    img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
-
-    sym_patch = img[:, :sym_len_Hs, :]
-    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
-    sym_patch_inv = sym_patch.index_select(1, inv_idx)
-    img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
-
-    sym_patch = img[:, -sym_len_He:, :]
-    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
-    sym_patch_inv = sym_patch.index_select(1, inv_idx)
-    img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
-
-    out_1 = torch.FloatTensor(in_C, out_H, in_W)
-    kernel_width = weights_H.size(1)
-    for i in range(out_H):
-        idx = int(indices_H[i][0])
-        for j in range(out_C):
-            out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
-
-    # process W dimension
-    # symmetric copying
-    out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
-    out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
-
-    sym_patch = out_1[:, :, :sym_len_Ws]
-    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
-    sym_patch_inv = sym_patch.index_select(2, inv_idx)
-    out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
-
-    sym_patch = out_1[:, :, -sym_len_We:]
-    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
-    sym_patch_inv = sym_patch.index_select(2, inv_idx)
-    out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
-
-    out_2 = torch.FloatTensor(in_C, out_H, out_W)
-    kernel_width = weights_W.size(1)
-    for i in range(out_W):
-        idx = int(indices_W[i][0])
-        for j in range(out_C):
-            out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
-    if need_squeeze:
-        out_2.squeeze_()
-    return out_2
-
-
-# --------------------------------------------
-# imresize for numpy image [0, 1]
-# --------------------------------------------
-def imresize_np(img, scale, antialiasing=True):
-    # Now the scale should be the same for H and W
-    # input: img: Numpy, HWC or HW [0,1]
-    # output: HWC or HW [0,1] w/o round
-    img = torch.from_numpy(img)
-    need_squeeze = True if img.dim() == 2 else False
-    if need_squeeze:
-        img.unsqueeze_(2)
-
-    in_H, in_W, in_C = img.size()
-    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
-    kernel_width = 4
-    kernel = 'cubic'
-
-    # Ideally the resize would be performed first along the dimension with the
-    # smallest scale factor; that ordering is not implemented here.
-
-    # get weights and indices
-    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
-        in_H, out_H, scale, kernel, kernel_width, antialiasing)
-    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
-        in_W, out_W, scale, kernel, kernel_width, antialiasing)
-    # process H dimension
-    # symmetric copying
-    img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
-    img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
-
-    sym_patch = img[:sym_len_Hs, :, :]
-    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
-    sym_patch_inv = sym_patch.index_select(0, inv_idx)
-    img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
-
-    sym_patch = img[-sym_len_He:, :, :]
-    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
-    sym_patch_inv = sym_patch.index_select(0, inv_idx)
-    img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
-
-    out_1 = torch.FloatTensor(out_H, in_W, in_C)
-    kernel_width = weights_H.size(1)
-    for i in range(out_H):
-        idx = int(indices_H[i][0])
-        for j in range(out_C):
-            out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
-
-    # process W dimension
-    # symmetric copying
-    out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
-    out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
-
-    sym_patch = out_1[:, :sym_len_Ws, :]
-    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
-    sym_patch_inv = sym_patch.index_select(1, inv_idx)
-    out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
-
-    sym_patch = out_1[:, -sym_len_We:, :]
-    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
-    sym_patch_inv = sym_patch.index_select(1, inv_idx)
-    out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
-
-    out_2 = torch.FloatTensor(out_H, out_W, in_C)
-    kernel_width = weights_W.size(1)
-    for i in range(out_W):
-        idx = int(indices_W[i][0])
-        for j in range(out_C):
-            out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
-    if need_squeeze:
-        out_2.squeeze_()
-
-    return out_2.numpy()
-
-
-if __name__ == '__main__':
-    print('---')
-#    img = imread_uint('test.bmp', 3)
-#    img = uint2single(img)
-#    img_bicubic = imresize_np(img, 1/4)
\ No newline at end of file
diff --git a/ldm/modules/losses/__init__.py b/ldm/modules/losses/__init__.py
deleted file mode 100644
index 876d7c5bd..000000000
--- a/ldm/modules/losses/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from ldm.modules.losses.contperceptual import LPIPSWithDiscriminator
\ No newline at end of file
diff --git a/ldm/modules/losses/contperceptual.py b/ldm/modules/losses/contperceptual.py
deleted file mode 100644
index 672c1e32a..000000000
--- a/ldm/modules/losses/contperceptual.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import torch
-import torch.nn as nn
-
-from taming.modules.losses.vqperceptual import *  # TODO: taming dependency yes/no?
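
The class below weights the reconstruction term as nll = rec / exp(logvar) + logvar with
a single learned scalar logvar; the minimum over logvar sits at exp(logvar) = rec, so the
model can calibrate its effective noise scale. A tiny standalone numerical check of that
claim (the rec value is arbitrary):

    import torch

    rec = torch.tensor(0.37)
    s = torch.linspace(-5, 5, 10001)
    nll = rec / torch.exp(s) + s
    assert torch.isclose(s[nll.argmin()], torch.log(rec), atol=2e-3)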
-
-
-class LPIPSWithDiscriminator(nn.Module):
-    def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0,
-                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
-                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
-                 disc_loss="hinge"):
-
-        super().__init__()
-        assert disc_loss in ["hinge", "vanilla"]
-        self.kl_weight = kl_weight
-        self.pixel_weight = pixelloss_weight
-        self.perceptual_loss = LPIPS().eval()
-        self.perceptual_weight = perceptual_weight
-        # output log variance
-        self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
-
-        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
-                                                 n_layers=disc_num_layers,
-                                                 use_actnorm=use_actnorm
-                                                 ).apply(weights_init)
-        self.discriminator_iter_start = disc_start
-        self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss
-        self.disc_factor = disc_factor
-        self.discriminator_weight = disc_weight
-        self.disc_conditional = disc_conditional
-
-    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
-        if last_layer is not None:
-            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
-            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
-        else:
-            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
-            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
-
-        d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
-        d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
-        d_weight = d_weight * self.discriminator_weight
-        return d_weight
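
calculate_adaptive_weight rescales the adversarial term so that, at the last decoder
layer, its gradient norm matches that of the reconstruction loss; the GAN signal then
cannot drown out reconstruction. A standalone toy illustration of the same formula (the
tensors here are arbitrary, not tied to the class):

    import torch

    last = torch.randn(4, requires_grad=True)
    nll_loss = (last ** 2).sum()
    g_loss = 10.0 * last.sum()
    nll_grads = torch.autograd.grad(nll_loss, last, retain_graph=True)[0]
    g_grads = torch.autograd.grad(g_loss, last, retain_graph=True)[0]
    d_weight = (torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)).clamp(0.0, 1e4).detach()
    # d_weight * g_loss now yields gradients of comparable norm to nll_loss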
-
-    def forward(self, inputs, reconstructions, posteriors, optimizer_idx,
-                global_step, last_layer=None, cond=None, split="train",
-                weights=None):
-        rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
-        if self.perceptual_weight > 0:
-            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
-            rec_loss = rec_loss + self.perceptual_weight * p_loss
-
-        nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
-        weighted_nll_loss = nll_loss
-        if weights is not None:
-            weighted_nll_loss = weights*nll_loss
-        weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
-        nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
-        kl_loss = posteriors.kl()
-        kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
-
-        # now the GAN part
-        if optimizer_idx == 0:
-            # generator update
-            if cond is None:
-                assert not self.disc_conditional
-                logits_fake = self.discriminator(reconstructions.contiguous())
-            else:
-                assert self.disc_conditional
-                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
-            g_loss = -torch.mean(logits_fake)
-
-            if self.disc_factor > 0.0:
-                try:
-                    d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
-                except RuntimeError:
-                    assert not self.training
-                    d_weight = torch.tensor(0.0)
-            else:
-                d_weight = torch.tensor(0.0)
-
-            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
-            loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss
-
-            log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
-                   "{}/logvar".format(split): self.logvar.detach(),
-                   "{}/kl_loss".format(split): kl_loss.detach().mean(),
-                   "{}/nll_loss".format(split): nll_loss.detach().mean(),
-                   "{}/rec_loss".format(split): rec_loss.detach().mean(),
-                   "{}/d_weight".format(split): d_weight.detach(),
-                   "{}/disc_factor".format(split): torch.tensor(disc_factor),
-                   "{}/g_loss".format(split): g_loss.detach().mean(),
-                   }
-            return loss, log
-
-        if optimizer_idx == 1:
-            # second pass for discriminator update
-            if cond is None:
-                logits_real = self.discriminator(inputs.contiguous().detach())
-                logits_fake = self.discriminator(reconstructions.contiguous().detach())
-            else:
-                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
-                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
-
-            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
-            d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
-
-            log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
-                   "{}/logits_real".format(split): logits_real.detach().mean(),
-                   "{}/logits_fake".format(split): logits_fake.detach().mean()
-                   }
-            return d_loss, log
-
diff --git a/ldm/modules/losses/vqperceptual.py b/ldm/modules/losses/vqperceptual.py
deleted file mode 100644
index f69981769..000000000
--- a/ldm/modules/losses/vqperceptual.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import torch
-from torch import nn
-import torch.nn.functional as F
-from einops import repeat
-
-from taming.modules.discriminator.model import NLayerDiscriminator, weights_init
-from taming.modules.losses.lpips import LPIPS
-from taming.modules.losses.vqperceptual import hinge_d_loss, vanilla_d_loss
-
-
-def exists(x):
-    # small helper used by VQLPIPSWithDiscriminator.forward below; it is not
-    # provided by any of the imports above, so it is defined locally
-    return x is not None
-
-
-def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights):
-    assert weights.shape[0] == logits_real.shape[0] == logits_fake.shape[0]
-    loss_real = torch.mean(F.relu(1. - logits_real), dim=[1,2,3])
-    loss_fake = torch.mean(F.relu(1. + logits_fake), dim=[1,2,3])
-    loss_real = (weights * loss_real).sum() / weights.sum()
-    loss_fake = (weights * loss_fake).sum() / weights.sum()
-    d_loss = 0.5 * (loss_real + loss_fake)
-    return d_loss
-
-
-def adopt_weight(weight, global_step, threshold=0, value=0.):
-    if global_step < threshold:
-        weight = value
-    return weight
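
adopt_weight simply gates a coefficient on the training step, which is how both loss
classes in this file delay the adversarial term until the discriminator warm-up point
(a minimal check, assuming adopt_weight is importable from this module):

    assert adopt_weight(1.0, global_step=400, threshold=500) == 0.0
    assert adopt_weight(1.0, global_step=600, threshold=500) == 1.0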
when perplexity == num_embeddings then all clusters are used exactly equally - encodings = F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed) - avg_probs = encodings.mean(0) - perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp() - cluster_use = torch.sum(avg_probs > 0) - return perplexity, cluster_use - -def l1(x, y): - return torch.abs(x-y) - - -def l2(x, y): - return torch.pow((x-y), 2) - - -class VQLPIPSWithDiscriminator(nn.Module): - def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0, - disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, - perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, - disc_ndf=64, disc_loss="hinge", n_classes=None, perceptual_loss="lpips", - pixel_loss="l1"): - super().__init__() - assert disc_loss in ["hinge", "vanilla"] - assert perceptual_loss in ["lpips", "clips", "dists"] - assert pixel_loss in ["l1", "l2"] - self.codebook_weight = codebook_weight - self.pixel_weight = pixelloss_weight - if perceptual_loss == "lpips": - print(f"{self.__class__.__name__}: Running with LPIPS.") - self.perceptual_loss = LPIPS().eval() - else: - raise ValueError(f"Unknown perceptual loss: >> {perceptual_loss} <<") - self.perceptual_weight = perceptual_weight - - if pixel_loss == "l1": - self.pixel_loss = l1 - else: - self.pixel_loss = l2 - - self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, - n_layers=disc_num_layers, - use_actnorm=use_actnorm, - ndf=disc_ndf - ).apply(weights_init) - self.discriminator_iter_start = disc_start - if disc_loss == "hinge": - self.disc_loss = hinge_d_loss - elif disc_loss == "vanilla": - self.disc_loss = vanilla_d_loss - else: - raise ValueError(f"Unknown GAN loss '{disc_loss}'.") - print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.") - self.disc_factor = disc_factor - self.discriminator_weight = disc_weight - self.disc_conditional = disc_conditional - self.n_classes = n_classes - - def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): - if last_layer is not None: - nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] - g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] - else: - nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] - g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] - - d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) - d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() - d_weight = d_weight * self.discriminator_weight - return d_weight - - def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx, - global_step, last_layer=None, cond=None, split="train", predicted_indices=None): - if not exists(codebook_loss): - codebook_loss = torch.tensor([0.]).to(inputs.device) - #rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) - rec_loss = self.pixel_loss(inputs.contiguous(), reconstructions.contiguous()) - if self.perceptual_weight > 0: - p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) - rec_loss = rec_loss + self.perceptual_weight * p_loss - else: - p_loss = torch.tensor([0.0]) - - nll_loss = rec_loss - #nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] - nll_loss = torch.mean(nll_loss) - - # now the GAN part - if optimizer_idx == 0: - # generator update - if cond is None: - assert not self.disc_conditional - logits_fake = self.discriminator(reconstructions.contiguous()) - else: - assert self.disc_conditional - 
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) - g_loss = -torch.mean(logits_fake) - - try: - d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) - except RuntimeError: - assert not self.training - d_weight = torch.tensor(0.0) - - disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) - loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean() - - log = {"{}/total_loss".format(split): loss.clone().detach().mean(), - "{}/quant_loss".format(split): codebook_loss.detach().mean(), - "{}/nll_loss".format(split): nll_loss.detach().mean(), - "{}/rec_loss".format(split): rec_loss.detach().mean(), - "{}/p_loss".format(split): p_loss.detach().mean(), - "{}/d_weight".format(split): d_weight.detach(), - "{}/disc_factor".format(split): torch.tensor(disc_factor), - "{}/g_loss".format(split): g_loss.detach().mean(), - } - if predicted_indices is not None: - assert self.n_classes is not None - with torch.no_grad(): - perplexity, cluster_usage = measure_perplexity(predicted_indices, self.n_classes) - log[f"{split}/perplexity"] = perplexity - log[f"{split}/cluster_usage"] = cluster_usage - return loss, log - - if optimizer_idx == 1: - # second pass for discriminator update - if cond is None: - logits_real = self.discriminator(inputs.contiguous().detach()) - logits_fake = self.discriminator(reconstructions.contiguous().detach()) - else: - logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) - logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) - - disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) - d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) - - log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), - "{}/logits_real".format(split): logits_real.detach().mean(), - "{}/logits_fake".format(split): logits_fake.detach().mean() - } - return d_loss, log diff --git a/ldm/modules/x_transformer.py b/ldm/modules/x_transformer.py deleted file mode 100644 index 5fc15bf9c..000000000 --- a/ldm/modules/x_transformer.py +++ /dev/null @@ -1,641 +0,0 @@ -"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers""" -import torch -from torch import nn, einsum -import torch.nn.functional as F -from functools import partial -from inspect import isfunction -from collections import namedtuple -from einops import rearrange, repeat, reduce - -# constants - -DEFAULT_DIM_HEAD = 64 - -Intermediates = namedtuple('Intermediates', [ - 'pre_softmax_attn', - 'post_softmax_attn' -]) - -LayerIntermediates = namedtuple('Intermediates', [ - 'hiddens', - 'attn_intermediates' -]) - - -class AbsolutePositionalEmbedding(nn.Module): - def __init__(self, dim, max_seq_len): - super().__init__() - self.emb = nn.Embedding(max_seq_len, dim) - self.init_() - - def init_(self): - nn.init.normal_(self.emb.weight, std=0.02) - - def forward(self, x): - n = torch.arange(x.shape[1], device=x.device) - return self.emb(n)[None, :, :] - - -class FixedPositionalEmbedding(nn.Module): - def __init__(self, dim): - super().__init__() - inv_freq = 1. 
/ (10000 ** (torch.arange(0, dim, 2).float() / dim)) - self.register_buffer('inv_freq', inv_freq) - - def forward(self, x, seq_dim=1, offset=0): - t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset - sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq) - emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) - return emb[None, :, :] - - -# helpers - -def exists(val): - return val is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def always(val): - def inner(*args, **kwargs): - return val - return inner - - -def not_equals(val): - def inner(x): - return x != val - return inner - - -def equals(val): - def inner(x): - return x == val - return inner - - -def max_neg_value(tensor): - return -torch.finfo(tensor.dtype).max - - -# keyword argument helpers - -def pick_and_pop(keys, d): - values = list(map(lambda key: d.pop(key), keys)) - return dict(zip(keys, values)) - - -def group_dict_by_key(cond, d): - return_val = [dict(), dict()] - for key in d.keys(): - match = bool(cond(key)) - ind = int(not match) - return_val[ind][key] = d[key] - return (*return_val,) - - -def string_begins_with(prefix, str): - return str.startswith(prefix) - - -def group_by_key_prefix(prefix, d): - return group_dict_by_key(partial(string_begins_with, prefix), d) - - -def groupby_prefix_and_trim(prefix, d): - kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) - kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) - return kwargs_without_prefix, kwargs - - -# classes -class Scale(nn.Module): - def __init__(self, value, fn): - super().__init__() - self.value = value - self.fn = fn - - def forward(self, x, **kwargs): - x, *rest = self.fn(x, **kwargs) - return (x * self.value, *rest) - - -class Rezero(nn.Module): - def __init__(self, fn): - super().__init__() - self.fn = fn - self.g = nn.Parameter(torch.zeros(1)) - - def forward(self, x, **kwargs): - x, *rest = self.fn(x, **kwargs) - return (x * self.g, *rest) - - -class ScaleNorm(nn.Module): - def __init__(self, dim, eps=1e-5): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(1)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class RMSNorm(nn.Module): - def __init__(self, dim, eps=1e-8): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(dim)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class Residual(nn.Module): - def forward(self, x, residual): - return x + residual - - -class GRUGating(nn.Module): - def __init__(self, dim): - super().__init__() - self.gru = nn.GRUCell(dim, dim) - - def forward(self, x, residual): - gated_output = self.gru( - rearrange(x, 'b n d -> (b n) d'), - rearrange(residual, 'b n d -> (b n) d') - ) - - return gated_output.reshape_as(x) - - -# feedforward - -class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * F.gelu(gate) - - -class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - 
project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - nn.GELU() - ) if not glu else GEGLU(dim, inner_dim) - - self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - def forward(self, x): - return self.net(x) - - -# attention. -class Attention(nn.Module): - def __init__( - self, - dim, - dim_head=DEFAULT_DIM_HEAD, - heads=8, - causal=False, - mask=None, - talking_heads=False, - sparse_topk=None, - use_entmax15=False, - num_mem_kv=0, - dropout=0., - on_attn=False - ): - super().__init__() - if use_entmax15: - raise NotImplementedError("Check out entmax activation instead of softmax activation!") - self.scale = dim_head ** -0.5 - self.heads = heads - self.causal = causal - self.mask = mask - - inner_dim = dim_head * heads - - self.to_q = nn.Linear(dim, inner_dim, bias=False) - self.to_k = nn.Linear(dim, inner_dim, bias=False) - self.to_v = nn.Linear(dim, inner_dim, bias=False) - self.dropout = nn.Dropout(dropout) - - # talking heads - self.talking_heads = talking_heads - if talking_heads: - self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - - # explicit topk sparse attention - self.sparse_topk = sparse_topk - - # entmax - #self.attn_fn = entmax15 if use_entmax15 else F.softmax - self.attn_fn = F.softmax - - # add memory key / values - self.num_mem_kv = num_mem_kv - if num_mem_kv > 0: - self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - - # attention on attention - self.attn_on_attn = on_attn - self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim) - - def forward( - self, - x, - context=None, - mask=None, - context_mask=None, - rel_pos=None, - sinusoidal_emb=None, - prev_attn=None, - mem=None - ): - b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device - kv_input = default(context, x) - - q_input = x - k_input = kv_input - v_input = kv_input - - if exists(mem): - k_input = torch.cat((mem, k_input), dim=-2) - v_input = torch.cat((mem, v_input), dim=-2) - - if exists(sinusoidal_emb): - # in shortformer, the query would start at a position offset depending on the past cached memory - offset = k_input.shape[-2] - q_input.shape[-2] - q_input = q_input + sinusoidal_emb(q_input, offset=offset) - k_input = k_input + sinusoidal_emb(k_input) - - q = self.to_q(q_input) - k = self.to_k(k_input) - v = self.to_v(v_input) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)) - - input_mask = None - if any(map(exists, (mask, context_mask))): - q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) - k_mask = q_mask if not exists(context) else context_mask - k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool()) - q_mask = rearrange(q_mask, 'b i -> b () i ()') - k_mask = rearrange(k_mask, 'b j -> b () () j') - input_mask = q_mask * k_mask - - if self.num_mem_kv > 0: - mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v)) - k = torch.cat((mem_k, k), dim=-2) - v = torch.cat((mem_v, v), dim=-2) - if exists(input_mask): - input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) - - dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale - mask_value = max_neg_value(dots) - - if exists(prev_attn): - dots = dots + prev_attn - - pre_softmax_attn = dots - - if talking_heads: - dots = 
einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous() - - if exists(rel_pos): - dots = rel_pos(dots) - - if exists(input_mask): - dots.masked_fill_(~input_mask, mask_value) - del input_mask - - if self.causal: - i, j = dots.shape[-2:] - r = torch.arange(i, device=device) - mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j') - mask = F.pad(mask, (j - i, 0), value=False) - dots.masked_fill_(mask, mask_value) - del mask - - if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]: - top, _ = dots.topk(self.sparse_topk, dim=-1) - vk = top[..., -1].unsqueeze(-1).expand_as(dots) - mask = dots < vk - dots.masked_fill_(mask, mask_value) - del mask - - attn = self.attn_fn(dots, dim=-1) - post_softmax_attn = attn - - attn = self.dropout(attn) - - if talking_heads: - attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous() - - out = einsum('b h i j, b h j d -> b h i d', attn, v) - out = rearrange(out, 'b h n d -> b n (h d)') - - intermediates = Intermediates( - pre_softmax_attn=pre_softmax_attn, - post_softmax_attn=post_softmax_attn - ) - - return self.to_out(out), intermediates - - -class AttentionLayers(nn.Module): - def __init__( - self, - dim, - depth, - heads=8, - causal=False, - cross_attend=False, - only_cross=False, - use_scalenorm=False, - use_rmsnorm=False, - use_rezero=False, - rel_pos_num_buckets=32, - rel_pos_max_distance=128, - position_infused_attn=False, - custom_layers=None, - sandwich_coef=None, - par_ratio=None, - residual_attn=False, - cross_residual_attn=False, - macaron=False, - pre_norm=True, - gate_residual=False, - **kwargs - ): - super().__init__() - ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) - attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs) - - dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) - - self.dim = dim - self.depth = depth - self.layers = nn.ModuleList([]) - - self.has_pos_emb = position_infused_attn - self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None - self.rotary_pos_emb = always(None) - - assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' - self.rel_pos = None - - self.pre_norm = pre_norm - - self.residual_attn = residual_attn - self.cross_residual_attn = cross_residual_attn - - norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm - norm_class = RMSNorm if use_rmsnorm else norm_class - norm_fn = partial(norm_class, dim) - - norm_fn = nn.Identity if use_rezero else norm_fn - branch_fn = Rezero if use_rezero else None - - if cross_attend and not only_cross: - default_block = ('a', 'c', 'f') - elif cross_attend and only_cross: - default_block = ('c', 'f') - else: - default_block = ('a', 'f') - - if macaron: - default_block = ('f',) + default_block - - if exists(custom_layers): - layer_types = custom_layers - elif exists(par_ratio): - par_depth = depth * len(default_block) - assert 1 < par_ratio <= par_depth, 'par ratio out of range' - default_block = tuple(filter(not_equals('f'), default_block)) - par_attn = par_depth // par_ratio - depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper - par_width = (depth_cut + depth_cut // par_attn) // par_attn - assert len(default_block) <= par_width, 'default block is too large for par_ratio' - par_block = default_block + ('f',) * (par_width - len(default_block)) - par_head = par_block * par_attn - layer_types = par_head + ('f',) * (par_depth - 
len(par_head)) - elif exists(sandwich_coef): - assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' - layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef - else: - layer_types = default_block * depth - - self.layer_types = layer_types - self.num_attn_layers = len(list(filter(equals('a'), layer_types))) - - for layer_type in self.layer_types: - if layer_type == 'a': - layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) - elif layer_type == 'c': - layer = Attention(dim, heads=heads, **attn_kwargs) - elif layer_type == 'f': - layer = FeedForward(dim, **ff_kwargs) - layer = layer if not macaron else Scale(0.5, layer) - else: - raise Exception(f'invalid layer type {layer_type}') - - if isinstance(layer, Attention) and exists(branch_fn): - layer = branch_fn(layer) - - if gate_residual: - residual_fn = GRUGating(dim) - else: - residual_fn = Residual() - - self.layers.append(nn.ModuleList([ - norm_fn(), - layer, - residual_fn - ])) - - def forward( - self, - x, - context=None, - mask=None, - context_mask=None, - mems=None, - return_hiddens=False - ): - hiddens = [] - intermediates = [] - prev_attn = None - prev_cross_attn = None - - mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers - - for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)): - is_last = ind == (len(self.layers) - 1) - - if layer_type == 'a': - hiddens.append(x) - layer_mem = mems.pop(0) - - residual = x - - if self.pre_norm: - x = norm(x) - - if layer_type == 'a': - out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos, - prev_attn=prev_attn, mem=layer_mem) - elif layer_type == 'c': - out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn) - elif layer_type == 'f': - out = block(x) - - x = residual_fn(out, residual) - - if layer_type in ('a', 'c'): - intermediates.append(inter) - - if layer_type == 'a' and self.residual_attn: - prev_attn = inter.pre_softmax_attn - elif layer_type == 'c' and self.cross_residual_attn: - prev_cross_attn = inter.pre_softmax_attn - - if not self.pre_norm and not is_last: - x = norm(x) - - if return_hiddens: - intermediates = LayerIntermediates( - hiddens=hiddens, - attn_intermediates=intermediates - ) - - return x, intermediates - - return x - - -class Encoder(AttentionLayers): - def __init__(self, **kwargs): - assert 'causal' not in kwargs, 'cannot set causality on encoder' - super().__init__(causal=False, **kwargs) - - - -class TransformerWrapper(nn.Module): - def __init__( - self, - *, - num_tokens, - max_seq_len, - attn_layers, - emb_dim=None, - max_mem_len=0., - emb_dropout=0., - num_memory_tokens=None, - tie_embedding=False, - use_pos_emb=True - ): - super().__init__() - assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' - - dim = attn_layers.dim - emb_dim = default(emb_dim, dim) - - self.max_seq_len = max_seq_len - self.max_mem_len = max_mem_len - self.num_tokens = num_tokens - - self.token_emb = nn.Embedding(num_tokens, emb_dim) - self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if ( - use_pos_emb and not attn_layers.has_pos_emb) else always(0) - self.emb_dropout = nn.Dropout(emb_dropout) - - self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() - self.attn_layers = attn_layers - self.norm = nn.LayerNorm(dim) - - self.init_() - - self.to_logits 
= nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t() - - # memory tokens (like [cls]) from Memory Transformers paper - num_memory_tokens = default(num_memory_tokens, 0) - self.num_memory_tokens = num_memory_tokens - if num_memory_tokens > 0: - self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) - - # let funnel encoder know number of memory tokens, if specified - if hasattr(attn_layers, 'num_memory_tokens'): - attn_layers.num_memory_tokens = num_memory_tokens - - def init_(self): - nn.init.normal_(self.token_emb.weight, std=0.02) - - def forward( - self, - x, - return_embeddings=False, - mask=None, - return_mems=False, - return_attn=False, - mems=None, - **kwargs - ): - b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens - x = self.token_emb(x) - x += self.pos_emb(x) - x = self.emb_dropout(x) - - x = self.project_emb(x) - - if num_mem > 0: - mem = repeat(self.memory_tokens, 'n d -> b n d', b=b) - x = torch.cat((mem, x), dim=1) - - # auto-handle masking after appending memory tokens - if exists(mask): - mask = F.pad(mask, (num_mem, 0), value=True) - - x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) - x = self.norm(x) - - mem, x = x[:, :num_mem], x[:, num_mem:] - - out = self.to_logits(x) if not return_embeddings else x - - if return_mems: - hiddens = intermediates.hiddens - new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens - new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) - return out, new_mems - - if return_attn: - attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) - return out, attn_maps - - return out - diff --git a/ldm/util.py b/ldm/util.py deleted file mode 100644 index 8ba38853e..000000000 --- a/ldm/util.py +++ /dev/null @@ -1,203 +0,0 @@ -import importlib - -import torch -import numpy as np -from collections import abc -from einops import rearrange -from functools import partial - -import multiprocessing as mp -from threading import Thread -from queue import Queue - -from inspect import isfunction -from PIL import Image, ImageDraw, ImageFont - - -def log_txt_as_img(wh, xc, size=10): - # wh a tuple of (width, height) - # xc a list of captions to plot - b = len(xc) - txts = list() - for bi in range(b): - txt = Image.new("RGB", wh, color="white") - draw = ImageDraw.Draw(txt) - font = ImageFont.truetype('data/DejaVuSans.ttf', size=size) - nc = int(40 * (wh[0] / 256)) - lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc)) - - try: - draw.text((0, 0), lines, fill="black", font=font) - except UnicodeEncodeError: - print("Cant encode string for logging. Skipping.") - - txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0 - txts.append(txt) - txts = np.stack(txts) - txts = torch.tensor(txts) - return txts - - -def ismap(x): - if not isinstance(x, torch.Tensor): - return False - return (len(x.shape) == 4) and (x.shape[1] > 3) - - -def isimage(x): - if not isinstance(x, torch.Tensor): - return False - return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1) - - -def exists(x): - return x is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def mean_flat(tensor): - """ - https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 - Take the mean over all non-batch dimensions. 
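(For example, applied to a tensor of shape (N, C, H, W) this reduces over dims 1..3 and returns a tensor of shape (N,): one scalar loss term per batch element.)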
- """ - return tensor.mean(dim=list(range(1, len(tensor.shape)))) - - -def count_params(model, verbose=False): - total_params = sum(p.numel() for p in model.parameters()) - if verbose: - print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.") - return total_params - - -def instantiate_from_config(config): - if not "target" in config: - if config == '__is_first_stage__': - return None - elif config == "__is_unconditional__": - return None - raise KeyError("Expected key `target` to instantiate.") - return get_obj_from_str(config["target"])(**config.get("params", dict())) - - -def get_obj_from_str(string, reload=False): - module, cls = string.rsplit(".", 1) - if reload: - module_imp = importlib.import_module(module) - importlib.reload(module_imp) - return getattr(importlib.import_module(module, package=None), cls) - - -def _do_parallel_data_prefetch(func, Q, data, idx, idx_to_fn=False): - # create dummy dataset instance - - # run prefetching - if idx_to_fn: - res = func(data, worker_id=idx) - else: - res = func(data) - Q.put([idx, res]) - Q.put("Done") - - -def parallel_data_prefetch( - func: callable, data, n_proc, target_data_type="ndarray", cpu_intensive=True, use_worker_id=False -): - # if target_data_type not in ["ndarray", "list"]: - # raise ValueError( - # "Data, which is passed to parallel_data_prefetch has to be either of type list or ndarray." - # ) - if isinstance(data, np.ndarray) and target_data_type == "list": - raise ValueError("list expected but function got ndarray.") - elif isinstance(data, abc.Iterable): - if isinstance(data, dict): - print( - f'WARNING:"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.' - ) - data = list(data.values()) - if target_data_type == "ndarray": - data = np.asarray(data) - else: - data = list(data) - else: - raise TypeError( - f"The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}." - ) - - if cpu_intensive: - Q = mp.Queue(1000) - proc = mp.Process - else: - Q = Queue(1000) - proc = Thread - # spawn processes - if target_data_type == "ndarray": - arguments = [ - [func, Q, part, i, use_worker_id] - for i, part in enumerate(np.array_split(data, n_proc)) - ] - else: - step = ( - int(len(data) / n_proc + 1) - if len(data) % n_proc != 0 - else int(len(data) / n_proc) - ) - arguments = [ - [func, Q, part, i, use_worker_id] - for i, part in enumerate( - [data[i: i + step] for i in range(0, len(data), step)] - ) - ] - processes = [] - for i in range(n_proc): - p = proc(target=_do_parallel_data_prefetch, args=arguments[i]) - processes += [p] - - # start processes - print(f"Start prefetching...") - import time - - start = time.time() - gather_res = [[] for _ in range(n_proc)] - try: - for p in processes: - p.start() - - k = 0 - while k < n_proc: - # get result - res = Q.get() - if res == "Done": - k += 1 - else: - gather_res[res[0]] = res[1] - - except Exception as e: - print("Exception: ", e) - for p in processes: - p.terminate() - - raise e - finally: - for p in processes: - p.join() - print(f"Prefetching complete. 
[{time.time() - start} sec.]") - - if target_data_type == 'ndarray': - if not isinstance(gather_res[0], np.ndarray): - return np.concatenate([np.asarray(r) for r in gather_res], axis=0) - - # order outputs - return np.concatenate(gather_res, axis=0) - elif target_data_type == 'list': - out = [] - for r in gather_res: - out.extend(r) - return out - else: - return gather_res diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 3ec3f98a6..edb8b4204 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -28,7 +28,7 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At # new memory efficient cross attention blocks do not support hypernets and we already # have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention -ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention +# ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention # silence new console spam from SD2 ldm.modules.attention.print = lambda *args: None @@ -82,7 +82,12 @@ class StableDiffusionModelHijack: def hijack(self, m): - if type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder: + if shared.text_model_name == "XLMR-Large": + model_embeddings = m.cond_stage_model.roberta.embeddings + model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self) + m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) + + elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder: model_embeddings = m.cond_stage_model.transformer.text_model.embeddings model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self) m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) @@ -91,11 +96,7 @@ class StableDiffusionModelHijack: m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self) m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) apply_optimizations() - elif shared.text_model_name == "XLMR-Large": - model_embeddings = m.cond_stage_model.roberta.embeddings - model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self) - m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) - + self.clip = m.cond_stage_model fix_checkpoint() diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index b451d1cfe..9ea6e1cec 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -4,7 +4,7 @@ import torch from modules import prompt_parser, devices from modules.shared import opts - +import modules.shared as shared def get_target_prompt_token_count(token_count): return math.ceil(max(token_count, 1) / 75) * 75 @@ -177,6 +177,9 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count def forward(self, text): + if shared.text_model_name == "XLMR-Large": + return self.wrapped.encode(text) + use_old = opts.use_old_emphasis_implementation if use_old: batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = 
self.process_text_old(text) @@ -254,7 +257,10 @@ class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase): def __init__(self, wrapped, hijack): super().__init__(wrapped, hijack) self.tokenizer = wrapped.tokenizer - self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0] + if shared.text_model_name == "XLMR-Large": + self.comma_token = None + else : + self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0] self.token_mults = {} tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k] diff --git a/ldm/modules/encoders/xlmr.py b/modules/xlmr.py similarity index 100% rename from ldm/modules/encoders/xlmr.py rename to modules/xlmr.py From 9c86fb8cace6d8ac0843e0ddad0ba5ae7f3148c9 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Fri, 2 Dec 2022 16:08:46 +0800 Subject: [PATCH 04/53] fix bug Signed-off-by: zhaohu xing <920232796@qq.com> --- modules/shared.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 1408dee33..ac7678c3d 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -111,7 +111,11 @@ restricted_opts = { from omegaconf import OmegaConf config = OmegaConf.load(f"{cmd_opts.config}") # XLMR-Large -text_model_name = config.model.params.cond_stage_config.params.name +try: + text_model_name = config.model.params.cond_stage_config.params.name + +except : + text_model_name = "stable_diffusion" cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access From 4929503258d80abbc4b5f40da034298fe3803906 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Tue, 6 Dec 2022 09:03:55 +0800 Subject: [PATCH 05/53] fix bugs Signed-off-by: zhaohu xing <920232796@qq.com> --- modules/devices.py | 4 +-- modules/sd_hijack.py | 2 +- v2-inference.yaml | 67 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 70 insertions(+), 3 deletions(-) create mode 100644 v2-inference.yaml diff --git a/modules/devices.py b/modules/devices.py index e69c1fe38..f00079c6b 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -38,8 +38,8 @@ def get_optimal_device(): if torch.cuda.is_available(): return torch.device(get_cuda_device_string()) - # if has_mps(): - # return torch.device("mps") + if has_mps(): + return torch.device("mps") return cpu diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index edb8b4204..cd65d356a 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -28,7 +28,7 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At # new memory efficient cross attention blocks do not support hypernets and we already # have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention -# ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention +ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention # silence new console spam from SD2 ldm.modules.attention.print = lambda *args: None diff --git a/v2-inference.yaml b/v2-inference.yaml new file mode 100644 index 000000000..0eb25395f --- /dev/null +++ b/v2-inference.yaml @@ -0,0 +1,67 @@ +model: + base_learning_rate: 1.0e-4 + target: 
ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False # we set this to false because this is an inference only config + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + use_fp16: True + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_head_channels: 64 # need to fix for flash-attn + use_spatial_transformer: True + use_linear_in_transformer: True + transformer_depth: 1 + context_dim: 1024 + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + #attn_type: "vanilla-xformers" + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder + params: + freeze: True + layer: "penultimate" \ No newline at end of file From 5dcc22606d05ebe5ae89c990bd83a3eb068fcb78 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Tue, 6 Dec 2022 16:04:50 +0800 Subject: [PATCH 06/53] add hash and fix undo hijack bug Signed-off-by: zhaohu xing <920232796@qq.com> --- .DS_Store | Bin 0 -> 6148 bytes launch.py | 10 +++++----- modules/sd_hijack.py | 6 +++++- v2-inference.yaml => v2-inference-v.yaml | 1 + 4 files changed, 11 insertions(+), 6 deletions(-) create mode 100644 .DS_Store rename v2-inference.yaml => v2-inference-v.yaml (98%) diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmZQzU|@7AO)+F(5MW?n;9!8z45|!R0Z1N%F(jFgL>QrFAPJ2!M?+vV1V%$(Gz3ON zU^D~25V%SxcdJP zRior+2#kinunYl47MEZbCs3t{!+W4QHvuXKVuPw;Mo^s$(F3lEVT}ML$bg~*R5_@+ b2Uo?6kTwK}57Iu`5P${HC_Nei0}uiLNUI8I literal 0 HcmV?d00001 diff --git a/launch.py b/launch.py index 0d8f2776a..0e1bbaf22 100644 --- a/launch.py +++ b/launch.py @@ -234,11 +234,11 @@ def prepare_enviroment(): os.makedirs(dir_repos, exist_ok=True) - git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", ) - git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", ) - git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", ) - git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", ) - git_clone(blip_repo, repo_dir('BLIP'), "BLIP", ) + git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash) + git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash) + git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash) + git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash) + git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash) if not is_installed("lpips"): run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 
'requirements.txt')}", "requirements for CodeFormer") diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 9b5890e70..9fed1b6f4 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -112,7 +112,11 @@ class StableDiffusionModelHijack: self.layers = flatten(m) def undo_hijack(self, m): - if type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords: + + if shared.text_model_name == "XLMR-Large": + m.cond_stage_model = m.cond_stage_model.wrapped + + elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords: m.cond_stage_model = m.cond_stage_model.wrapped model_embeddings = m.cond_stage_model.transformer.text_model.embeddings diff --git a/v2-inference.yaml b/v2-inference-v.yaml similarity index 98% rename from v2-inference.yaml rename to v2-inference-v.yaml index 0eb25395f..513cd635c 100644 --- a/v2-inference.yaml +++ b/v2-inference-v.yaml @@ -2,6 +2,7 @@ model: base_learning_rate: 1.0e-4 target: ldm.models.diffusion.ddpm.LatentDiffusion params: + parameterization: "v" linear_start: 0.00085 linear_end: 0.0120 num_timesteps_cond: 1 From 965fc5ac5a6ccdf38342e21c97183011a04e799e Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Tue, 6 Dec 2022 16:15:15 +0800 Subject: [PATCH 07/53] delete a file Signed-off-by: zhaohu xing <920232796@qq.com> --- .DS_Store | Bin 6148 -> 0 bytes modules/shared.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 .DS_Store diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmZQzU|@7AO)+F(5MW?n;9!8z45|!R0Z1N%F(jFgL>QrFAPJ2!M?+vV1V%$(Gz3ON zU^D~25V%SxcdJP zRior+2#kinunYl47MEZbCs3t{!+W4QHvuXKVuPw;Mo^s$(F3lEVT}ML$bg~*R5_@+ b2Uo?6kTwK}57Iu`5P${HC_Nei0}uiLNUI8I diff --git a/modules/shared.py b/modules/shared.py index 522c56c12..8419b5310 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -22,7 +22,7 @@ demo = None sd_model_file = os.path.join(script_path, 'model.ckpt') default_sd_model_file = sd_model_file parser = argparse.ArgumentParser() -parser.add_argument("--config", type=str, default="configs/altdiffusion/ad-inference.yaml", help="path to config which constructs model",) +parser.add_argument("--config", type=str, default=os.path.join(script_path, "v1-inference.yaml"), help="path to config which constructs model",) parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",) parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints") parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) From 5be9387b230794a8c771120577cb213490c940c0 Mon Sep 17 00:00:00 2001 From: Philpax Date: Sun, 25 Dec 2022 21:45:44 +1100 Subject: [PATCH 08/53] fix(api): only begin/end state in lock --- modules/api/api.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 1ceba75d7..59b81c932 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -130,14 +130,12 @@ class Api: if populate.sampler_name: populate.sampler_index = None # prevent a warning later on p = StableDiffusionProcessingTxt2Img(**vars(populate)) - # Override object param - - shared.state.begin() with 
self.queue_lock: + shared.state.begin() processed = process_images(p) + shared.state.end() - shared.state.end() b64images = list(map(encode_pil_to_base64, processed.images)) @@ -169,12 +167,10 @@ class Api: p.init_images = [decode_base64_to_image(x) for x in init_images] - shared.state.begin() - with self.queue_lock: + shared.state.begin() processed = process_images(p) - - shared.state.end() + shared.state.end() b64images = list(map(encode_pil_to_base64, processed.images)) From 893933e05ad267778111b4fad6d1ecb80937afdf Mon Sep 17 00:00:00 2001 From: hitomi Date: Sun, 25 Dec 2022 20:49:25 +0800 Subject: [PATCH 09/53] Add memory cache for VAE weights --- modules/sd_vae.py | 31 +++++++++++++++++++++++++------ modules/shared.py | 1 + 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 3856418e9..ac71d62db 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -1,5 +1,6 @@ import torch import os +import collections from collections import namedtuple from modules import shared, devices, script_callbacks from modules.paths import models_path @@ -30,6 +31,7 @@ base_vae = None loaded_vae_file = None checkpoint_info = None +checkpoints_loaded = collections.OrderedDict() def get_base_vae(model): if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model: @@ -149,13 +151,30 @@ def load_vae(model, vae_file=None): global first_load, vae_dict, vae_list, loaded_vae_file # save_settings = False + cache_enabled = shared.opts.sd_vae_checkpoint_cache > 0 + if vae_file: - assert os.path.isfile(vae_file), f"VAE file doesn't exist: {vae_file}" - print(f"Loading VAE weights from: {vae_file}") - store_base_vae(model) - vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location) - vae_dict_1 = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss" and k not in vae_ignore_keys} - _load_vae_dict(model, vae_dict_1) + if cache_enabled and vae_file in checkpoints_loaded: + # use vae checkpoint cache + print(f"Loading VAE weights [{get_filename(vae_file)}] from cache") + store_base_vae(model) + _load_vae_dict(model, checkpoints_loaded[vae_file]) + else: + assert os.path.isfile(vae_file), f"VAE file doesn't exist: {vae_file}" + print(f"Loading VAE weights from: {vae_file}") + store_base_vae(model) + vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location) + vae_dict_1 = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss" and k not in vae_ignore_keys} + _load_vae_dict(model, vae_dict_1) + + if cache_enabled: + # cache newly loaded vae + checkpoints_loaded[vae_file] = vae_dict_1.copy() + + # clean up cache if limit is reached + if cache_enabled: + while len(checkpoints_loaded) > shared.opts.sd_vae_checkpoint_cache + 1: # we need to count the current model + checkpoints_loaded.popitem(last=False) # LRU # If vae used is not in dict, update it # It will be removed on refresh though diff --git a/modules/shared.py b/modules/shared.py index d4ddeea02..671d30e10 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -356,6 +356,7 @@ options_templates.update(options_section(('training', "Training"), { options_templates.update(options_section(('sd', "Stable Diffusion"), { "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), + "sd_vae_checkpoint_cache": OptionInfo(0, 
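Aside on the sd_vae.py hunk above: checkpoints_loaded is an OrderedDict acting as a bounded cache of VAE state dicts, and popitem(last=False) evicts from the front once the configured limit is exceeded. Note that as written a cache hit does not move the entry to the end, so eviction follows insertion order rather than strict least-recently-used. A minimal sketch of the pattern (names illustrative, not the repo's API):

    import collections

    cache = collections.OrderedDict()

    def cache_put(key, value, limit):
        cache[key] = value
        while len(cache) > limit:
            cache.popitem(last=False)  # drop the oldest insertion first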
"VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), "sd_vae": OptionInfo("auto", "SD VAE", gr.Dropdown, lambda: {"choices": sd_vae.vae_list}, refresh=sd_vae.refresh_vae_list), "sd_vae_as_default": OptionInfo(False, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"), "sd_hypernetwork": OptionInfo("None", "Hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks), From ae955b0146a52ea2474c79655ede0d361829ef63 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Mon, 26 Dec 2022 09:53:26 -0500 Subject: [PATCH 10/53] fix rgba to rgb when using jpeg output --- modules/images.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/images.py b/modules/images.py index 31d4528dc..962a955d5 100644 --- a/modules/images.py +++ b/modules/images.py @@ -525,6 +525,9 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data) elif extension.lower() in (".jpg", ".jpeg", ".webp"): + if image_to_save.mode == 'RGBA': + image_to_save = image_to_save.convert("RGB") + image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality) if opts.enable_pnginfo and info is not None: From 5958bbd244703f7c248a91e86dea5d52acc85505 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Fri, 30 Dec 2022 19:36:36 -0500 Subject: [PATCH 11/53] add additional memory states --- modules/memmon.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/memmon.py b/modules/memmon.py index 9fb9b687c..a7060f585 100644 --- a/modules/memmon.py +++ b/modules/memmon.py @@ -71,10 +71,13 @@ class MemUsageMonitor(threading.Thread): def read(self): if not self.disabled: free, total = torch.cuda.mem_get_info() + self.data["free"] = free self.data["total"] = total torch_stats = torch.cuda.memory_stats(self.device) + self.data["active"] = torch_stats["active.all.current"] self.data["active_peak"] = torch_stats["active_bytes.all.peak"] + self.data["reserved"] = torch_stats["reserved_bytes.all.current"] self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"] self.data["system_peak"] = total - self.data["min_free"] From d3aa2a48e1e896b6ffafda5367200a4bbd46b0d7 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Fri, 30 Dec 2022 19:38:53 -0500 Subject: [PATCH 12/53] remove unnecessary console message --- modules/sd_hijack_inpainting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index bb5499b3c..06b75772f 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -178,7 +178,7 @@ def sample_plms(self, # sampling C, H, W = shape size = (batch_size, C, H, W) - print(f'Data shape for PLMS sampling is {size}') + # print(f'Data shape for PLMS sampling is {size}') # remove unnecessary message samples, intermediates = self.plms_sampling(conditioning, size, callback=callback, From 463048344fc036b262aa132584b65ee6e9fec6cf Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Fri, 30 Dec 2022 19:41:47 -0500 Subject: [PATCH 13/53] fix shared state dictionary --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index d4ddeea02..9a13fb604 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -168,7 +168,7 @@ class State: def dict(self): obj = { "skipped": 
self.skipped,
-        "interrupted": self.skipped,
+        "interrupted": self.interrupted,
        "job": self.job,
        "job_count": self.job_count,
        "job_no": self.job_no,

From fef98723b2b1c7a9893ead41bbefcb36192babd6 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 31 Dec 2022 12:44:26 +0300
Subject: [PATCH 14/53] set sd_model for API later, inside the lock, to prevent multiple requests with different models ending up with incorrect results #5877 #6012

---
 modules/api/api.py | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/modules/api/api.py b/modules/api/api.py
index 59b81c932..11daff0d6 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -121,7 +121,6 @@ class Api:
    def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
        populate = txt2imgreq.copy(update={ # Override __init__ params
-            "sd_model": shared.sd_model,
            "sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index),
            "do_not_save_samples": True,
            "do_not_save_grid": True
@@ -129,9 +128,10 @@ class Api:
        )
        if populate.sampler_name:
            populate.sampler_index = None # prevent a warning later on
-        p = StableDiffusionProcessingTxt2Img(**vars(populate))

        with self.queue_lock:
+            p = StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **vars(populate))
+
            shared.state.begin()
            processed = process_images(p)
            shared.state.end()
@@ -151,7 +151,6 @@ class Api:
        mask = decode_base64_to_image(mask)

        populate = img2imgreq.copy(update={ # Override __init__ params
-            "sd_model": shared.sd_model,
            "sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index),
            "do_not_save_samples": True,
            "do_not_save_grid": True,
@@ -163,11 +162,11 @@ class Api:
        args = vars(populate)
        args.pop('include_init_images', None) # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine.
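The rationale for this patch, spelled out: both API handlers previously dereferenced shared.sd_model before acquiring self.queue_lock, so a concurrent request that switched checkpoints could leave the first request sampling with the wrong model (issues #5877 and #6012 above). After the change, every read of shared.sd_model and the begin/end of job state happen under the lock; the matching img2img half of the hunk continues below. Condensed shape of a handler after the patch (abridged from the hunk above):

    with self.queue_lock:
        # shared.sd_model is only read while no other request can run
        p = StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **vars(populate))
        shared.state.begin()
        processed = process_images(p)
        shared.state.end()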
- p = StableDiffusionProcessingImg2Img(**args) - - p.init_images = [decode_base64_to_image(x) for x in init_images] with self.queue_lock: + p = StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args) + p.init_images = [decode_base64_to_image(x) for x in init_images] + shared.state.begin() processed = process_images(p) shared.state.end() From 65be1df7bb55b21a3d76630a397c820218cbd12a Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Sat, 31 Dec 2022 07:46:04 -0500 Subject: [PATCH 15/53] initialize result so not to cause exception on empty results --- modules/interrogate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/interrogate.py b/modules/interrogate.py index 469352105..6f761c5a5 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -135,7 +135,7 @@ class InterrogateModels: return caption[0] def interrogate(self, pil_image): - res = None + res = "" try: From f34c7341720fb2059992926c9f9ae6ff25f7385b Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 31 Dec 2022 18:06:35 +0300 Subject: [PATCH 16/53] alt-diffusion integration --- ...ence.yaml => alt-diffusion-inference.yaml} | 0 .../v1-inference.yaml | 0 modules/sd_hijack.py | 18 +++++----- modules/sd_hijack_clip.py | 14 +++----- modules/sd_hijack_xlmr.py | 34 +++++++++++++++++++ modules/shared.py | 10 +----- 6 files changed, 50 insertions(+), 26 deletions(-) rename configs/{altdiffusion/ad-inference.yaml => alt-diffusion-inference.yaml} (100%) rename v1-inference.yaml => configs/v1-inference.yaml (100%) create mode 100644 modules/sd_hijack_xlmr.py diff --git a/configs/altdiffusion/ad-inference.yaml b/configs/alt-diffusion-inference.yaml similarity index 100% rename from configs/altdiffusion/ad-inference.yaml rename to configs/alt-diffusion-inference.yaml diff --git a/v1-inference.yaml b/configs/v1-inference.yaml similarity index 100% rename from v1-inference.yaml rename to configs/v1-inference.yaml diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index bce23b036..edcbaf52b 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -5,7 +5,7 @@ import modules.textual_inversion.textual_inversion from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint from modules.hypernetworks import hypernetwork from modules.shared import cmd_opts -from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet +from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr from modules.sd_hijack_optimizations import invokeAI_mps_available @@ -68,6 +68,7 @@ def fix_checkpoint(): ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = sd_hijack_checkpoint.ResBlock_forward ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = sd_hijack_checkpoint.AttentionBlock_forward + class StableDiffusionModelHijack: fixes = None comments = [] @@ -79,21 +80,22 @@ class StableDiffusionModelHijack: def hijack(self, m): - if shared.text_model_name == "XLMR-Large": + if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation: model_embeddings = m.cond_stage_model.roberta.embeddings model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self) - m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) - + m.cond_stage_model = sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords(m.cond_stage_model, self) + elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder: model_embeddings = 
m.cond_stage_model.transformer.text_model.embeddings model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self) m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) - apply_optimizations() + elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder: m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self) m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) - apply_optimizations() - + + apply_optimizations() + self.clip = m.cond_stage_model fix_checkpoint() @@ -109,7 +111,7 @@ class StableDiffusionModelHijack: def undo_hijack(self, m): - if shared.text_model_name == "XLMR-Large": + if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation: m.cond_stage_model = m.cond_stage_model.wrapped elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords: diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 9ea6e1cec..6ec50cca1 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -4,7 +4,6 @@ import torch from modules import prompt_parser, devices from modules.shared import opts -import modules.shared as shared def get_target_prompt_token_count(token_count): return math.ceil(max(token_count, 1) / 75) * 75 @@ -177,9 +176,6 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count def forward(self, text): - if shared.text_model_name == "XLMR-Large": - return self.wrapped.encode(text) - use_old = opts.use_old_emphasis_implementation if use_old: batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text) @@ -257,13 +253,13 @@ class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase): def __init__(self, wrapped, hijack): super().__init__(wrapped, hijack) self.tokenizer = wrapped.tokenizer - if shared.text_model_name == "XLMR-Large": - self.comma_token = None - else : - self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0] + + vocab = self.tokenizer.get_vocab() + + self.comma_token = vocab.get(',', None) self.token_mults = {} - tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k] + tokens_with_parens = [(k, v) for k, v in vocab.items() if '(' in k or ')' in k or '[' in k or ']' in k] for text, ident in tokens_with_parens: mult = 1.0 for c in text: diff --git a/modules/sd_hijack_xlmr.py b/modules/sd_hijack_xlmr.py new file mode 100644 index 000000000..4ac51c386 --- /dev/null +++ b/modules/sd_hijack_xlmr.py @@ -0,0 +1,34 @@ +import open_clip.tokenizer +import torch + +from modules import sd_hijack_clip, devices +from modules.shared import opts + + +class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords): + def __init__(self, wrapped, hijack): + super().__init__(wrapped, hijack) + + self.id_start = wrapped.config.bos_token_id + self.id_end = wrapped.config.eos_token_id + self.id_pad = wrapped.config.pad_token_id + + self.comma_token = self.tokenizer.get_vocab().get(',', None) # alt diffusion doesn't have bits for comma + + def encode_with_transformers(self, tokens): + # there's no CLIP Skip here because all hidden layers have size of 1024 and the last one uses a + # trained layer to 
transform those 1024 into 768 for unet; so you can't choose which transformer + # layer to work with - you have to use the last + + attention_mask = (tokens != self.id_pad).to(device=tokens.device, dtype=torch.int64) + features = self.wrapped(input_ids=tokens, attention_mask=attention_mask) + z = features['projection_state'] + + return z + + def encode_embedding_init_text(self, init_text, nvpt): + embedding_layer = self.wrapped.roberta.embeddings + ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"] + embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0) + + return embedded diff --git a/modules/shared.py b/modules/shared.py index 2b31e7170..715b9169e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -23,7 +23,7 @@ demo = None sd_model_file = os.path.join(script_path, 'model.ckpt') default_sd_model_file = sd_model_file parser = argparse.ArgumentParser() -parser.add_argument("--config", type=str, default=os.path.join(script_path, "v1-inference.yaml"), help="path to config which constructs model",) +parser.add_argument("--config", type=str, default=os.path.join(script_path, "configs/v1-inference.yaml"), help="path to config which constructs model",) parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",) parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints") parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) @@ -108,14 +108,6 @@ restricted_opts = { "outdir_txt2img_grids", "outdir_save", } -from omegaconf import OmegaConf -config = OmegaConf.load(f"{cmd_opts.config}") -# XLMR-Large -try: - text_model_name = config.model.params.cond_stage_config.params.name - -except : - text_model_name = "stable_diffusion" cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access From f55ac33d446185680604e872ceda2ae858821d5c Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Sat, 31 Dec 2022 11:27:02 -0500 Subject: [PATCH 17/53] validate textual inversion embeddings --- modules/sd_models.py | 3 ++ .../textual_inversion/textual_inversion.py | 43 ++++++++++++++++--- modules/ui.py | 2 - 3 files changed, 41 insertions(+), 7 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index ecdd91c50..ebd4dff7f 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -325,6 +325,9 @@ def load_model(checkpoint_info=None): script_callbacks.model_loaded_callback(sd_model) print("Model loaded.") + + sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload = True) # Reload embeddings after model load as they may or may not fit the model + return sd_model diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index f6112578e..103ace605 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -23,6 +23,8 @@ class Embedding: self.vec = vec self.name = name self.step = step + self.shape = None + self.vectors = 0 self.cached_checksum = None self.sd_checkpoint = None self.sd_checkpoint_name = None @@ -57,8 +59,10 @@ class EmbeddingDatabase: def __init__(self, embeddings_dir): 
self.ids_lookup = {} self.word_embeddings = {} + self.skipped_embeddings = [] self.dir_mtime = None self.embeddings_dir = embeddings_dir + self.expected_shape = -1 def register_embedding(self, embedding, model): @@ -75,14 +79,35 @@ class EmbeddingDatabase: return embedding - def load_textual_inversion_embeddings(self): + def get_expected_shape(self): + expected_shape = -1 # initialize with unknown + idx = torch.tensor(0).to(shared.device) + if expected_shape == -1: + try: # matches sd15 signature + first_embedding = shared.sd_model.cond_stage_model.wrapped.transformer.text_model.embeddings.token_embedding.wrapped(idx) + expected_shape = first_embedding.shape[0] + except: + pass + if expected_shape == -1: + try: # matches sd20 signature + first_embedding = shared.sd_model.cond_stage_model.wrapped.model.token_embedding.wrapped(idx) + expected_shape = first_embedding.shape[0] + except: + pass + if expected_shape == -1: + print('Could not determine expected embeddings shape from model') + return expected_shape + + def load_textual_inversion_embeddings(self, force_reload = False): mt = os.path.getmtime(self.embeddings_dir) - if self.dir_mtime is not None and mt <= self.dir_mtime: + if not force_reload and self.dir_mtime is not None and mt <= self.dir_mtime: return self.dir_mtime = mt self.ids_lookup.clear() self.word_embeddings.clear() + self.skipped_embeddings = [] + self.expected_shape = self.get_expected_shape() def process_file(path, filename): name = os.path.splitext(filename)[0] @@ -122,7 +147,14 @@ class EmbeddingDatabase: embedding.step = data.get('step', None) embedding.sd_checkpoint = data.get('sd_checkpoint', None) embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None) - self.register_embedding(embedding, shared.sd_model) + embedding.vectors = vec.shape[0] + embedding.shape = vec.shape[-1] + + if (self.expected_shape == -1) or (self.expected_shape == embedding.shape): + self.register_embedding(embedding, shared.sd_model) + else: + self.skipped_embeddings.append(name) + # print('Skipping embedding {name}: shape was {shape} expected {expected}'.format(name = name, shape = embedding.shape, expected = self.expected_shape)) for fn in os.listdir(self.embeddings_dir): try: @@ -137,8 +169,9 @@ class EmbeddingDatabase: print(traceback.format_exc(), file=sys.stderr) continue - print(f"Loaded a total of {len(self.word_embeddings)} textual inversion embeddings.") - print("Embeddings:", ', '.join(self.word_embeddings.keys())) + print("Textual inversion embeddings {num} loaded: {val}".format(num = len(self.word_embeddings), val = ', '.join(self.word_embeddings.keys()))) + if (len(self.skipped_embeddings) > 0): + print("Textual inversion embeddings {num} skipped: {val}".format(num = len(self.skipped_embeddings), val = ', '.join(self.skipped_embeddings))) def find_embedding_at_position(self, tokens, offset): token = tokens[offset] diff --git a/modules/ui.py b/modules/ui.py index 57ee04654..397dd8046 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1157,8 +1157,6 @@ def create_ui(): with gr.Column(variant='panel'): submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False) - sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() - with gr.Blocks(analytics_enabled=False) as train_interface: with gr.Row().style(equal_height=False): gr.HTML(value="
See wiki for detailed explanation.
") From bdbe09827b39be63c9c0b3636132ca58da38ebf6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 31 Dec 2022 22:49:09 +0300 Subject: [PATCH 18/53] changed embedding accepted shape detection to use existing code and support the new alt-diffusion model, and reformatted messages a bit #6149 --- .../textual_inversion/textual_inversion.py | 30 ++++--------------- 1 file changed, 6 insertions(+), 24 deletions(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 103ace605..66f40367a 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -80,23 +80,8 @@ class EmbeddingDatabase: return embedding def get_expected_shape(self): - expected_shape = -1 # initialize with unknown - idx = torch.tensor(0).to(shared.device) - if expected_shape == -1: - try: # matches sd15 signature - first_embedding = shared.sd_model.cond_stage_model.wrapped.transformer.text_model.embeddings.token_embedding.wrapped(idx) - expected_shape = first_embedding.shape[0] - except: - pass - if expected_shape == -1: - try: # matches sd20 signature - first_embedding = shared.sd_model.cond_stage_model.wrapped.model.token_embedding.wrapped(idx) - expected_shape = first_embedding.shape[0] - except: - pass - if expected_shape == -1: - print('Could not determine expected embeddings shape from model') - return expected_shape + vec = shared.sd_model.cond_stage_model.encode_embedding_init_text(",", 1) + return vec.shape[1] def load_textual_inversion_embeddings(self, force_reload = False): mt = os.path.getmtime(self.embeddings_dir) @@ -112,8 +97,6 @@ class EmbeddingDatabase: def process_file(path, filename): name = os.path.splitext(filename)[0] - data = [] - if os.path.splitext(filename.upper())[-1] in ['.PNG', '.WEBP', '.JXL', '.AVIF']: embed_image = Image.open(path) if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text: @@ -150,11 +133,10 @@ class EmbeddingDatabase: embedding.vectors = vec.shape[0] embedding.shape = vec.shape[-1] - if (self.expected_shape == -1) or (self.expected_shape == embedding.shape): + if self.expected_shape == -1 or self.expected_shape == embedding.shape: self.register_embedding(embedding, shared.sd_model) else: self.skipped_embeddings.append(name) - # print('Skipping embedding {name}: shape was {shape} expected {expected}'.format(name = name, shape = embedding.shape, expected = self.expected_shape)) for fn in os.listdir(self.embeddings_dir): try: @@ -169,9 +151,9 @@ class EmbeddingDatabase: print(traceback.format_exc(), file=sys.stderr) continue - print("Textual inversion embeddings {num} loaded: {val}".format(num = len(self.word_embeddings), val = ', '.join(self.word_embeddings.keys()))) - if (len(self.skipped_embeddings) > 0): - print("Textual inversion embeddings {num} skipped: {val}".format(num = len(self.skipped_embeddings), val = ', '.join(self.skipped_embeddings))) + print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}") + if len(self.skipped_embeddings) > 0: + print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings)}") def find_embedding_at_position(self, tokens, offset): token = tokens[offset] From f4535f6e4f001314bd155bc6e1b6908e02792b9a Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 31 Dec 2022 23:40:55 +0300 Subject: [PATCH 19/53] make it so that memory/embeddings info is displayed in a separate 
UI element from generation parameters, and is preserved when you change the displayed infotext by clicking on gallery images --- modules/img2img.py | 2 +- modules/processing.py | 5 +++-- modules/txt2img.py | 2 +- modules/ui.py | 31 +++++++++++++++++-------------- 4 files changed, 22 insertions(+), 18 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index 81da4b131..ca58b5d87 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -162,4 +162,4 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro if opts.do_not_show_images: processed.images = [] - return processed.images, generation_info_js, plaintext_to_html(processed.info) + return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments) diff --git a/modules/processing.py b/modules/processing.py index 0a9a8f95c..42dc19ea6 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -239,7 +239,7 @@ class StableDiffusionProcessing(): class Processed: - def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None): + def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""): self.images = images_list self.prompt = p.prompt self.negative_prompt = p.negative_prompt @@ -247,6 +247,7 @@ class Processed: self.subseed = subseed self.subseed_strength = p.subseed_strength self.info = info + self.comments = comments self.width = p.width self.height = p.height self.sampler_name = p.sampler_name @@ -646,7 +647,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() - res = Processed(p, output_images, p.all_seeds[0], infotext() + "".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts) + res = Processed(p, output_images, p.all_seeds[0], infotext(), comments="".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts) if p.scripts is not None: p.scripts.postprocess(p, res) diff --git a/modules/txt2img.py b/modules/txt2img.py index c8f81176b..7f61e19a8 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -59,4 +59,4 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: if opts.do_not_show_images: processed.images = [] - return processed.images, generation_info_js, plaintext_to_html(processed.info) + return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments) diff --git a/modules/ui.py b/modules/ui.py index 397dd8046..f550ad006 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -159,7 +159,7 @@ def save_files(js_data, images, do_make_zip, index): zip_file.writestr(filenames[i], f.read()) fullfns.insert(0, zip_filepath) - return gr.File.update(value=fullfns, visible=True), '', '', plaintext_to_html(f"Saved: {filenames[0]}") + return gr.File.update(value=fullfns, visible=True), plaintext_to_html(f"Saved: {filenames[0]}") @@ -593,6 +593,8 @@ Requested path was: {f} with gr.Group(): html_info = gr.HTML() + html_log = gr.HTML() + generation_info = gr.Textbox(visible=False) if tabname == 'txt2img' or tabname == 'img2img': generation_info_button = 
gr.Button(visible=False, elem_id=f"{tabname}_generation_info_button") @@ -615,16 +617,16 @@ Requested path was: {f} ], outputs=[ download_files, - html_info, - html_info, - html_info, + html_log, ] ) else: html_info_x = gr.HTML() html_info = gr.HTML() + html_log = gr.HTML() + parameters_copypaste.bind_buttons(buttons, result_gallery, "txt2img" if tabname == "txt2img" else None) - return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info + return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info, html_log def create_ui(): @@ -686,14 +688,14 @@ def create_ui(): with gr.Group(): custom_inputs = modules.scripts.scripts_txt2img.setup_ui() - txt2img_gallery, generation_info, html_info = create_output_panel("txt2img", opts.outdir_txt2img_samples) + txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) parameters_copypaste.bind_buttons({"txt2img": txt2img_paste}, None, txt2img_prompt) connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) txt2img_args = dict( - fn=wrap_gradio_gpu_call(modules.txt2img.txt2img), + fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), _js="submit", inputs=[ txt2img_prompt, @@ -720,7 +722,8 @@ def create_ui(): outputs=[ txt2img_gallery, generation_info, - html_info + html_info, + html_log, ], show_progress=False, ) @@ -799,7 +802,6 @@ def create_ui(): with gr.Blocks(analytics_enabled=False) as img2img_interface: img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste,token_counter, token_button = create_toprow(is_img2img=True) - with gr.Row(elem_id='img2img_progress_row'): img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="bytes", visible=False) @@ -883,7 +885,7 @@ def create_ui(): with gr.Group(): custom_inputs = modules.scripts.scripts_img2img.setup_ui() - img2img_gallery, generation_info, html_info = create_output_panel("img2img", opts.outdir_img2img_samples) + img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) parameters_copypaste.bind_buttons({"img2img": img2img_paste}, None, img2img_prompt) connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) @@ -915,7 +917,7 @@ def create_ui(): ) img2img_args = dict( - fn=wrap_gradio_gpu_call(modules.img2img.img2img), + fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), _js="submit_img2img", inputs=[ dummy_component, @@ -954,7 +956,8 @@ def create_ui(): outputs=[ img2img_gallery, generation_info, - html_info + html_info, + html_log, ], show_progress=False, ) @@ -1078,10 +1081,10 @@ def create_ui(): with gr.Group(): upscale_before_face_fix = gr.Checkbox(label='Upscale Before Restoring Faces', value=False) - result_images, html_info_x, html_info = create_output_panel("extras", opts.outdir_extras_samples) + result_images, html_info_x, html_info, html_log = create_output_panel("extras", opts.outdir_extras_samples) submit.click( - fn=wrap_gradio_gpu_call(modules.extras.run_extras), + fn=wrap_gradio_gpu_call(modules.extras.run_extras, extra_outputs=[None, '']), _js="get_extras_tab_index", inputs=[ dummy_component, From 
360feed9b55fb03060c236773867b08b4265645d Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 00:38:58 +0300 Subject: [PATCH 20/53] HAPPY NEW YEAR make save to zip into its own button instead of a checkbox --- modules/ui.py | 30 ++++++++++++++++++++++-------- style.css | 6 ++++++ 2 files changed, 28 insertions(+), 8 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index f550ad006..279b51101 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -570,13 +570,14 @@ Requested path was: {f} generation_info = None with gr.Column(): - with gr.Row(): + with gr.Row(elem_id=f"image_buttons_{tabname}"): + open_folder_button = gr.Button(folder_symbol, elem_id="hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder') + if tabname != "extras": save = gr.Button('Save', elem_id=f'save_{tabname}') + save_zip = gr.Button('Zip', elem_id=f'save_zip_{tabname}') buttons = parameters_copypaste.create_buttons(["img2img", "inpaint", "extras"]) - button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder' - open_folder_button = gr.Button(folder_symbol, elem_id=button_id) open_folder_button.click( fn=lambda: open_folder(opts.outdir_samples or outdir), @@ -585,9 +586,6 @@ Requested path was: {f} ) if tabname != "extras": - with gr.Row(): - do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False) - with gr.Row(): download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False) @@ -608,11 +606,11 @@ Requested path was: {f} save.click( fn=wrap_gradio_call(save_files), - _js="(x, y, z, w) => [x, y, z, selected_gallery_index()]", + _js="(x, y, z, w) => [x, y, false, selected_gallery_index()]", inputs=[ generation_info, result_gallery, - do_make_zip, + html_info, html_info, ], outputs=[ @@ -620,6 +618,22 @@ Requested path was: {f} html_log, ] ) + + save_zip.click( + fn=wrap_gradio_call(save_files), + _js="(x, y, z, w) => [x, y, true, selected_gallery_index()]", + inputs=[ + generation_info, + result_gallery, + html_info, + html_info, + ], + outputs=[ + download_files, + html_log, + ] + ) + else: html_info_x = gr.HTML() html_info = gr.HTML() diff --git a/style.css b/style.css index 3ad780062..f245f6748 100644 --- a/style.css +++ b/style.css @@ -568,6 +568,12 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h font-size: 95%; } +#image_buttons_txt2img button, #image_buttons_img2img button, #image_buttons_extras button{ + min-width: auto; + padding-left: 0.5em; + padding-right: 0.5em; +} + /* The following handles localization for right-to-left (RTL) languages like Arabic. The rtl media type will only be activated by the logic in javascript/localization.js. 
If you change anything above, you need to make sure it is RTL compliant by just running From 29a3a7eb13478297bc7093971b48827ab8246f45 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 01:19:10 +0300 Subject: [PATCH 21/53] show sampler selection in dropdown, add option selection to revert to old radio group --- modules/shared.py | 1 + modules/ui.py | 22 +++++++++++++++------- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 715b9169e..948b95426 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -406,6 +406,7 @@ options_templates.update(options_section(('ui', "User interface"), { "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), + "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"), 'quicksettings': OptionInfo("sd_model_checkpoint", "Quicksettings list"), 'localization': OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), })) diff --git a/modules/ui.py b/modules/ui.py index 279b51101..c7b8ea5da 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -643,6 +643,19 @@ Requested path was: {f} return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info, html_log +def create_sampler_and_steps_selection(choices, tabname): + if opts.samplers_in_dropdown: + with gr.Row(elem_id=f"sampler_selection_{tabname}"): + sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20) + else: + with gr.Group(elem_id=f"sampler_selection_{tabname}"): + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20) + sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + + return steps, sampler_index + + def create_ui(): import modules.img2img import modules.txt2img @@ -660,9 +673,6 @@ def create_ui(): dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="bytes", visible=False) - - - with gr.Row(elem_id='txt2img_progress_row'): with gr.Column(scale=1): pass @@ -674,8 +684,7 @@ def create_ui(): with gr.Row().style(equal_height=False): with gr.Column(variant='panel', elem_id="txt2img_settings"): - steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20) - sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index") + steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") with gr.Group(): width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512) @@ -875,8 +884,7 @@ def create_ui(): with gr.Row(): resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent 
upscale)"], type="index", value="Just resize") - steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20) - sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index") + steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") with gr.Group(): width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") From 210449b374d522c94a67fe54289a9eb515933a9f Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 02:41:15 +0300 Subject: [PATCH 22/53] fix 'RuntimeError: Expected all tensors to be on the same device' error preventing models from loading on lowvram/medvram. --- modules/sd_hijack_clip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 6ec50cca1..ca92b142c 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -298,6 +298,6 @@ class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase): def encode_embedding_init_text(self, init_text, nvpt): embedding_layer = self.wrapped.transformer.text_model.embeddings ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"] - embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0) + embedded = embedding_layer.token_embedding.wrapped(ids.to(embedding_layer.token_embedding.wrapped.weight.device)).squeeze(0) return embedded From a939e82a0b982517aa212197a0e5f6d11daec7d0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 03:24:58 +0300 Subject: [PATCH 23/53] fix weird padding for sampler dropdown in chrome --- style.css | 5 ----- 1 file changed, 5 deletions(-) diff --git a/style.css b/style.css index f245f6748..4b98b84dc 100644 --- a/style.css +++ b/style.css @@ -245,11 +245,6 @@ input[type="range"]{ margin: 0.5em 0 -0.3em 0; } -#txt2img_sampling label{ - padding-left: 0.6em; - padding-right: 0.6em; -} - #mask_bug_info { text-align: center; display: block; From 16b9661d2741b241c3964fcbd56559c078b84822 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 09:51:37 +0300 Subject: [PATCH 24/53] change karras scheduler sigmas to values recommended by SD from old 0.1 to 10 with an option to revert to old --- modules/sd_samplers.py | 4 +++- modules/shared.py | 6 +++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 177b53380..e904d8609 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -465,7 +465,9 @@ class KDiffusionSampler: if p.sampler_noise_scheduler_override: sigmas = p.sampler_noise_scheduler_override(steps) elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': - sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device) + sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) + + sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) else: sigmas = self.model_wrap.get_sigmas(steps) diff --git a/modules/shared.py b/modules/shared.py index 948b95426..7f430b938 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -368,13 +368,17 @@ 
options_templates.update(options_section(('sd', "Stable Diffusion"), { "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill image's transparent parts with this color.", gr.ColorPicker, {}), "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."), "enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"), - "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }), 'CLIP_stop_at_last_layers': OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}), "random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}), })) +options_templates.update(options_section(('compatibility', "Compatibility"), { + "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), + "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), +})) + options_templates.update(options_section(('interrogate', "Interrogate Options"), { "interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"), "interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"), From 11d432d92d63660c516540dcb48faac87669b4f0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 10:35:38 +0300 Subject: [PATCH 25/53] add refresh buttons to checkpoint merger --- modules/ui.py | 6 ++++++ style.css | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index c7b8ea5da..4cc2ce4f3 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1167,8 +1167,14 @@ def create_ui(): with gr.Row(): primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") + create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") + secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") + create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") + tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") + create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") + custom_name = gr.Textbox(label="Custom Name (Optional)") interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3) interp_method = gr.Radio(choices=["Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method") 
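+ # For reference, create_refresh_button takes (refresh_component, refresh_method,
+ # refreshed_args, elem_id): clicking the button calls refresh_method and then applies
+ # refreshed_args to the component via gr.update. A sketch of an equivalent call,
+ # with illustrative names (not part of this patch):
+ #
+ #     items_dropdown = gr.Dropdown(list_items(), label="Items")
+ #     create_refresh_button(items_dropdown, list_items, lambda: {"choices": list_items()}, "refresh_items")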
diff --git a/style.css b/style.css index 4b98b84dc..516ef7bf9 100644 --- a/style.css +++ b/style.css @@ -496,7 +496,7 @@ input[type="range"]{ padding: 0; } -#refresh_sd_model_checkpoint, #refresh_sd_vae, #refresh_sd_hypernetwork, #refresh_train_hypernetwork_name, #refresh_train_embedding_name, #refresh_localization{ +#refresh_sd_model_checkpoint, #refresh_sd_vae, #refresh_sd_hypernetwork, #refresh_train_hypernetwork_name, #refresh_train_embedding_name, #refresh_localization, #refresh_checkpoint_A, #refresh_checkpoint_B, #refresh_checkpoint_C{ max-width: 2.5em; min-width: 2.5em; height: 2.4em; From 76f256fe8f844641f4e9b41f35c7dd2cba5090d6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 11:08:39 +0300 Subject: [PATCH 26/53] Bump gradio version #YOLO --- modules/ui_tempdir.py | 3 ++- requirements.txt | 2 +- requirements_versions.txt | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 07210d14a..8d5193107 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -15,7 +15,8 @@ Savedfile = namedtuple("Savedfile", ["name"]) def save_pil_to_file(pil_image, dir=None): already_saved_as = getattr(pil_image, 'already_saved_as', None) if already_saved_as and os.path.isfile(already_saved_as): - shared.demo.temp_dirs = shared.demo.temp_dirs | {os.path.abspath(os.path.dirname(already_saved_as))} + shared.demo.temp_file_sets[0] = shared.demo.temp_file_sets[0] | {os.path.abspath(already_saved_as)} + file_obj = Savedfile(already_saved_as) return file_obj diff --git a/requirements.txt b/requirements.txt index 5bed694e2..e2c3876b5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,7 @@ fairscale==0.4.4 fonts font-roboto gfpgan -gradio==3.9 +gradio==3.15.0 invisible-watermark numpy omegaconf diff --git a/requirements_versions.txt b/requirements_versions.txt index c126c8c40..836523ba2 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -3,7 +3,7 @@ transformers==4.19.2 accelerate==0.12.0 basicsr==1.4.2 gfpgan==1.3.8 -gradio==3.9 +gradio==3.15.0 numpy==1.23.3 Pillow==9.2.0 realesrgan==0.3.0 From b46b97fa297b3a4a654da77cf98a775a2bcab4c7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 11:38:17 +0300 Subject: [PATCH 27/53] more fixes for gradio update --- modules/generation_parameters_copypaste.py | 2 +- modules/ui_tempdir.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index fbd913005..54b3372d9 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -38,7 +38,7 @@ def quote(text): def image_from_url_text(filedata): if type(filedata) == dict and filedata["is_file"]: filename = filedata["name"] - is_in_right_dir = any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in shared.demo.temp_dirs) + is_in_right_dir = any([filename in fileset for fileset in shared.demo.temp_file_sets]) assert is_in_right_dir, 'trying to open image file outside of allowed directories' return Image.open(filename) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 8d5193107..363d449d3 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -45,7 +45,7 @@ def on_tmpdir_changed(): os.makedirs(shared.opts.temp_dir, exist_ok=True) - shared.demo.temp_dirs = shared.demo.temp_dirs | {os.path.abspath(shared.opts.temp_dir)} + shared.demo.temp_file_sets[0] = 
shared.demo.temp_file_sets[0] | {os.path.abspath(shared.opts.temp_dir)} def cleanup_tmpdr(): From e5f1a37cb9b537d95b2df47c96b4a4f7242fd294 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 13:08:40 +0300 Subject: [PATCH 28/53] make refresh buttons look more nice --- modules/ui.py | 6 +++--- modules/ui_components.py | 18 ++++++++++++++++++ style.css | 28 +++++++++++++++++++++------- 3 files changed, 42 insertions(+), 10 deletions(-) create mode 100644 modules/ui_components.py diff --git a/modules/ui.py b/modules/ui.py index 4cc2ce4f3..32fa80d17 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -19,7 +19,7 @@ import numpy as np from PIL import Image, PngImagePlugin from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru +from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, ui_components from modules.paths import script_path from modules.shared import opts, cmd_opts, restricted_opts @@ -532,7 +532,7 @@ def create_refresh_button(refresh_component, refresh_method, refreshed_args, ele return gr.update(**(args or {})) - refresh_button = gr.Button(value=refresh_symbol, elem_id=elem_id) + refresh_button = ui_components.ToolButton(value=refresh_symbol, elem_id=elem_id) refresh_button.click( fn=refresh, inputs=[], @@ -1476,7 +1476,7 @@ def create_ui(): res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key) else: - with gr.Row(variant="compact"): + with ui_components.FormRow(): res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key) else: diff --git a/modules/ui_components.py b/modules/ui_components.py new file mode 100644 index 000000000..d0519d2d6 --- /dev/null +++ b/modules/ui_components.py @@ -0,0 +1,18 @@ +import gradio as gr + + +class ToolButton(gr.Button, gr.components.FormComponent): + """Small button with single emoji as text, fits inside gradio forms""" + + def __init__(self, **kwargs): + super().__init__(variant="tool", **kwargs) + + def get_block_name(self): + return "button" + + +class FormRow(gr.Row, gr.components.FormComponent): + """Same as gr.Row but fits inside gradio forms""" + + def get_block_name(self): + return "row" diff --git a/style.css b/style.css index 516ef7bf9..f168571e7 100644 --- a/style.css +++ b/style.css @@ -496,13 +496,6 @@ input[type="range"]{ padding: 0; } -#refresh_sd_model_checkpoint, #refresh_sd_vae, #refresh_sd_hypernetwork, #refresh_train_hypernetwork_name, #refresh_train_embedding_name, #refresh_localization, #refresh_checkpoint_A, #refresh_checkpoint_B, #refresh_checkpoint_C{ - max-width: 2.5em; - min-width: 2.5em; - height: 2.4em; -} - - canvas[key="mask"] { z-index: 12 !important; filter: invert(); @@ -569,6 +562,27 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h padding-right: 0.5em; } +.gr-form{ + background-color: white; +} + +.dark .gr-form{ + background-color: rgb(31 41 55 / var(--tw-bg-opacity)); +} + +.gr-button-tool{ + max-width: 2.5em; + min-width: 2.5em !important; + height: 2.4em; + margin: 0.55em 0; +} + +#quicksettings .gr-button-tool{ + margin: 0; +} + + + /* The following handles localization for right-to-left (RTL) languages like Arabic. 
The rtl media type will only be activated by the logic in javascript/localization.js. If you change anything above, you need to make sure it is RTL compliant by just running From 5f12b23b8bb7fca585a3a1e844881d06f171364e Mon Sep 17 00:00:00 2001 From: AlUlkesh <99896447+AlUlkesh@users.noreply.github.com> Date: Wed, 28 Dec 2022 22:18:19 +0100 Subject: [PATCH 29/53] Adding image numbers on grids New grid option in settings enables adding of image numbers on grids. This makes identifying the images, especially in larger batches, much easier. Revert "Adding image numbers on grids" This reverts commit 3530c283b4b1d3a3cab40efbffe4cf2697938b6f. Implements Callback for image grid loop Necessary to make "Add image's number to its picture in the grid" extension possible. --- modules/images.py | 1 + modules/script_callbacks.py | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/modules/images.py b/modules/images.py index 31d4528dc..5afd38915 100644 --- a/modules/images.py +++ b/modules/images.py @@ -43,6 +43,7 @@ def image_grid(imgs, batch_size=1, rows=None): grid = Image.new('RGB', size=(cols * w, rows * h), color='black') for i, img in enumerate(imgs): + script_callbacks.image_grid_loop_callback(img) grid.paste(img, box=(i % cols * w, i // cols * h)) return grid diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 8e22f8755..0c854407f 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -51,6 +51,11 @@ class UiTrainTabParams: self.txt2img_preview_params = txt2img_preview_params +class ImageGridLoopParams: + def __init__(self, img): + self.img = img + + ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"]) callback_map = dict( callbacks_app_started=[], @@ -63,6 +68,7 @@ callback_map = dict( callbacks_cfg_denoiser=[], callbacks_before_component=[], callbacks_after_component=[], + callbacks_image_grid_loop=[], ) @@ -154,6 +160,12 @@ def after_component_callback(component, **kwargs): except Exception: report_exception(c, 'after_component_callback') +def image_grid_loop_callback(component, **kwargs): + for c in callback_map['callbacks_image_grid_loop']: + try: + c.callback(component, **kwargs) + except Exception: + report_exception(c, 'image_grid_loop') def add_callback(callbacks, fun): stack = [x for x in inspect.stack() if x.filename != __file__] @@ -255,3 +267,11 @@ def on_before_component(callback): def on_after_component(callback): """register a function to be called after a component is created. See on_before_component for more.""" add_callback(callback_map['callbacks_after_component'], callback) + + +def on_image_grid_loop(callback): + """register a function to be called inside the image grid loop. + The callback is called with one argument: + - params: ImageGridLoopParams - parameters to be used inside the image grid loop. 
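+
+    A minimal usage sketch from an extension's point of view (the callback
+    name here is illustrative, not part of this patch):
+
+        from modules import script_callbacks
+
+        def on_grid_image(params):
+            # per ImageGridLoopParams, params.img is the image being pasted into the grid
+            print(params.img.size)
+
+        script_callbacks.on_image_grid_loop(on_grid_image)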
+ """ + add_callback(callback_map['callbacks_image_grid_loop'], callback) From 524d532b387732d4d32f237e792c7f201a934400 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 14:07:40 +0300 Subject: [PATCH 30/53] moved roll artist to built-in extensions --- .../roll-artist/scripts/roll-artist.py | 50 +++++++++++++++++++ modules/ui.py | 37 ++------------ 2 files changed, 53 insertions(+), 34 deletions(-) create mode 100644 extensions-builtin/roll-artist/scripts/roll-artist.py diff --git a/extensions-builtin/roll-artist/scripts/roll-artist.py b/extensions-builtin/roll-artist/scripts/roll-artist.py new file mode 100644 index 000000000..c3bc1fd09 --- /dev/null +++ b/extensions-builtin/roll-artist/scripts/roll-artist.py @@ -0,0 +1,50 @@ +import random + +from modules import script_callbacks, shared +import gradio as gr + +art_symbol = '\U0001f3a8' # 🎨 +global_prompt = None +related_ids = {"txt2img_prompt", "txt2img_clear_prompt", "img2img_prompt", "img2img_clear_prompt" } + + +def roll_artist(prompt): + allowed_cats = set([x for x in shared.artist_db.categories() if len(shared.opts.random_artist_categories)==0 or x in shared.opts.random_artist_categories]) + artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats]) + + return prompt + ", " + artist.name if prompt != '' else artist.name + + +def add_roll_button(prompt): + roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0) + + roll.click( + fn=roll_artist, + _js="update_txt2img_tokens", + inputs=[ + prompt, + ], + outputs=[ + prompt, + ] + ) + + +def after_component(component, **kwargs): + global global_prompt + + elem_id = kwargs.get('elem_id', None) + if elem_id not in related_ids: + return + + if elem_id == "txt2img_prompt": + global_prompt = component + elif elem_id == "txt2img_clear_prompt": + add_roll_button(global_prompt) + elif elem_id == "img2img_prompt": + global_prompt = component + elif elem_id == "img2img_clear_prompt": + add_roll_button(global_prompt) + + +script_callbacks.on_after_component(after_component) diff --git a/modules/ui.py b/modules/ui.py index 32fa80d17..27da2c2c9 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -80,7 +80,6 @@ css_hide_progressbar = """ # Important that they exactly match script.js for tooltip to work. 
random_symbol = '\U0001f3b2\ufe0f' # 🎲️ reuse_symbol = '\u267b\ufe0f' # ♻️ -art_symbol = '\U0001f3a8' # 🎨 paste_symbol = '\u2199\ufe0f' # ↙ folder_symbol = '\U0001f4c2' # 📂 refresh_symbol = '\U0001f504' # 🔄 @@ -234,13 +233,6 @@ def check_progress_call_initial(id_part): return check_progress_call(id_part) -def roll_artist(prompt): - allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories]) - artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats]) - - return prompt + ", " + artist.name if prompt != '' else artist.name - - def visit(x, func, path=""): if hasattr(x, 'children'): for c in x.children: @@ -403,7 +395,6 @@ def create_toprow(is_img2img): ) with gr.Column(scale=1, elem_id="roll_col"): - roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0) paste = gr.Button(value=paste_symbol, elem_id="paste") save_style = gr.Button(value=save_style_symbol, elem_id="style_create") prompt_style_apply = gr.Button(value=apply_style_symbol, elem_id="style_apply") @@ -452,7 +443,7 @@ def create_toprow(is_img2img): prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys()))) prompt_style2.save_to_config = True - return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button + return prompt, prompt_style, negative_prompt, prompt_style2, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button def setup_progressbar(progressbar, preview, id_part, textinfo=None): @@ -668,7 +659,7 @@ def create_ui(): modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _,txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, token_counter, token_button = create_toprow(is_img2img=False) + txt2img_prompt, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _,txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, token_counter, token_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="bytes", visible=False) @@ -771,16 +762,6 @@ def create_ui(): outputs=[hr_options], ) - roll.click( - fn=roll_artist, - _js="update_txt2img_tokens", - inputs=[ - txt2img_prompt, - ], - outputs=[ - txt2img_prompt, - ] - ) txt2img_paste_fields = [ (txt2img_prompt, "Prompt"), @@ -823,7 +804,7 @@ def create_ui(): modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste,token_counter, token_button = create_toprow(is_img2img=True) + img2img_prompt, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste,token_counter, token_button = 
create_toprow(is_img2img=True) with gr.Row(elem_id='img2img_progress_row'): img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="bytes", visible=False) @@ -999,18 +980,6 @@ def create_ui(): outputs=[img2img_prompt], ) - - roll.click( - fn=roll_artist, - _js="update_img2img_tokens", - inputs=[ - img2img_prompt, - ], - outputs=[ - img2img_prompt, - ] - ) - prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)] style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] From e672cfb07418a1a3130d3bf21c14a0d3819f81fb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 18:37:37 +0300 Subject: [PATCH 31/53] rework of callback for #6094 --- modules/images.py | 12 +++++++----- modules/script_callbacks.py | 26 +++++++++++++++----------- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/modules/images.py b/modules/images.py index 719aaf3b1..f84fd4858 100644 --- a/modules/images.py +++ b/modules/images.py @@ -39,12 +39,14 @@ def image_grid(imgs, batch_size=1, rows=None): cols = math.ceil(len(imgs) / rows) - w, h = imgs[0].size - grid = Image.new('RGB', size=(cols * w, rows * h), color='black') + params = script_callbacks.ImageGridLoopParams(imgs, cols, rows) + script_callbacks.image_grid_callback(params) - for i, img in enumerate(imgs): - script_callbacks.image_grid_loop_callback(img) - grid.paste(img, box=(i % cols * w, i // cols * h)) + w, h = imgs[0].size + grid = Image.new('RGB', size=(params.cols * w, params.rows * h), color='black') + + for i, img in enumerate(params.imgs): + grid.paste(img, box=(i % params.cols * w, i // params.cols * h)) return grid diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 0c854407f..de69fd9f4 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -52,8 +52,10 @@ class UiTrainTabParams: class ImageGridLoopParams: - def __init__(self, img): - self.img = img + def __init__(self, imgs, cols, rows): + self.imgs = imgs + self.cols = cols + self.rows = rows ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"]) @@ -68,7 +70,7 @@ callback_map = dict( callbacks_cfg_denoiser=[], callbacks_before_component=[], callbacks_after_component=[], - callbacks_image_grid_loop=[], + callbacks_image_grid=[], ) @@ -160,12 +162,14 @@ def after_component_callback(component, **kwargs): except Exception: report_exception(c, 'after_component_callback') -def image_grid_loop_callback(component, **kwargs): - for c in callback_map['callbacks_image_grid_loop']: + +def image_grid_callback(params: ImageGridLoopParams): + for c in callback_map['callbacks_image_grid']: try: - c.callback(component, **kwargs) + c.callback(params) except Exception: - report_exception(c, 'image_grid_loop') + report_exception(c, 'image_grid') + def add_callback(callbacks, fun): stack = [x for x in inspect.stack() if x.filename != __file__] @@ -269,9 +273,9 @@ def on_after_component(callback): add_callback(callback_map['callbacks_after_component'], callback) -def on_image_grid_loop(callback): - """register a function to be called inside the image grid loop. +def on_image_grid(callback): + """register a function to be called before making an image grid. The callback is called with one argument: - - params: ImageGridLoopParams - parameters to be used inside the image grid loop. 
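+
+    For example, a callback could force a single-column grid by mutating the
+    params (the function name here is illustrative, not part of this patch):
+
+        def one_column(params):
+            params.cols = 1
+            params.rows = len(params.imgs)
+
+        script_callbacks.on_image_grid(one_column)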
+ - params: ImageGridLoopParams - parameters to be used for grid creation. Can be modified. """ - add_callback(callback_map['callbacks_image_grid_loop'], callback) + add_callback(callback_map['callbacks_image_grid'], callback) From a005fccddd5a37c57f1afe5234660b59b9a41508 Mon Sep 17 00:00:00 2001 From: me <25877290+Kryptortio@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:51:12 +0100 Subject: [PATCH 32/53] Add a lot more elem_id/HTML id, modified some that were duplicates for seed section --- modules/generation_parameters_copypaste.py | 2 +- modules/ui.py | 252 ++++++++++----------- style.css | 12 +- 3 files changed, 133 insertions(+), 133 deletions(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 54b3372d9..8e7f0df0a 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -93,7 +93,7 @@ def integrate_settings_paste_fields(component_dict): def create_buttons(tabs_list): buttons = {} for tab in tabs_list: - buttons[tab] = gr.Button(f"Send to {tab}") + buttons[tab] = gr.Button(f"Send to {tab}", elem_id=f"{tab}_tab") return buttons diff --git a/modules/ui.py b/modules/ui.py index 27da2c2c9..7070ea155 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -272,17 +272,17 @@ def interrogate_deepbooru(image): return gr_show(True) if prompt is None else prompt -def create_seed_inputs(): +def create_seed_inputs(target_interface): with gr.Row(): with gr.Box(): - with gr.Row(elem_id='seed_row'): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1) + with gr.Row(elem_id=target_interface + '_seed_row'): + seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed') seed.style(container=False) - random_seed = gr.Button(random_symbol, elem_id='random_seed') - reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed') + random_seed = gr.Button(random_symbol, elem_id=target_interface + '_random_seed') + reuse_seed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_seed') - with gr.Box(elem_id='subseed_show_box'): - seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False) + with gr.Box(elem_id=target_interface + '_subseed_show_box'): + seed_checkbox = gr.Checkbox(label='Extra', elem_id=target_interface + '_subseed_show', value=False) # Components to show/hide based on the 'Extra' checkbox seed_extras = [] @@ -290,17 +290,17 @@ def create_seed_inputs(): with gr.Row(visible=False) as seed_extra_row_1: seed_extras.append(seed_extra_row_1) with gr.Box(): - with gr.Row(elem_id='subseed_row'): - subseed = gr.Number(label='Variation seed', value=-1) + with gr.Row(elem_id=target_interface + '_subseed_row'): + subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed') subseed.style(container=False) - random_subseed = gr.Button(random_symbol, elem_id='random_subseed') - reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed') - subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01) + random_subseed = gr.Button(random_symbol, elem_id=target_interface + '_random_subseed') + reuse_subseed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_subseed') + subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=target_interface + '_subseed_strength') with gr.Row(visible=False) as seed_extra_row_2: seed_extras.append(seed_extra_row_2) - 
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0) - seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0) + seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=target_interface + '_seed_resize_from_w') + seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=target_interface + '_seed_resize_from_h') random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed]) random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed]) @@ -678,28 +678,28 @@ def create_ui(): steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") with gr.Group(): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512) - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512) + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") with gr.Row(): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1) - tiling = gr.Checkbox(label='Tiling', value=False) - enable_hr = gr.Checkbox(label='Highres. fix', value=False) + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") + enable_hr = gr.Checkbox(label='Highres. fix', value=False, elem_id="txt2img_enable_hr") with gr.Row(visible=False) as hr_options: - firstphase_width = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass width", value=0) - firstphase_height = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass height", value=0) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7) + firstphase_width = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass width", value=0, elem_id="txt2img_firstphase_width") + firstphase_height = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass height", value=0, elem_id="txt2img_firstphase_height") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") with gr.Row(equal_height=True): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1) - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1) + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0) + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs() + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') - with gr.Group(): + with gr.Group(elem_id="txt2img_script_container"): custom_inputs = modules.scripts.scripts_txt2img.setup_ui() 
txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) @@ -821,10 +821,10 @@ def create_ui(): with gr.Column(variant='panel', elem_id="img2img_settings"): with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode: - with gr.TabItem('img2img', id='img2img'): + with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab"): init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_img2img_tool, image_mode="RGBA").style(height=480) - with gr.TabItem('Inpaint', id='inpaint'): + with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab"): init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_inpaint_tool, image_mode="RGBA").style(height=480) init_img_with_mask_orig = gr.State(None) @@ -843,24 +843,24 @@ def create_ui(): init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask") with gr.Row(): - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4) - mask_alpha = gr.Slider(label="Mask transparency", interactive=use_color_sketch, visible=use_color_sketch) + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") + mask_alpha = gr.Slider(label="Mask transparency", interactive=use_color_sketch, visible=use_color_sketch, elem_id="img2img_mask_alpha") with gr.Row(): mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode") - inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index") + inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index") + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") with gr.Row(): - inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False) - inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32) + inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False, elem_id="img2img_inpaint_full_res") + inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") - with gr.TabItem('Batch img2img', id='batch'): + with gr.TabItem('Batch img2img', id='batch', elem_id="img2img_batch_tab"): hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.<br>Use an empty output directory to save pictures normally instead of writing to the output directory.{hidden}</p>
") - img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs) - img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs) + img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") + img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") with gr.Row(): resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") @@ -872,20 +872,20 @@ def create_ui(): height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") with gr.Row(): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1) - tiling = gr.Checkbox(label='Tiling', value=False) + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") with gr.Row(): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1) - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1) + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") with gr.Group(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75) + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs() + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') - with gr.Group(): + with gr.Group(elem_id="img2img_script_container"): custom_inputs = modules.scripts.scripts_img2img.setup_ui() img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) @@ -1032,45 +1032,45 @@ def create_ui(): with gr.Row().style(equal_height=False): with gr.Column(variant='panel'): with gr.Tabs(elem_id="mode_extras"): - with gr.TabItem('Single Image'): - extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil") + with gr.TabItem('Single Image', elem_id="extras_single_tab"): + extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image") - with gr.TabItem('Batch Process'): - image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file") + with gr.TabItem('Batch Process', elem_id="extras_batch_process_tab"): + image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file", elem_id="extras_image_batch") - with gr.TabItem('Batch from Directory'): - extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same 
machine where the server is running.") - extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.") - show_extras_results = gr.Checkbox(label='Show result images', value=True) + with gr.TabItem('Batch from Directory', elem_id="extras_batch_directory_tab"): + extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.", elem_id="extras_batch_input_dir") + extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.", elem_id="extras_batch_output_dir") + show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results") submit = gr.Button('Generate', elem_id="extras_generate", variant='primary') with gr.Tabs(elem_id="extras_resize_mode"): - with gr.TabItem('Scale by'): - upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4) - with gr.TabItem('Scale to'): + with gr.TabItem('Scale by', elem_id="extras_scale_by_tab"): + upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize") + with gr.TabItem('Scale to', elem_id="extras_scale_to_tab"): with gr.Group(): with gr.Row(): - upscaling_resize_w = gr.Number(label="Width", value=512, precision=0) - upscaling_resize_h = gr.Number(label="Height", value=512, precision=0) - upscaling_crop = gr.Checkbox(label='Crop to fit', value=True) + upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w") + upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h") + upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") with gr.Group(): extras_upscaler_1 = gr.Radio(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index") with gr.Group(): extras_upscaler_2 = gr.Radio(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index") - extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1) + extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1, elem_id="extras_upscaler_2_visibility") with gr.Group(): - gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan) + gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan, elem_id="extras_gfpgan_visibility") with gr.Group(): - codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer) - codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer) + codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer, elem_id="extras_codeformer_visibility") + codeformer_weight 
= gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer, elem_id="extras_codeformer_weight") with gr.Group(): - upscale_before_face_fix = gr.Checkbox(label='Upscale Before Restoring Faces', value=False) + upscale_before_face_fix = gr.Checkbox(label='Upscale Before Restoring Faces', value=False, elem_id="extras_upscale_before_face_fix") result_images, html_info_x, html_info, html_log = create_output_panel("extras", opts.outdir_extras_samples) @@ -1117,7 +1117,7 @@ def create_ui(): with gr.Column(variant='panel'): html = gr.HTML() - generation_info = gr.Textbox(visible=False) + generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info") html2 = gr.HTML() with gr.Row(): buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"]) @@ -1144,13 +1144,13 @@ def create_ui(): tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") - custom_name = gr.Textbox(label="Custom Name (Optional)") - interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3) - interp_method = gr.Radio(choices=["Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method") + custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") + interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") + interp_method = gr.Radio(choices=["Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") with gr.Row(): - checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="ckpt", label="Checkpoint format") - save_as_half = gr.Checkbox(value=False, label="Save as float16") + checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="ckpt", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") + save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary') @@ -1165,58 +1165,58 @@ def create_ui(): with gr.Tabs(elem_id="train_tabs"): with gr.Tab(label="Create embedding"): - new_embedding_name = gr.Textbox(label="Name") - initialization_text = gr.Textbox(label="Initialization text", value="*") - nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1) - overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding") + new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") + initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") + nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") + overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding") with gr.Row(): with gr.Column(scale=3): gr.HTML(value="") with gr.Column(): - create_embedding = gr.Button(value="Create embedding", variant='primary') + create_embedding = 
gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") with gr.Tab(label="Create hypernetwork"): - new_hypernetwork_name = gr.Textbox(label="Name") - new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"]) - new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'") - new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys) - new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"]) - new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization") - new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout") - overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork") + new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") + new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") + new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") + new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") + new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. 
Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") + new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") + new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") + overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork") with gr.Row(): with gr.Column(scale=3): gr.HTML(value="") with gr.Column(): - create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary') + create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") with gr.Tab(label="Preprocess images"): - process_src = gr.Textbox(label='Source directory') - process_dst = gr.Textbox(label='Destination directory') - process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512) - process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512) - preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"]) + process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") + process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") + process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") + process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") + preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") with gr.Row(): - process_flip = gr.Checkbox(label='Create flipped copies') - process_split = gr.Checkbox(label='Split oversized images') - process_focal_crop = gr.Checkbox(label='Auto focal point crop') - process_caption = gr.Checkbox(label='Use BLIP for caption') - process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True) + process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") + process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") + process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") + process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") + process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") with gr.Row(visible=False) as process_split_extra_row: - process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05) - process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05) + process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") + process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") with gr.Row(visible=False) as process_focal_crop_row: - process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', 
value=0.9, minimum=0.0, maximum=1.0, step=0.05) - process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05) - process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05) - process_focal_crop_debug = gr.Checkbox(label='Create debug image') + process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") + process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") + process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") + process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") with gr.Row(): with gr.Column(scale=3): @@ -1224,8 +1224,8 @@ def create_ui(): with gr.Column(): with gr.Row(): - interrupt_preprocessing = gr.Button("Interrupt") - run_preprocess = gr.Button(value="Preprocess", variant='primary') + interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing") + run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess") process_split.change( fn=lambda show: gr_show(show), @@ -1248,31 +1248,31 @@ def create_ui(): train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=[x for x in shared.hypernetworks.keys()]) create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, "refresh_train_hypernetwork_name") with gr.Row(): - embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005") - hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001") + embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") + hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") - batch_size = gr.Number(label='Batch size', value=1, precision=0) - gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0) - dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images") - log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion") - template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt")) - training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512) - training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512) - steps = gr.Number(label='Max steps', value=100000, precision=0) - create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0) - save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0) - 
save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True) - preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False) + batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") + gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") + dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory") + log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory") + template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"), elem_id="train_template_file") + training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width") + training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height") + steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps") + create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") + save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") + save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") + preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") with gr.Row(): - shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False) - tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0) + shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") + tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") with gr.Row(): - latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random']) + latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") with gr.Row(): - interrupt_training = gr.Button(value="Interrupt") - train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary') - train_embedding = gr.Button(value="Train Embedding", variant='primary') + interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") + train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") + train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") params = script_callbacks.UiTrainTabParams(txt2img_preview_params) @@ -1490,7 +1490,7 @@ def create_ui(): return gr.update(value=value), opts.dumpjson() with gr.Blocks(analytics_enabled=False) as settings_interface: - settings_submit = gr.Button(value="Apply settings", variant='primary') + settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit") result = gr.HTML() settings_cols = 3 @@ -1541,8 +1541,8 @@ def create_ui(): download_localization = gr.Button(value='Download localization template', elem_id="download_localization") with gr.Row(): - reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary') - restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary') + reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies") + restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary', elem_id="settings_restart_gradio") request_notifications.click( fn=lambda: None, diff --git a/style.css b/style.css index f168571e7..924d4ae76 100644 --- a/style.css +++ b/style.css @@ -73,7 +73,7 @@ margin-right: auto; } -#random_seed, #random_subseed, #reuse_seed, #reuse_subseed, #open_folder{ +[id$=_random_seed], [id$=_random_subseed], [id$=_reuse_seed], [id$=_reuse_subseed], #open_folder{ min-width: auto; flex-grow: 0; padding-left: 0.25em; @@ -84,27 +84,27 @@ display: none; } -#seed_row, #subseed_row{ +[id$=_seed_row], [id$=_subseed_row]{ gap: 0.5rem; } -#subseed_show_box{ +[id$=_subseed_show_box]{ min-width: auto; flex-grow: 0; } -#subseed_show_box > div{ +[id$=_subseed_show_box] > div{ border: 0; height: 100%; } -#subseed_show{ +[id$=_subseed_show]{ min-width: auto; flex-grow: 0; padding: 0; } -#subseed_show label{ +[id$=_subseed_show] label{ height: 100%; } From 311354c0bb8930ea939d6aa6b3edd50c69301320 Mon Sep 17 00:00:00 2001 From: AUTOMATIC 
<16777216c@gmail.com> Date: Mon, 2 Jan 2023 00:38:09 +0300 Subject: [PATCH 33/53] fix the issue with training on SD2.0 --- modules/sd_models.py | 2 ++ modules/textual_inversion/textual_inversion.py | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index ebd4dff7f..bff8d6c9d 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -228,6 +228,8 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"): model.sd_model_checkpoint = checkpoint_file model.sd_checkpoint_info = checkpoint_info + model.logvar = model.logvar.to(devices.device) # fix for training + sd_vae.delete_base_vae() sd_vae.clear_loaded_vae() vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 66f40367a..1e5722e74 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -282,7 +282,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ return embedding, filename scheduler = LearnRateScheduler(learn_rate, steps, initial_step) - # dataset loading may take a while, so input validations and early returns should be done before this + # dataset loading may take a while, so input validations and early returns should be done before this shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." old_parallel_processing_allowed = shared.parallel_processing_allowed @@ -310,7 +310,6 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ loss_step = 0 _loss_step = 0 #internal - last_saved_file = "" last_saved_image = "" forced_filename = "" From b5819d9bf1794071139c640b5f1e72c84a0e051a Mon Sep 17 00:00:00 2001 From: Philpax Date: Mon, 2 Jan 2023 10:17:33 +1100 Subject: [PATCH 34/53] feat(api): add /sdapi/v1/embeddings --- modules/api/api.py | 8 ++++++++ modules/api/models.py | 3 +++ 2 files changed, 11 insertions(+) diff --git a/modules/api/api.py b/modules/api/api.py index 11daff0d6..30bf3dac5 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -100,6 +100,7 @@ class Api: self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[PromptStyleItem]) self.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str]) self.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem]) + self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=EmbeddingsResponse) self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"]) self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=CreateResponse) self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=CreateResponse) @@ -327,6 +328,13 @@ class Api: def get_artists(self): return [{"name":x[0], "score":x[1], "category":x[2]} for x in shared.artist_db.artists] + def get_embeddings(self): + db = sd_hijack.model_hijack.embedding_db + return { + "loaded": sorted(db.word_embeddings.keys()), + "skipped": sorted(db.skipped_embeddings), + } + def refresh_checkpoints(self): shared.refresh_checkpoints() diff --git a/modules/api/models.py b/modules/api/models.py index c446ce7a6..a8472dc9d 100644 --- a/modules/api/models.py +++ 
b/modules/api/models.py @@ -249,3 +249,6 @@ class ArtistItem(BaseModel): score: float = Field(title="Score") category: str = Field(title="Category") +class EmbeddingsResponse(BaseModel): + loaded: List[str] = Field(title="Loaded", description="Embeddings loaded for the current model") + skipped: List[str] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") \ No newline at end of file From c65909ad16a1962129114c6251de092f49479b06 Mon Sep 17 00:00:00 2001 From: Philpax Date: Mon, 2 Jan 2023 12:21:22 +1100 Subject: [PATCH 35/53] feat(api): return more data for embeddings --- modules/api/api.py | 17 +++++++++++++++-- modules/api/models.py | 11 +++++++++-- modules/textual_inversion/textual_inversion.py | 8 ++++---- 3 files changed, 28 insertions(+), 8 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 30bf3dac5..9c670f006 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -330,9 +330,22 @@ class Api: def get_embeddings(self): db = sd_hijack.model_hijack.embedding_db + + def convert_embedding(embedding): + return { + "step": embedding.step, + "sd_checkpoint": embedding.sd_checkpoint, + "sd_checkpoint_name": embedding.sd_checkpoint_name, + "shape": embedding.shape, + "vectors": embedding.vectors, + } + + def convert_embeddings(embeddings): + return {embedding.name: convert_embedding(embedding) for embedding in embeddings.values()} + return { - "loaded": sorted(db.word_embeddings.keys()), - "skipped": sorted(db.skipped_embeddings), + "loaded": convert_embeddings(db.word_embeddings), + "skipped": convert_embeddings(db.skipped_embeddings), } def refresh_checkpoints(self): diff --git a/modules/api/models.py b/modules/api/models.py index a8472dc9d..4a632c685 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -249,6 +249,13 @@ class ArtistItem(BaseModel): score: float = Field(title="Score") category: str = Field(title="Category") +class EmbeddingItem(BaseModel): + step: Optional[int] = Field(title="Step", description="The number of steps that were used to train this embedding, if available") + sd_checkpoint: Optional[str] = Field(title="SD Checkpoint", description="The hash of the checkpoint this embedding was trained on, if available") + sd_checkpoint_name: Optional[str] = Field(title="SD Checkpoint Name", description="The name of the checkpoint this embedding was trained on, if available. 
Note that this is the name that was used by the trainer; for a stable identifier, use `sd_checkpoint` instead") + shape: int = Field(title="Shape", description="The length of each individual vector in the embedding") + vectors: int = Field(title="Vectors", description="The number of vectors in the embedding") + class EmbeddingsResponse(BaseModel): - loaded: List[str] = Field(title="Loaded", description="Embeddings loaded for the current model") - skipped: List[str] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") \ No newline at end of file + loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model") + skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") \ No newline at end of file diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 1e5722e74..fd2534776 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -59,7 +59,7 @@ class EmbeddingDatabase: def __init__(self, embeddings_dir): self.ids_lookup = {} self.word_embeddings = {} - self.skipped_embeddings = [] + self.skipped_embeddings = {} self.dir_mtime = None self.embeddings_dir = embeddings_dir self.expected_shape = -1 @@ -91,7 +91,7 @@ class EmbeddingDatabase: self.dir_mtime = mt self.ids_lookup.clear() self.word_embeddings.clear() - self.skipped_embeddings = [] + self.skipped_embeddings.clear() self.expected_shape = self.get_expected_shape() def process_file(path, filename): @@ -136,7 +136,7 @@ class EmbeddingDatabase: if self.expected_shape == -1 or self.expected_shape == embedding.shape: self.register_embedding(embedding, shared.sd_model) else: - self.skipped_embeddings.append(name) + self.skipped_embeddings[name] = embedding for fn in os.listdir(self.embeddings_dir): try: @@ -153,7 +153,7 @@ class EmbeddingDatabase: print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}") if len(self.skipped_embeddings) > 0: - print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings)}") + print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings.keys())}") def find_embedding_at_position(self, tokens, offset): token = tokens[offset] From ef27a18b6b7cb1a8eebdc9b2e88d25baf2c2414d Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 2 Jan 2023 19:42:10 +0300 Subject: [PATCH 36/53] Hires fix rework --- modules/generation_parameters_copypaste.py | 32 ++++++++++ modules/images.py | 24 ++++++-- modules/processing.py | 68 +++++++++------------- modules/shared.py | 7 ++- modules/txt2img.py | 6 +- modules/ui.py | 15 +++-- scripts/xy_grid.py | 4 +- 7 files changed, 96 insertions(+), 60 deletions(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 8e7f0df0a..d6fa822bd 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -1,5 +1,6 @@ import base64 import io +import math import os import re from pathlib import Path @@ -164,6 +165,35 @@ def find_hypernetwork_key(hypernet_name, hypernet_hash=None): return None +def restore_old_hires_fix_params(res): + """for infotexts that specify old First pass size parameter, 
convert it into + width, height, and hr scale""" + + firstpass_width = res.get('First pass size-1', None) + firstpass_height = res.get('First pass size-2', None) + + if firstpass_width is None or firstpass_height is None: + return + + firstpass_width, firstpass_height = int(firstpass_width), int(firstpass_height) + width = int(res.get("Size-1", 512)) + height = int(res.get("Size-2", 512)) + + if firstpass_width == 0 or firstpass_height == 0: + # old algorithm for auto-calculating first pass size + desired_pixel_count = 512 * 512 + actual_pixel_count = width * height + scale = math.sqrt(desired_pixel_count / actual_pixel_count) + firstpass_width = math.ceil(scale * width / 64) * 64 + firstpass_height = math.ceil(scale * height / 64) * 64 + + hr_scale = width / firstpass_width if firstpass_width > 0 else height / firstpass_height + + res['Size-1'] = firstpass_width + res['Size-2'] = firstpass_height + res['Hires upscale'] = hr_scale + + def parse_generation_parameters(x: str): """parses generation parameters string, the one you see in text field under the picture in UI: ``` @@ -221,6 +251,8 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model hypernet_hash = res.get("Hypernet hash", None) res["Hypernet"] = find_hypernetwork_key(hypernet_name, hypernet_hash) + restore_old_hires_fix_params(res) + return res diff --git a/modules/images.py b/modules/images.py index f84fd4858..c3a5fc8bc 100644 --- a/modules/images.py +++ b/modules/images.py @@ -230,16 +230,32 @@ def draw_prompt_matrix(im, width, height, all_prompts): return draw_grid_annotations(im, width, height, hor_texts, ver_texts) -def resize_image(resize_mode, im, width, height): +def resize_image(resize_mode, im, width, height, upscaler_name=None): + """ + Resizes an image with the specified resize_mode, width, and height. + + Args: + resize_mode: The mode to use when resizing the image. + 0: Resize the image to the specified width and height. + 1: Resize the image to fill the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess. + 2: Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling empty with data from image. + im: The image to resize. + width: The width to resize the image to. + height: The height to resize the image to. + upscaler_name: The name of the upscaler to use. If not provided, defaults to opts.upscaler_for_img2img. 
+ """ + + upscaler_name = upscaler_name or opts.upscaler_for_img2img + def resize(im, w, h): - if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None" or im.mode == 'L': + if upscaler_name is None or upscaler_name == "None" or im.mode == 'L': return im.resize((w, h), resample=LANCZOS) scale = max(w / im.width, h / im.height) if scale > 1.0: - upscalers = [x for x in shared.sd_upscalers if x.name == opts.upscaler_for_img2img] - assert len(upscalers) > 0, f"could not find upscaler named {opts.upscaler_for_img2img}" + upscalers = [x for x in shared.sd_upscalers if x.name == upscaler_name] + assert len(upscalers) > 0, f"could not find upscaler named {upscaler_name}" upscaler = upscalers[0] im = upscaler.scaler.upscale(im, scale, upscaler.data_path) diff --git a/modules/processing.py b/modules/processing.py index 42dc19ea6..4654570c0 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -658,14 +658,18 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): sampler = None - def __init__(self, enable_hr: bool=False, denoising_strength: float=0.75, firstphase_width: int=0, firstphase_height: int=0, **kwargs): + def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, **kwargs): super().__init__(**kwargs) self.enable_hr = enable_hr self.denoising_strength = denoising_strength - self.firstphase_width = firstphase_width - self.firstphase_height = firstphase_height - self.truncate_x = 0 - self.truncate_y = 0 + self.hr_scale = hr_scale + self.hr_upscaler = hr_upscaler + + if firstphase_width != 0 or firstphase_height != 0: + print("firstphase_width/firstphase_height no longer supported; use hr_scale", file=sys.stderr) + self.hr_scale = self.width / firstphase_width + self.width = firstphase_width + self.height = firstphase_height def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: @@ -674,47 +678,29 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): else: state.job_count = state.job_count * 2 - self.extra_generation_params["First pass size"] = f"{self.firstphase_width}x{self.firstphase_height}" - - if self.firstphase_width == 0 or self.firstphase_height == 0: - desired_pixel_count = 512 * 512 - actual_pixel_count = self.width * self.height - scale = math.sqrt(desired_pixel_count / actual_pixel_count) - self.firstphase_width = math.ceil(scale * self.width / 64) * 64 - self.firstphase_height = math.ceil(scale * self.height / 64) * 64 - firstphase_width_truncated = int(scale * self.width) - firstphase_height_truncated = int(scale * self.height) - - else: - - width_ratio = self.width / self.firstphase_width - height_ratio = self.height / self.firstphase_height - - if width_ratio > height_ratio: - firstphase_width_truncated = self.firstphase_width - firstphase_height_truncated = self.firstphase_width * self.height / self.width - else: - firstphase_width_truncated = self.firstphase_height * self.width / self.height - firstphase_height_truncated = self.firstphase_height - - self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f - self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f + self.extra_generation_params["Hires upscale"] = self.hr_scale + if self.hr_upscaler is not None: + self.extra_generation_params["Hires upscaler"] = self.hr_upscaler def sample(self, conditioning, 
unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) + latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_default_mode + if self.enable_hr and latent_scale_mode is None: + assert len([x for x in shared.sd_upscalers if x.name == self.hr_upscaler]) > 0, f"could not find upscaler named {self.hr_upscaler}" + + x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) + samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) + if not self.enable_hr: - x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) - samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) return samples - x = create_random_tensors([opt_C, self.firstphase_height // opt_f, self.firstphase_width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) - samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x, self.firstphase_width, self.firstphase_height)) + target_width = int(self.width * self.hr_scale) + target_height = int(self.height * self.hr_scale) - samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2] - - """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images""" def save_intermediate(image, index): + """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images""" + if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix: return @@ -723,11 +709,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix") - if opts.use_scale_latent_for_hires_fix: + if latent_scale_mode is not None: for i in range(samples.shape[0]): save_intermediate(samples, i) - samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") + samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode) # Avoid making the inpainting conditioning unless necessary as # this does need some extra compute to decode / encode the image again. 
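# Sketch of the reworked hires-fix sizing, isolated from the class above.
# Assumes opt_f = 8 (the latent downsampling factor used in processing.py) and the
# "Latent" upscale mode, which shared.py maps to bilinear interpolation.
import torch

opt_f = 8                                 # assumed latent downsampling factor
width, height, hr_scale = 512, 512, 2.0

target_width = int(width * hr_scale)      # 1024
target_height = int(height * hr_scale)    # 1024

# the first pass samples a latent at the base resolution
samples = torch.randn(1, 4, height // opt_f, width // opt_f)

# latent upscalers interpolate directly in latent space with the chosen mode
samples = torch.nn.functional.interpolate(
    samples, size=(target_height // opt_f, target_width // opt_f), mode="bilinear")

assert samples.shape[-2:] == (target_height // opt_f, target_width // opt_f)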
@@ -747,7 +733,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): save_intermediate(image, i) - image = images.resize_image(0, image, self.width, self.height) + image = images.resize_image(0, image, target_width, target_height, upscaler_name=self.hr_upscaler) image = np.array(image).astype(np.float32) / 255.0 image = np.moveaxis(image, 2, 0) batch_images.append(image) @@ -764,7 +750,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) - noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) + noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self) # GC now before running the next img2img to prevent running out of memory x = None diff --git a/modules/shared.py b/modules/shared.py index 7f430b938..b65559eeb 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -327,7 +327,6 @@ options_templates.update(options_section(('upscaling', "Upscaling"), { "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}), "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}), "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), - "use_scale_latent_for_hires_fix": OptionInfo(False, "Upscale latent space image when doing hires. 
fix"), })) options_templates.update(options_section(('face-restoration', "Face restoration"), { @@ -545,6 +544,12 @@ opts = Options() if os.path.exists(config_filename): opts.load(config_filename) +latent_upscale_default_mode = "Latent" +latent_upscale_modes = { + "Latent": "bilinear", + "Latent (nearest)": "nearest", +} + sd_upscalers = [] sd_model = None diff --git a/modules/txt2img.py b/modules/txt2img.py index 7f61e19a8..e189a8999 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -8,7 +8,7 @@ import modules.processing as processing from modules.ui import plaintext_to_html -def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, firstphase_width: int, firstphase_height: int, *args): +def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, *args): p = StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, @@ -33,8 +33,8 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: tiling=tiling, enable_hr=enable_hr, denoising_strength=denoising_strength if enable_hr else None, - firstphase_width=firstphase_width if enable_hr else None, - firstphase_height=firstphase_height if enable_hr else None, + hr_scale=hr_scale, + hr_upscaler=hr_upscaler, ) p.scripts = modules.scripts.scripts_txt2img diff --git a/modules/ui.py b/modules/ui.py index 7070ea155..27cd9ddd3 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -684,11 +684,11 @@ def create_ui(): with gr.Row(): restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Highres. fix', value=False, elem_id="txt2img_enable_hr") + enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") with gr.Row(visible=False) as hr_options: - firstphase_width = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass width", value=0, elem_id="txt2img_firstphase_width") - firstphase_height = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass height", value=0, elem_id="txt2img_firstphase_height") + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") with gr.Row(equal_height=True): @@ -729,8 +729,8 @@ def create_ui(): width, enable_hr, denoising_strength, - firstphase_width, - firstphase_height, + hr_scale, + hr_upscaler, ] + custom_inputs, outputs=[ @@ -762,7 +762,6 @@ def create_ui(): outputs=[hr_options], ) - txt2img_paste_fields = [ (txt2img_prompt, "Prompt"), (txt2img_negative_prompt, "Negative prompt"), @@ -781,8 +780,8 @@ def create_ui(): (denoising_strength, "Denoising strength"), (enable_hr, lambda d: "Denoising strength" in d), (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), - (firstphase_width, "First pass size-1"), - (firstphase_height, "First pass size-2"), + (hr_scale, "Hires upscale"), + (hr_upscaler, "Hires upscaler"), *modules.scripts.scripts_txt2img.infotext_fields ] parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 3e0b28055..f92f9776f 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -202,7 +202,7 @@ axis_options = [ AxisOption("Eta", float, apply_field("eta"), format_value_add_label, None), AxisOption("Clip skip", int, apply_clip_skip, format_value_add_label, None), AxisOption("Denoising", float, apply_field("denoising_strength"), format_value_add_label, None), - AxisOption("Upscale latent space for hires.", str, apply_upscale_latent_space, format_value_add_label, None), + AxisOption("Hires upscaler", str, apply_field("hr_upscaler"), format_value_add_label, None), AxisOption("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight"), format_value_add_label, None), AxisOption("VAE", str, apply_vae, format_value_add_label, None), AxisOption("Styles", str, apply_styles, format_value_add_label, None), @@ -267,7 +267,6 @@ class SharedSettingsStackHelper(object): self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers self.hypernetwork = opts.sd_hypernetwork self.model = shared.sd_model - self.use_scale_latent_for_hires_fix = opts.use_scale_latent_for_hires_fix self.vae = opts.sd_vae def __exit__(self, exc_type, exc_value, tb): @@ -278,7 +277,6 @@ class SharedSettingsStackHelper(object): hypernetwork.apply_strength() opts.data["CLIP_stop_at_last_layers"] = self.CLIP_stop_at_last_layers - opts.data["use_scale_latent_for_hires_fix"] = self.use_scale_latent_for_hires_fix re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*") From 4dbde228ff48dbb105241b1ed25c21ce3f87d182 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 2 Jan 2023 20:01:16 +0300 Subject: [PATCH 37/53] make it possible to use fractional values for SD upscale. 
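A sketch of what fractional scales imply for Upscaler.upscale, under assumptions: the int() truncation and the range(3) retry loop appear in the hunk below, while the loop-exit test and the final exact-fit resize are assumed from the surrounding module rather than shown here:

from PIL import Image

def upscale(img: Image.Image, scale, do_upscale, max_passes=3):
    dest_w = int(img.width * scale)   # fractional scales truncate to whole pixels
    dest_h = int(img.height * scale)

    for _ in range(max_passes):
        if img.width >= dest_w and img.height >= dest_h:
            break
        img = do_upscale(img)         # e.g. one pass of a fixed 2x or 4x model

    if (img.width, img.height) != (dest_w, dest_h):
        img = img.resize((dest_w, dest_h), resample=Image.LANCZOS)  # exact fit
    return img

# usage: upscale(im, 2.5, lambda i: i.resize((i.width * 2, i.height * 2)))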
--- modules/upscaler.py | 6 +++--- scripts/sd_upscale.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/upscaler.py b/modules/upscaler.py index c4e6e6bd6..231680cb0 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -53,10 +53,10 @@ class Upscaler: def do_upscale(self, img: PIL.Image, selected_model: str): return img - def upscale(self, img: PIL.Image, scale: int, selected_model: str = None): + def upscale(self, img: PIL.Image, scale, selected_model: str = None): self.scale = scale - dest_w = img.width * scale - dest_h = img.height * scale + dest_w = int(img.width * scale) + dest_h = int(img.height * scale) for i in range(3): shape = (img.width, img.height) diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index e8c80a6c6..9739545cc 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -19,7 +19,7 @@ class Script(scripts.Script): def ui(self, is_img2img): info = gr.HTML("<p style=\"margin-bottom:0.75em\">Will upscale the image by the selected scale factor; use width and height sliders to set tile size</p>
") overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64) - scale_factor = gr.Slider(minimum=1, maximum=4, step=1, label='Scale Factor', value=2) + scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0) upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index") return [info, overlap, upscaler_index, scale_factor] From 84dd7e8e2495c4fc2997e97f8267aa831eb90d11 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 2 Jan 2023 20:30:02 +0300 Subject: [PATCH 38/53] error out with a readable message in chwewckpoint merger for incompatible tensor shapes (ie when trying to merge SD1.5 with SD2.0) --- modules/extras.py | 2 ++ modules/ui.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/extras.py b/modules/extras.py index 68939dea8..5e2702508 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -303,6 +303,8 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier) result_is_inpainting_model = True else: + assert a.shape == b.shape, f'Incompatible shapes for layer {key}: A is {a.shape}, and B is {b.shape}' + theta_0[key] = theta_func2(a, b, multiplier) if save_as_half: diff --git a/modules/ui.py b/modules/ui.py index 27cd9ddd3..67a51888b 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1663,7 +1663,7 @@ def create_ui(): print("Error loading/saving model file:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) modules.sd_models.list_models() # to remove the potentially missing models from the list - return ["Error loading/saving model file. It doesn't exist or the name contains illegal characters"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(3)] + return [f"Error merging checkpoints: {e}"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)] return results modelmerger_merge.click( From 8d12a729b8b036cb765cf2d87576d5ae256135c8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 2 Jan 2023 20:46:51 +0300 Subject: [PATCH 39/53] fix possible error with accessing nonexistent setting --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 67a51888b..9350a80f5 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -491,7 +491,7 @@ def apply_setting(key, value): return valtype = type(opts.data_labels[key].default) - oldval = opts.data[key] + oldval = opts.data.get(key, None) opts.data[key] = valtype(value) if valtype != type(None) else value if oldval != value and opts.data_labels[key].onchange is not None: opts.data_labels[key].onchange() From 251ecee6949c36e9df1d99a950b3e1af2b5fa2b6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 2 Jan 2023 22:44:46 +0300 Subject: [PATCH 40/53] make "send to" buttons send actual dimension of the sent image rather than fields --- javascript/ui.js | 4 +- modules/generation_parameters_copypaste.py | 60 +++++++++++++++------- 2 files changed, 43 insertions(+), 21 deletions(-) diff --git a/javascript/ui.js b/javascript/ui.js index 587dd7825..d0c054d92 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -19,7 +19,7 @@ function selected_gallery_index(){ function extract_image_from_gallery(gallery){ if(gallery.length == 1){ - return gallery[0] + return [gallery[0]] } index = 
selected_gallery_index() @@ -28,7 +28,7 @@ function extract_image_from_gallery(gallery){ return [null] } - return gallery[index]; + return [gallery[index]]; } function args_to_array(args){ diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index d6fa822bd..ec60319af 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -103,35 +103,57 @@ def bind_buttons(buttons, send_image, send_generate_info): bind_list.append([buttons, send_image, send_generate_info]) +def send_image_and_dimensions(x): + if isinstance(x, Image.Image): + img = x + else: + img = image_from_url_text(x) + + if shared.opts.send_size and isinstance(img, Image.Image): + w = img.width + h = img.height + else: + w = gr.update() + h = gr.update() + + return img, w, h + + def run_bind(): - for buttons, send_image, send_generate_info in bind_list: + for buttons, source_image_component, send_generate_info in bind_list: for tab in buttons: button = buttons[tab] - if send_image and paste_fields[tab]["init_img"]: - if type(send_image) == gr.Gallery: - button.click( - fn=lambda x: image_from_url_text(x), - _js="extract_image_from_gallery", - inputs=[send_image], - outputs=[paste_fields[tab]["init_img"]], - ) - else: - button.click( - fn=lambda x: x, - inputs=[send_image], - outputs=[paste_fields[tab]["init_img"]], - ) + destination_image_component = paste_fields[tab]["init_img"] + fields = paste_fields[tab]["fields"] - if send_generate_info and paste_fields[tab]["fields"] is not None: + destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None) + destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None) + + if source_image_component and destination_image_component: + if isinstance(source_image_component, gr.Gallery): + func = send_image_and_dimensions if destination_width_component else image_from_url_text + jsfunc = "extract_image_from_gallery" + else: + func = send_image_and_dimensions if destination_width_component else lambda x: x + jsfunc = None + + button.click( + fn=func, + _js=jsfunc, + inputs=[source_image_component], + outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component], + ) + + if send_generate_info and fields is not None: if send_generate_info in paste_fields: - paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (['Size-1', 'Size-2'] if shared.opts.send_size else []) + (["Seed"] if shared.opts.send_seed else []) + paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else []) button.click( fn=lambda *x: x, inputs=[field for field, name in paste_fields[send_generate_info]["fields"] if name in paste_field_names], - outputs=[field for field, name in paste_fields[tab]["fields"] if name in paste_field_names], + outputs=[field for field, name in fields if name in paste_field_names], ) else: - connect_paste(button, paste_fields[tab]["fields"], send_generate_info) + connect_paste(button, fields, send_generate_info) button.click( fn=None, From 1d7a31def8b5f4c348e2dd07536ac56cb4350614 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 06:21:53 +0300 Subject: [PATCH 41/53] make edit fields for sliders not get hidden by slider's label when there's not enough space --- style.css | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/style.css b/style.css index 924d4ae76..77551dd7c 100644 --- a/style.css +++ b/style.css @@ -509,7 +509,7 @@ canvas[key="mask"] { position: absolute; right: 0.5em; top: -0.6em; - z-index: 200; + z-index: 400; width: 8em; } #quicksettings .gr-box > div > div > input.gr-text-input { From 269f6e867651cadef40d2c939a79d13291280bcd Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 07:20:20 +0300 Subject: [PATCH 42/53] change settings UI to use vertical tabs --- modules/ui.py | 45 +++++++++++++++++---------------------------- style.css | 27 +++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 28 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 9350a80f5..f8c973ba0 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1489,41 +1489,34 @@ def create_ui(): return gr.update(value=value), opts.dumpjson() with gr.Blocks(analytics_enabled=False) as settings_interface: - settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit") - result = gr.HTML() + with gr.Row(): + settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit") + restart_gradio = gr.Button(value='Restart UI', variant='primary', elem_id="settings_restart_gradio") - settings_cols = 3 - items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols) + result = gr.HTML(elem_id="settings_result") quicksettings_names = [x.strip() for x in opts.quicksettings.split(",")] quicksettings_names = set(x for x in quicksettings_names if x != 'quicksettings') quicksettings_list = [] - cols_displayed = 0 - items_displayed = 0 previous_section = None - column = None - with gr.Row(elem_id="settings").style(equal_height=False): + current_tab = None + with gr.Tabs(elem_id="settings"): for i, (k, item) in enumerate(opts.data_labels.items()): section_must_be_skipped = item.section[0] is None if previous_section != item.section and not section_must_be_skipped: - if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None): - if column is not None: - column.__exit__() + elem_id, text = item.section - column = gr.Column(variant='panel') - column.__enter__() + if current_tab is not None: + current_tab.__exit__() - items_displayed = 0 - cols_displayed += 1 + current_tab = gr.TabItem(elem_id="settings_{}".format(elem_id), label=text) + current_tab.__enter__() previous_section = item.section - elem_id, text = item.section - gr.HTML(elem_id="settings_header_text_{}".format(elem_id), value='
<h1 class="gr-button-lg">{}</h1>
'.format(text)) - if k in quicksettings_names and not shared.cmd_opts.freeze_settings: quicksettings_list.append((i, k, item)) components.append(dummy_component) @@ -1533,15 +1526,14 @@ def create_ui(): component = create_setting_component(k) component_dict[k] = component components.append(component) - items_displayed += 1 - with gr.Row(): - request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications") - download_localization = gr.Button(value='Download localization template', elem_id="download_localization") + if current_tab is not None: + current_tab.__exit__() - with gr.Row(): - reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies") - restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary', elem_id="settings_restart_gradio") + with gr.TabItem("Actions"): + request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications") + download_localization = gr.Button(value='Download localization template', elem_id="download_localization") + reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies") request_notifications.click( fn=lambda: None, @@ -1578,9 +1570,6 @@ def create_ui(): outputs=[], ) - if column is not None: - column.__exit__() - interfaces = [ (txt2img_interface, "txt2img", "txt2img"), (img2img_interface, "img2img", "img2img"), diff --git a/style.css b/style.css index 77551dd7c..7df4d9602 100644 --- a/style.css +++ b/style.css @@ -241,6 +241,33 @@ fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block s z-index: 200; } +#settings{ + display: block; +} + +#settings > div{ + border: none; + margin-left: 10em; +} + +#settings > div.flex-wrap{ + float: left; + display: block; + margin-left: 0; + width: 10em; +} + +#settings > div.flex-wrap button{ + display: block; + border: none; + text-align: left; +} + +#settings_result{ + height: 1.4em; + margin: 0 1.2em; +} + input[type="range"]{ margin: 0.5em 0 -0.3em 0; } From 18c03cdeac6272734b0c09afd3fbe47d1372dd07 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 09:04:29 +0300 Subject: [PATCH 43/53] styling rework to make things more compact --- modules/ui.py | 127 ++++++++++++++++++++------------------- modules/ui_components.py | 7 +++ style.css | 35 ++++++----- 3 files changed, 92 insertions(+), 77 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index f8c973ba0..f787b5182 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -19,7 +19,8 @@ import numpy as np from PIL import Image, PngImagePlugin from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, ui_components +from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru +from modules.ui_components import FormRow, FormGroup, ToolButton from modules.paths import script_path from modules.shared import opts, cmd_opts, restricted_opts @@ -273,31 +274,27 @@ def interrogate_deepbooru(image): def create_seed_inputs(target_interface): - with gr.Row(): - with gr.Box(): - with gr.Row(elem_id=target_interface + '_seed_row'): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', 
value=-1, elem_id=target_interface + '_seed') - seed.style(container=False) - random_seed = gr.Button(random_symbol, elem_id=target_interface + '_random_seed') - reuse_seed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_seed') + with FormRow(elem_id=target_interface + '_seed_row'): + seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed') + seed.style(container=False) + random_seed = gr.Button(random_symbol, elem_id=target_interface + '_random_seed') + reuse_seed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_seed') - with gr.Box(elem_id=target_interface + '_subseed_show_box'): + with gr.Group(elem_id=target_interface + '_subseed_show_box'): seed_checkbox = gr.Checkbox(label='Extra', elem_id=target_interface + '_subseed_show', value=False) # Components to show/hide based on the 'Extra' checkbox seed_extras = [] - with gr.Row(visible=False) as seed_extra_row_1: + with FormRow(visible=False, elem_id=target_interface + '_subseed_row') as seed_extra_row_1: seed_extras.append(seed_extra_row_1) - with gr.Box(): - with gr.Row(elem_id=target_interface + '_subseed_row'): - subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed') - subseed.style(container=False) - random_subseed = gr.Button(random_symbol, elem_id=target_interface + '_random_subseed') - reuse_subseed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_subseed') + subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed') + subseed.style(container=False) + random_subseed = gr.Button(random_symbol, elem_id=target_interface + '_random_subseed') + reuse_subseed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_subseed') subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=target_interface + '_subseed_strength') - with gr.Row(visible=False) as seed_extra_row_2: + with FormRow(visible=False) as seed_extra_row_2: seed_extras.append(seed_extra_row_2) seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=target_interface + '_seed_resize_from_w') seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=target_interface + '_seed_resize_from_h') @@ -523,7 +520,7 @@ def create_refresh_button(refresh_component, refresh_method, refreshed_args, ele return gr.update(**(args or {})) - refresh_button = ui_components.ToolButton(value=refresh_symbol, elem_id=elem_id) + refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id) refresh_button.click( fn=refresh, inputs=[], @@ -636,11 +633,11 @@ Requested path was: {f} def create_sampler_and_steps_selection(choices, tabname): if opts.samplers_in_dropdown: - with gr.Row(elem_id=f"sampler_selection_{tabname}"): + with FormRow(elem_id=f"sampler_selection_{tabname}"): sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20) else: - with gr.Group(elem_id=f"sampler_selection_{tabname}"): + with FormGroup(elem_id=f"sampler_selection_{tabname}"): steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20) sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", 
choices=[x.name for x in choices], value=choices[0].name, type="index") @@ -677,29 +674,29 @@ def create_ui(): with gr.Column(variant='panel', elem_id="txt2img_settings"): steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") - with gr.Group(): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - - with gr.Row(): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr") - - with gr.Row(visible=False) as hr_options: - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") - - with gr.Row(equal_height=True): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + with FormRow(): + with gr.Column(elem_id="txt2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") + with gr.Column(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') - with gr.Group(elem_id="txt2img_script_container"): + with FormRow(elem_id="txt2img_checkboxes"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") + enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") + + with FormRow(visible=False) as hr_options: + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + + with FormGroup(elem_id="txt2img_script_container"): custom_inputs = modules.scripts.scripts_txt2img.setup_ui() txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) @@ -816,7 +813,7 @@ def create_ui(): img2img_preview = gr.Image(elem_id='img2img_preview', visible=False) setup_progressbar(progressbar, img2img_preview, 'img2img') - with gr.Row().style(equal_height=False): + with FormRow().style(equal_height=False): with gr.Column(variant='panel', elem_id="img2img_settings"): with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode: @@ -841,19 +838,23 @@ def create_ui(): init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base") init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask") - with gr.Row(): + with FormRow(): mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") mask_alpha = gr.Slider(label="Mask transparency", interactive=use_color_sketch, visible=use_color_sketch, elem_id="img2img_mask_alpha") - with gr.Row(): - mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode") - inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") + with FormRow(): + mask_mode = gr.Radio(label="Mask source", choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode") + inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") + with FormRow(): + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") - with gr.Row(): - inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False, elem_id="img2img_inpaint_full_res") - inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") + with FormRow(): + with gr.Column(): + inpaint_full_res = gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") + + with gr.Column(scale=4): + inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, 
elem_id="img2img_inpaint_full_res_padding") with gr.TabItem('Batch img2img', id='batch', elem_id="img2img_batch_tab"): hidden = '
Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' @@ -861,30 +862,30 @@ def create_ui(): img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") - with gr.Row(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + with FormRow(): + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") - with gr.Group(): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with gr.Column(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - with gr.Row(): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") - - with gr.Row(): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - with gr.Group(): + with FormGroup(): cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') - with gr.Group(elem_id="img2img_script_container"): + with FormRow(elem_id="img2img_checkboxes"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") + + with FormGroup(elem_id="img2img_script_container"): custom_inputs = modules.scripts.scripts_img2img.setup_ui() img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) @@ -1444,7 +1445,7 @@ def create_ui(): res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key) else: - with ui_components.FormRow(): + with FormRow(): res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) create_refresh_button(res, info.refresh, 
info.component_args, "refresh_" + key) else: diff --git a/modules/ui_components.py b/modules/ui_components.py index d0519d2d6..91eb0e3da 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -16,3 +16,10 @@ class FormRow(gr.Row, gr.components.FormComponent): def get_block_name(self): return "row" + + +class FormGroup(gr.Group, gr.components.FormComponent): + """Same as gr.Group but fits inside gradio forms""" + + def get_block_name(self): + return "group" diff --git a/style.css b/style.css index 7df4d9602..86a265f61 100644 --- a/style.css +++ b/style.css @@ -74,7 +74,8 @@ } [id$=_random_seed], [id$=_random_subseed], [id$=_reuse_seed], [id$=_reuse_subseed], #open_folder{ - min-width: auto; + min-width: 2.3em; + height: 2.5em; flex-grow: 0; padding-left: 0.25em; padding-right: 0.25em; @@ -86,6 +87,7 @@ [id$=_seed_row], [id$=_subseed_row]{ gap: 0.5rem; + padding: 0.6em; } [id$=_subseed_show_box]{ @@ -206,24 +208,24 @@ button{ fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block span{ position: absolute; - top: -0.6em; + top: -0.5em; line-height: 1.2em; - padding: 0 0.5em; - margin: 0; + padding: 0; + margin: 0 0.5em; background-color: white; - border-top: 1px solid #eee; - border-left: 1px solid #eee; - border-right: 1px solid #eee; + box-shadow: 0 0 5px 5px white; z-index: 300; } .dark fieldset span.text-gray-500, .dark .gr-block.gr-box span.text-gray-500, .dark label.block span{ background-color: rgb(31, 41, 55); - border-top: 1px solid rgb(55 65 81); - border-left: 1px solid rgb(55 65 81); - border-right: 1px solid rgb(55 65 81); + box-shadow: 0 0 5px 5px rgb(31, 41, 55); +} + +#txt2img_column_batch, #img2img_column_batch{ + min-width: min(13.5em, 100%) !important; } #settings fieldset span.text-gray-500, #settings .gr-block.gr-box span.text-gray-500, #settings label.block span{ @@ -232,10 +234,6 @@ margin-right: 8em; } -.gr-panel div.flex-col div.justify-between label span{ - margin: 0; -} - #settings .gr-panel div.flex-col div.justify-between div{ position: relative; z-index: 200; @@ -609,6 +607,15 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h } +#img2img_settings > div.gr-form, #txt2img_settings > div.gr-form { + padding-top: 0.9em; +} + +#img2img_settings div.gr-form .gr-form, #txt2img_settings div.gr-form .gr-form{ + border: none; + padding-bottom: 0.5em; +} + /* The following handles localization for right-to-left (RTL) languages like Arabic. The rtl media type will only be activated by the logic in javascript/localization.js.
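A side note on the ui_components pattern above: as the docstrings suggest, gradio 3.x merges consecutive FormComponent instances into a single styled form, so a layout block that also inherits gr.components.FormComponent can sit inside a form without breaking it apart, while get_block_name() tells the frontend which stock block to render. A minimal sketch of the same pattern (FormColumn is a hypothetical illustration, not part of this patch):

import gradio as gr


class FormColumn(gr.Column, gr.components.FormComponent):
    """Same as gr.Column but fits inside gradio forms, mirroring FormRow/FormGroup."""

    def get_block_name(self):
        # rendered as a stock column; inheriting FormComponent only affects form grouping
        return "column"

It would be used like any other layout block, e.g. `with FormColumn(): ...` around a group of sliders.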
From 2bc86712ec16cada01a2353f1d978c1aabc84dbb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 09:13:35 +0300 Subject: [PATCH 44/53] make quicksettings UI elements appear in same order as they are listed in the setting --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index f787b5182..d7b911da9 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1497,7 +1497,7 @@ def create_ui(): result = gr.HTML(elem_id="settings_result") quicksettings_names = [x.strip() for x in opts.quicksettings.split(",")] - quicksettings_names = set(x for x in quicksettings_names if x != 'quicksettings') + quicksettings_names = {x: i for i, x in enumerate(quicksettings_names) if x != 'quicksettings'} quicksettings_list = [] @@ -1604,7 +1604,7 @@ def create_ui(): with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo: with gr.Row(elem_id="quicksettings"): - for i, k, item in quicksettings_list: + for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])): component = create_setting_component(k, is_quicksettings=True) component_dict[k] = component From 9d4eff097deff6153c4023f158bd9fbd4f3e88b3 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 10:01:06 +0300 Subject: [PATCH 45/53] add a button to show all setting pages --- javascript/ui.js | 11 +++++++++++ modules/ui.py | 2 ++ 2 files changed, 13 insertions(+) diff --git a/javascript/ui.js b/javascript/ui.js index d0c054d92..34406f3f5 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -188,6 +188,17 @@ onUiUpdate(function(){ img2img_textarea = gradioApp().querySelector("#img2img_prompt > label > textarea"); img2img_textarea?.addEventListener("input", () => update_token_counter("img2img_token_button")); } + + show_all_pages = gradioApp().getElementById('settings_show_all_pages') + settings_tabs = gradioApp().querySelector('#settings div') + if(show_all_pages && settings_tabs){ + settings_tabs.appendChild(show_all_pages) + show_all_pages.onclick = function(){ + gradioApp().querySelectorAll('#settings > div').forEach(function(elem){ + elem.style.display = "block"; + }) + } + } }) let txt2img_textarea, img2img_textarea = undefined; diff --git a/modules/ui.py b/modules/ui.py index d7b911da9..2c92c4225 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1536,6 +1536,8 @@ def create_ui(): download_localization = gr.Button(value='Download localization template', elem_id="download_localization") reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies") + gr.Button(value="Show all pages", elem_id="settings_show_all_pages") + request_notifications.click( fn=lambda: None, inputs=[], From a1cf55a9d1c82f8e56c00d549bca5c8fa069f412 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 10:39:21 +0300 Subject: [PATCH 46/53] add option to reorder items in main UI --- modules/shared.py | 13 ++++++ modules/ui.py | 112 +++++++++++++++++++++++++++++++--------------- 2 files changed, 88 insertions(+), 37 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index b65559eeb..23657a939 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -109,6 +109,17 @@ restricted_opts = { "outdir_save", } +ui_reorder_categories = [ + "sampler", + "dimensions", + "cfg", + "seed", + "checkboxes", + "hires_fix", + "batch", + "scripts", +] + 
cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ @@ -410,7 +421,9 @@ options_templates.update(options_section(('ui', "User interface"), { "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"), + "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row"), 'quicksettings': OptionInfo("sd_model_checkpoint", "Quicksettings list"), + 'ui_reorder': OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), 'localization': OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), })) diff --git a/modules/ui.py b/modules/ui.py index 2c92c4225..f2e7c0d66 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -644,6 +644,13 @@ def create_sampler_and_steps_selection(choices, tabname): return steps, sampler_index +def ordered_ui_categories(): + user_order = {x.strip(): i for i, x in enumerate(shared.opts.ui_reorder.split(","))} + + for i, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] + 1000)): + yield category + + def create_ui(): import modules.img2img import modules.txt2img @@ -672,32 +679,48 @@ def create_ui(): with gr.Row().style(equal_height=False): with gr.Column(variant='panel', elem_id="txt2img_settings"): - steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") - with FormRow(): - with gr.Column(elem_id="txt2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - with gr.Column(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="txt2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox =
create_seed_inputs('txt2img') + elif category == "cfg": + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - with FormRow(elem_id="txt2img_checkboxes"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr") + elif category == "seed": + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') - with FormRow(visible=False) as hr_options: - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + elif category == "checkboxes": + with FormRow(elem_id="txt2img_checkboxes"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") + enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr") - with FormGroup(elem_id="txt2img_script_container"): - custom_inputs = modules.scripts.scripts_txt2img.setup_ui() + elif category == "hires_fix": + with FormRow(visible=False, elem_id="txt2img_hires_fix") as hr_options: + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + + elif category == "scripts": + with FormGroup(elem_id="txt2img_script_container"): + custom_inputs = modules.scripts.scripts_txt2img.setup_ui() txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) parameters_copypaste.bind_buttons({"txt2img": txt2img_paste}, None, txt2img_prompt) @@ -865,28 +888,43 @@ def create_ui(): with FormRow(): resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") - with FormRow(): - with gr.Column(elem_id="img2img_column_size", 
scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with FormGroup(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') + elif category == "cfg": + with FormGroup(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - with FormRow(elem_id="img2img_checkboxes"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") + elif category == "seed": + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') - with FormGroup(elem_id="img2img_script_container"): - custom_inputs = modules.scripts.scripts_img2img.setup_ui() + elif category == "checkboxes": + with FormRow(elem_id="img2img_checkboxes"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") + + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + + elif category == "scripts": + with FormGroup(elem_id="img2img_script_container"): + custom_inputs = modules.scripts.scripts_img2img.setup_ui() img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) parameters_copypaste.bind_buttons({"img2img": img2img_paste}, None, img2img_prompt) From fda1ed184381fdf8aa81be4f64e77787f3fac1b2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 12:01:32 
+0300 Subject: [PATCH 47/53] some minor improvements for dark mode UI --- style.css | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/style.css b/style.css index 86a265f61..7296ce912 100644 --- a/style.css +++ b/style.css @@ -208,20 +208,20 @@ button{ fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block span{ position: absolute; - top: -0.5em; + top: -0.7em; line-height: 1.2em; padding: 0; margin: 0 0.5em; background-color: white; - box-shadow: 0 0 5px 5px white; + box-shadow: 6px 0 6px 0px white, -6px 0 6px 0px white; z-index: 300; } .dark fieldset span.text-gray-500, .dark .gr-block.gr-box span.text-gray-500, .dark label.block span{ background-color: rgb(31, 41, 55); - box-shadow: 0 0 5px 5px rgb(31, 41, 55); + box-shadow: 6px 0 6px 0px rgb(31, 41, 55), -6px 0 6px 0px rgb(31, 41, 55); } #txt2img_column_batch, #img2img_column_batch{ From c0ee1488702d5a6ae35fbf7e0422f9f685394920 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 14:18:48 +0300 Subject: [PATCH 48/53] add support for running with gradio 3.9 installed --- modules/generation_parameters_copypaste.py | 4 ++-- modules/ui_tempdir.py | 23 ++++++++++++++++++++-- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index ec60319af..d94f11a3e 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -7,7 +7,7 @@ from pathlib import Path import gradio as gr from modules.shared import script_path -from modules import shared +from modules import shared, ui_tempdir import tempfile from PIL import Image @@ -39,7 +39,7 @@ def quote(text): def image_from_url_text(filedata): if type(filedata) == dict and filedata["is_file"]: filename = filedata["name"] - is_in_right_dir = any([filename in fileset for fileset in shared.demo.temp_file_sets]) + is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename) assert is_in_right_dir, 'trying to open image file outside of allowed directories' return Image.open(filename) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 363d449d3..21945235e 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -1,6 +1,7 @@ import os import tempfile from collections import namedtuple +from pathlib import Path import gradio as gr @@ -12,10 +13,28 @@ from modules import shared Savedfile = namedtuple("Savedfile", ["name"]) +def register_tmp_file(gradio, filename): + if hasattr(gradio, 'temp_file_sets'): # gradio 3.15 + gradio.temp_file_sets[0] = gradio.temp_file_sets[0] | {os.path.abspath(filename)} + + if hasattr(gradio, 'temp_dirs'): # gradio 3.9 + gradio.temp_dirs = gradio.temp_dirs | {os.path.abspath(os.path.dirname(filename))} + + +def check_tmp_file(gradio, filename): + if hasattr(gradio, 'temp_file_sets'): + return any([filename in fileset for fileset in gradio.temp_file_sets]) + + if hasattr(gradio, 'temp_dirs'): + return any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in gradio.temp_dirs) + + return False + + def save_pil_to_file(pil_image, dir=None): already_saved_as = getattr(pil_image, 'already_saved_as', None) if already_saved_as and os.path.isfile(already_saved_as): - shared.demo.temp_file_sets[0] = shared.demo.temp_file_sets[0] | {os.path.abspath(already_saved_as)} + register_tmp_file(shared.demo, already_saved_as) file_obj = Savedfile(already_saved_as) return file_obj @@ -45,7 +64,7 @@ def on_tmpdir_changed(): 
os.makedirs(shared.opts.temp_dir, exist_ok=True) - shared.demo.temp_file_sets[0] = shared.demo.temp_file_sets[0] | {os.path.abspath(shared.opts.temp_dir)} + register_tmp_file(shared.demo, os.path.join(shared.opts.temp_dir, "x")) def cleanup_tmpdr(): From e9fb9bb0c25f59109a816fc53c385bed58965c24 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 17:40:20 +0300 Subject: [PATCH 49/53] fix hires fix not working in API when user does not specify upscaler --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 4654570c0..a172af0ba 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -685,7 +685,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) - latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_default_mode + latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") if self.enable_hr and latent_scale_mode is None: assert len([x for x in shared.sd_upscalers if x.name == self.hr_upscaler]) > 0, f"could not find upscaler named {self.hr_upscaler}" From 2d5a5076bb2a0c05cc27d75a1bcadab7f32a46d0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 18:38:21 +0300 Subject: [PATCH 50/53] Make it so that upscalers are not repeated when restarting UI. --- modules/modelloader.py | 20 ++++++++++++++++++++ webui.py | 14 +++++++------- 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/modules/modelloader.py b/modules/modelloader.py index e647f6fa7..6a1a7ac83 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -123,6 +123,23 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None): pass +builtin_upscaler_classes = [] +forbidden_upscaler_classes = set() + + +def list_builtin_upscalers(): + load_upscalers() + + builtin_upscaler_classes.clear() + builtin_upscaler_classes.extend(Upscaler.__subclasses__()) + + +def forbid_loaded_nonbuiltin_upscalers(): + for cls in Upscaler.__subclasses__(): + if cls not in builtin_upscaler_classes: + forbidden_upscaler_classes.add(cls) + + def load_upscalers(): # We can only do this 'magic' method to dynamically load upscalers if they are referenced, # so we'll try to import any _model.py files before looking in __subclasses__ @@ -139,6 +156,9 @@ def load_upscalers(): datas = [] commandline_options = vars(shared.cmd_opts) for cls in Upscaler.__subclasses__(): + if cls in forbidden_upscaler_classes: + continue + name = cls.__name__ cmd_name = f"{name.lower().replace('upscaler', '')}_models_path" scaler = cls(commandline_options.get(cmd_name, None)) diff --git a/webui.py b/webui.py index 3aee87924..c7d55a978 100644 --- a/webui.py +++ b/webui.py @@ -1,4 +1,5 @@ import os +import sys import threading import time import importlib @@ -55,8 +56,8 @@ def initialize(): gfpgan.setup_model(cmd_opts.gfpgan_models_path) shared.face_restorers.append(modules.face_restoration.FaceRestoration()) + modelloader.list_builtin_upscalers() modules.scripts.load_scripts() - modelloader.load_upscalers() modules.sd_vae.refresh_vae_list() @@ -169,23 +170,22 @@ def webui(): 
modules.script_callbacks.app_started_callback(shared.demo, app) wait_on_server(shared.demo) + print('Restarting UI...') sd_samplers.set_samplers() - print('Reloading extensions') extensions.list_extensions() localization.list_localizations(cmd_opts.localizations_dir) - print('Reloading custom scripts') + modelloader.forbid_loaded_nonbuiltin_upscalers() modules.scripts.reload_scripts() modelloader.load_upscalers() - print('Reloading modules: modules.ui') - importlib.reload(modules.ui) - print('Refreshing Model List') + for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]: + importlib.reload(module) + modules.sd_models.list_models() - print('Restarting Gradio') if __name__ == "__main__": From 8f96f9289981a66741ba770d14f3d27ce335a0fb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 18:39:14 +0300 Subject: [PATCH 51/53] call script callbacks for reloaded model after loading embeddings --- modules/sd_models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index bff8d6c9d..b98b05fc2 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -324,12 +324,12 @@ def load_model(checkpoint_info=None): sd_model.eval() shared.sd_model = sd_model + sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) # Reload embeddings after model load as they may or may not fit the model + script_callbacks.model_loaded_callback(sd_model) print("Model loaded.") - sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload = True) # Reload embeddings after model load as they may or may not fit the model - return sd_model From 82cfc227d735c140447d5b8dca29a71ee9bde127 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 20:23:17 +0300 Subject: [PATCH 52/53] added licenses screen to settings, added footer, removed unused inpainting code --- README.md | 2 + html/footer.html | 9 + html/licenses.html | 392 ++++++++++++++++++++++++++++++++ modules/sd_hijack_inpainting.py | 232 ------------------- modules/ui.py | 15 +- style.css | 11 + 6 files changed, 427 insertions(+), 234 deletions(-) create mode 100644 html/footer.html create mode 100644 html/licenses.html diff --git a/README.md b/README.md index 556000fb8..88250a6bd 100644 --- a/README.md +++ b/README.md @@ -127,6 +127,8 @@ Here's how to add code to this repo: [Contributing](https://github.com/AUTOMATIC The documentation was moved from this README over to the project's [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki). ## Credits +Licenses for borrowed code can be found in `Settings -> Licenses` screen, and also in `html/licenses.html` file. + - Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers - k-diffusion - https://github.com/crowsonkb/k-diffusion.git - GFPGAN - https://github.com/TencentARC/GFPGAN.git diff --git a/html/footer.html b/html/footer.html new file mode 100644 index 000000000..a8f2adf7e --- /dev/null +++ b/html/footer.html @@ -0,0 +1,9 @@ +
+ API +  •  + Github +  •  + Gradio +  •  + Reload UI +
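Before the licenses file itself, a brief sketch of how an html/ fragment like the footer above can be read and attached to the UI as a gr.HTML block. This is illustrative only: load_html is a hypothetical helper, and the actual wiring lives in the modules/ui.py hunks of this patch, which are not reproduced here.

import os

import gradio as gr

from modules.paths import script_path  # root of the webui checkout


def load_html(filename):
    # read a file from the html/ directory, returning "" if it is missing
    path = os.path.join(script_path, "html", filename)
    if not os.path.exists(path):
        return ""
    with open(path, encoding="utf8") as file:
        return file.read()


with gr.Blocks() as demo:
    gr.HTML(load_html("footer.html"), elem_id="footer")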
diff --git a/html/licenses.html b/html/licenses.html new file mode 100644 index 000000000..9eeaa0726 --- /dev/null +++ b/html/licenses.html @@ -0,0 +1,392 @@ + + +

CodeFormer

+Parts of CodeFormer code had to be copied to be compatible with GFPGAN. +
+S-Lab License 1.0
+
+Copyright 2022 S-Lab
+
+Redistribution and use for non-commercial purpose in source and
+binary forms, with or without modification, are permitted provided
+that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in
+   the documentation and/or other materials provided with the
+   distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived
+   from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+In the event that redistribution and/or use for commercial purpose in
+source or binary forms, with or without modification is required,
+please contact the contributor(s) of the work.
+
+ + +

ESRGAN

+Code for architecture and reading models copied. +
+MIT License
+
+Copyright (c) 2021 victorca25
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+ +

Real-ESRGAN

+Some code is copied to support ESRGAN models. +
+BSD 3-Clause License
+
+Copyright (c) 2021, Xintao Wang
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ +

InvokeAI

+Some code for compatibility with OSX is taken from lstein's repository. +
+MIT License
+
+Copyright (c) 2022 InvokeAI Team
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+ +

LDSR

+Code added by contributors, most likely copied from this repository. +
+MIT License
+
+Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+CLIP Interrogator
+
+Small amounts of code were borrowed and reworked.
+
+MIT License
+
+Copyright (c) 2022 pharmapsychotic
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+SwinIR
+
+Code added by contributors, most likely copied from this repository.
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [2021] [SwinIR Authors]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py
index 06b75772f..3c214a357 100644
--- a/modules/sd_hijack_inpainting.py
+++ b/modules/sd_hijack_inpainting.py
@@ -12,191 +12,6 @@ from ldm.models.diffusion.ddpm import LatentDiffusion
 from ldm.models.diffusion.plms import PLMSSampler
 from ldm.models.diffusion.ddim import DDIMSampler, noise_like
 
-# =================================================================================================
-# Monkey patch DDIMSampler methods from RunwayML repo directly.
-# Adapted from:
-# https://github.com/runwayml/stable-diffusion/blob/main/ldm/models/diffusion/ddim.py
-# =================================================================================================
-@torch.no_grad()
-def sample_ddim(self,
-                S,
-                batch_size,
-                shape,
-                conditioning=None,
-                callback=None,
-                normals_sequence=None,
-                img_callback=None,
-                quantize_x0=False,
-                eta=0.,
-                mask=None,
-                x0=None,
-                temperature=1.,
-                noise_dropout=0.,
-                score_corrector=None,
-                corrector_kwargs=None,
-                verbose=True,
-                x_T=None,
-                log_every_t=100,
-                unconditional_guidance_scale=1.,
-                unconditional_conditioning=None,
-                # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
-                **kwargs
-                ):
-    if conditioning is not None:
-        if isinstance(conditioning, dict):
-            ctmp = conditioning[list(conditioning.keys())[0]]
-            while isinstance(ctmp, list):
-                ctmp = ctmp[0]
-            cbs = ctmp.shape[0]
-            if cbs != batch_size:
-                print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
-        else:
-            if conditioning.shape[0] != batch_size:
-                print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
-    self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
-    # sampling
-    C, H, W = shape
-    size = (batch_size, C, H, W)
-    print(f'Data shape for DDIM sampling is {size}, eta {eta}')
-
-    samples, intermediates = self.ddim_sampling(conditioning, size,
-                                                callback=callback,
-                                                img_callback=img_callback,
-                                                quantize_denoised=quantize_x0,
-                                                mask=mask, x0=x0,
-                                                ddim_use_original_steps=False,
-                                                noise_dropout=noise_dropout,
-                                                temperature=temperature,
-                                                score_corrector=score_corrector,
-                                                corrector_kwargs=corrector_kwargs,
-                                                x_T=x_T,
-                                                log_every_t=log_every_t,
-                                                unconditional_guidance_scale=unconditional_guidance_scale,
-                                                unconditional_conditioning=unconditional_conditioning,
-                                                )
-    return samples, intermediates
-@torch.no_grad()
-def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
-                  temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                  unconditional_guidance_scale=1., unconditional_conditioning=None):
-    b, *_, device = *x.shape, x.device
-
-    if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
-        e_t = self.model.apply_model(x, t, c)
-    else:
-        x_in = torch.cat([x] * 2)
-        t_in = torch.cat([t] * 2)
-        if isinstance(c, dict):
-            assert isinstance(unconditional_conditioning, dict)
-            c_in = dict()
-            for k in c:
-                if isinstance(c[k], list):
-                    c_in[k] = [
-                        torch.cat([unconditional_conditioning[k][i], c[k][i]])
-                        for i in range(len(c[k]))
-                    ]
-                else:
-                    c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
-        else:
-            c_in = torch.cat([unconditional_conditioning, c])
-        e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
-        e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
-
-    if score_corrector is not None:
-        assert self.model.parameterization == "eps"
-        e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
-
-    alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
-    alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
-    sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
-    sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
-    # select parameters corresponding to the currently considered timestep
-    a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
-    a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
-    sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
-    sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
-
-    # current prediction for x_0
-    pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
-    if quantize_denoised:
-        pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
-    # direction pointing to x_t
-    dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
-    noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
-    if noise_dropout > 0.:
-        noise = torch.nn.functional.dropout(noise, p=noise_dropout)
-    x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
-    return x_prev, pred_x0
-
-
-# =================================================================================================
-# Monkey patch PLMSSampler methods.
-# This one was not actually patched correctly in the RunwayML repo, but we can replicate the changes.
-# Adapted from:
-# https://github.com/CompVis/stable-diffusion/blob/main/ldm/models/diffusion/plms.py
-# =================================================================================================
-@torch.no_grad()
-def sample_plms(self,
-                S,
-                batch_size,
-                shape,
-                conditioning=None,
-                callback=None,
-                normals_sequence=None,
-                img_callback=None,
-                quantize_x0=False,
-                eta=0.,
-                mask=None,
-                x0=None,
-                temperature=1.,
-                noise_dropout=0.,
-                score_corrector=None,
-                corrector_kwargs=None,
-                verbose=True,
-                x_T=None,
-                log_every_t=100,
-                unconditional_guidance_scale=1.,
-                unconditional_conditioning=None,
-                # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
-                **kwargs
-                ):
-    if conditioning is not None:
-        if isinstance(conditioning, dict):
-            ctmp = conditioning[list(conditioning.keys())[0]]
-            while isinstance(ctmp, list):
-                ctmp = ctmp[0]
-            cbs = ctmp.shape[0]
-            if cbs != batch_size:
-                print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
-        else:
-            if conditioning.shape[0] != batch_size:
-                print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
-    self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
-    # sampling
-    C, H, W = shape
-    size = (batch_size, C, H, W)
-    # print(f'Data shape for PLMS sampling is {size}') # remove unnecessary message
-
-    samples, intermediates = self.plms_sampling(conditioning, size,
-                                                callback=callback,
-                                                img_callback=img_callback,
-                                                quantize_denoised=quantize_x0,
-                                                mask=mask, x0=x0,
-                                                ddim_use_original_steps=False,
-                                                noise_dropout=noise_dropout,
-                                                temperature=temperature,
-                                                score_corrector=score_corrector,
-                                                corrector_kwargs=corrector_kwargs,
-                                                x_T=x_T,
-                                                log_every_t=log_every_t,
-                                                unconditional_guidance_scale=unconditional_guidance_scale,
-                                                unconditional_conditioning=unconditional_conditioning,
-                                                )
-    return samples, intermediates
-
 @torch.no_grad()
 def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
@@ -280,44 +95,6 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F
     return x_prev, pred_x0, e_t
-# =================================================================================================
-# Monkey patch LatentInpaintDiffusion to load the checkpoint with a proper config.
-# Adapted from:
-# https://github.com/runwayml/stable-diffusion/blob/main/ldm/models/diffusion/ddpm.py
-# =================================================================================================
-
-@torch.no_grad()
-def get_unconditional_conditioning(self, batch_size, null_label=None):
-    if null_label is not None:
-        xc = null_label
-        if isinstance(xc, ListConfig):
-            xc = list(xc)
-        if isinstance(xc, dict) or isinstance(xc, list):
-            c = self.get_learned_conditioning(xc)
-        else:
-            if hasattr(xc, "to"):
-                xc = xc.to(self.device)
-            c = self.get_learned_conditioning(xc)
-    else:
-        # todo: get null label from cond_stage_model
-        raise NotImplementedError()
-    c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device)
-    return c
-
-
-class LatentInpaintDiffusion(LatentDiffusion):
-    def __init__(
-        self,
-        concat_keys=("mask", "masked_image"),
-        masked_image_key="masked_image",
-        *args,
-        **kwargs,
-    ):
-        super().__init__(*args, **kwargs)
-        self.masked_image_key = masked_image_key
-        assert self.masked_image_key in concat_keys
-        self.concat_keys = concat_keys
-
 
 def should_hijack_inpainting(checkpoint_info):
     ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
@@ -326,15 +103,6 @@
 def do_inpainting_hijack():
-    # most of this stuff seems to no longer be needed because it is already included into SD2.0
     # p_sample_plms is needed because PLMS can't work with dicts as conditionings
-    # this file should be cleaned up later if everything turns out to work fine
-
-    # ldm.models.diffusion.ddpm.get_unconditional_conditioning = get_unconditional_conditioning
-    # ldm.models.diffusion.ddpm.LatentInpaintDiffusion = LatentInpaintDiffusion
-
-    # ldm.models.diffusion.ddim.DDIMSampler.p_sample_ddim = p_sample_ddim
-    # ldm.models.diffusion.ddim.DDIMSampler.sample = sample_ddim
     ldm.models.diffusion.plms.PLMSSampler.p_sample_plms = p_sample_plms
-    # ldm.models.diffusion.plms.PLMSSampler.sample = sample_plms
diff --git a/modules/ui.py b/modules/ui.py
index f2e7c0d66..d941cb5fa 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1529,8 +1529,10 @@ def create_ui():
 
     with gr.Blocks(analytics_enabled=False) as settings_interface:
         with gr.Row():
-            settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit")
-            restart_gradio = gr.Button(value='Restart UI', variant='primary', elem_id="settings_restart_gradio")
+            with gr.Column(scale=6):
+                settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit")
+            with gr.Column():
+                restart_gradio = gr.Button(value='Reload UI', variant='primary', elem_id="settings_restart_gradio")
 
         result = gr.HTML(elem_id="settings_result")
 
@@ -1574,6 +1576,11 @@ def create_ui():
                 download_localization = gr.Button(value='Download localization template', elem_id="download_localization")
                 reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies")
 
+            if os.path.exists("html/licenses.html"):
+                with open("html/licenses.html", encoding="utf8") as file:
+                    with gr.TabItem("Licenses"):
+                        gr.HTML(file.read(), elem_id="licenses")
+
             gr.Button(value="Show all pages", elem_id="settings_show_all_pages")
 
         request_notifications.click(
@@ -1659,6 +1666,10 @@ def create_ui():
     if os.path.exists(os.path.join(script_path, "notification.mp3")):
         audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
 
+    if os.path.exists("html/footer.html"):
+        with open("html/footer.html", encoding="utf8") as file:
+            gr.HTML(file.read(), elem_id="footer")
+
     text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
     settings_submit.click(
         fn=wrap_gradio_call(run_settings, extra_outputs=[gr.update()]),
diff --git a/style.css b/style.css
index 7296ce912..2116ec3c8 100644
--- a/style.css
+++ b/style.css
@@ -616,6 +616,17 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h
     padding-bottom: 0.5em;
 }
 
+footer {
+    display: none !important;
+}
+
+#footer{
+    text-align: center;
+}
+
+#footer div{
+    display: inline-block;
+}
 
 /* The following handles localization for right-to-left (RTL) languages like Arabic.
 The rtl media type will only be activated by the logic in javascript/localization.js.

From 3e22e294135ed0327ce9d9738655ff03c53df3c0 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 3 Jan 2023 21:49:24 +0300
Subject: [PATCH 53/53] fix broken send to extras button

---
 modules/generation_parameters_copypaste.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index d94f11a3e..4baf4d9ae 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -37,7 +37,10 @@ def quote(text):
 
 
 def image_from_url_text(filedata):
-    if type(filedata) == dict and filedata["is_file"]:
+    if type(filedata) == list and len(filedata) > 0 and type(filedata[0]) == dict and filedata[0].get("is_file", False):
+        filedata = filedata[0]
+
+    if type(filedata) == dict and filedata.get("is_file", False):
         filename = filedata["name"]
         is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename)
         assert is_in_right_dir, 'trying to open image file outside of allowed directories'
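
Editor's note on the final patch: the "send to extras" button broke because newer Gradio versions can hand image_from_url_text a one-element list of file dicts rather than a bare dict, and the old code also indexed filedata["is_file"] directly, raising KeyError on payloads without that key. Below is a minimal sketch of just the normalization step the patch adds, written as a hypothetical standalone helper (the name unwrap_gallery_filedata is illustrative; the real image_from_url_text goes on to verify the temp directory and load the image, as shown in the diff above).

```python
# Hypothetical, simplified extract of the guard added in PATCH 53/53;
# only the dict keys ("is_file", "name") come from the actual patch.
def unwrap_gallery_filedata(filedata):
    # Gradio may deliver a gallery selection as [{"is_file": True, "name": ...}]
    # instead of a bare dict; unwrap the single-element list first.
    if isinstance(filedata, list) and len(filedata) > 0 and isinstance(filedata[0], dict) and filedata[0].get("is_file", False):
        filedata = filedata[0]

    # .get() instead of filedata["is_file"] so payloads missing the key
    # fall through cleanly instead of raising KeyError.
    if isinstance(filedata, dict) and filedata.get("is_file", False):
        return filedata["name"]

    return None
```

Handling both shapes in one place keeps every caller of the function working regardless of which payload format the installed Gradio version produces.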