From dbacc36a9244c55fa9cbfef6a913bf9ea1aa91d1 Mon Sep 17 00:00:00 2001 From: Ean Garvey Date: Fri, 8 Dec 2023 00:45:34 -0600 Subject: [PATCH 01/25] (WIP): Studio2 app infra and SD API UI/app structure and utility implementation. - Initializers for webui/API launch - Schedulers file for SD scheduling utilities - Additions to API-level utilities - Added embeddings module for LoRA, Lycoris, yada yada - Added image_processing module for resamplers, resize tools, transforms, and any image annotation (PNG metadata) - shared_cmd_opts module -- sorry, this is stable_args.py. It lives on. We still want to have some global control over the app exclusively from the command-line. At least we will be free from shark_args. - Moving around some utility pieces. - Try to make api+webui concurrency possible in index.py - SD UI -- this is just img2imgUI but hopefully a little better. - UI utilities for your nod logos and your gradio temps. Enable UI / bugfixes / tweaks --- apps/shark_studio/api/controlnet.py | 134 +++ apps/shark_studio/api/initializers.py | 87 ++ apps/shark_studio/api/sd.py | 308 +++++++ apps/shark_studio/api/utils.py | 397 ++++++++- apps/shark_studio/modules/checkpoint_proc.py | 66 ++ apps/shark_studio/modules/embeddings.py | 171 ++++ apps/shark_studio/modules/img_processing.py | 168 ++++ apps/shark_studio/modules/pipeline.py | 71 ++ apps/shark_studio/modules/schedulers.py | 30 + apps/shark_studio/modules/shared.py | 69 ++ apps/shark_studio/modules/shared_cmd_opts.py | 763 ++++++++++++++++++ apps/shark_studio/modules/timer.py | 111 +++ apps/shark_studio/web/api/compat.py | 310 +++++++ apps/shark_studio/web/api/sd.py | 1 + apps/shark_studio/web/configs/foo.json | 1 + apps/shark_studio/web/index.py | 448 +++------- apps/shark_studio/web/ui/chat.py | 8 +- apps/shark_studio/web/ui/common_events.py | 55 ++ .../shark_studio/web/ui/css/sd_dark_theme.css | 324 ++++++++ apps/shark_studio/web/ui/logos/nod-icon.png | Bin 0 -> 16058 bytes apps/shark_studio/web/ui/logos/nod-logo.png | Bin 0 -> 10641 bytes apps/shark_studio/web/ui/outputgallery.py | 416 ++++++++++ apps/shark_studio/web/ui/sd.py | 650 +++++++++++++++ apps/shark_studio/web/ui/utils.py | 33 + apps/shark_studio/web/utils/globals.py | 74 ++ .../web/utils/metadata/__init__.py | 6 + .../web/utils/metadata/csv_metadata.py | 45 ++ .../web/utils/metadata/display.py | 53 ++ .../web/utils/metadata/exif_metadata.py | 52 ++ .../shark_studio/web/utils/metadata/format.py | 143 ++++ .../web/utils/metadata/png_metadata.py | 222 +++++ apps/shark_studio/web/utils/state.py | 41 + apps/shark_studio/web/utils/tmp_configs.py | 77 ++ 33 files changed, 5003 insertions(+), 331 deletions(-) create mode 100644 apps/shark_studio/api/controlnet.py create mode 100644 apps/shark_studio/api/initializers.py create mode 100644 apps/shark_studio/api/sd.py create mode 100644 apps/shark_studio/modules/checkpoint_proc.py create mode 100644 apps/shark_studio/modules/embeddings.py create mode 100644 apps/shark_studio/modules/img_processing.py create mode 100644 apps/shark_studio/modules/pipeline.py create mode 100644 apps/shark_studio/modules/schedulers.py create mode 100644 apps/shark_studio/modules/shared.py create mode 100644 apps/shark_studio/modules/shared_cmd_opts.py create mode 100644 apps/shark_studio/modules/timer.py create mode 100644 apps/shark_studio/web/api/compat.py create mode 100644 apps/shark_studio/web/api/sd.py create mode 100644 apps/shark_studio/web/configs/foo.json create mode 100644 apps/shark_studio/web/ui/common_events.py create mode 100644 
apps/shark_studio/web/ui/css/sd_dark_theme.css create mode 100644 apps/shark_studio/web/ui/logos/nod-icon.png create mode 100644 apps/shark_studio/web/ui/logos/nod-logo.png create mode 100644 apps/shark_studio/web/ui/outputgallery.py create mode 100644 apps/shark_studio/web/ui/sd.py create mode 100644 apps/shark_studio/web/ui/utils.py create mode 100644 apps/shark_studio/web/utils/globals.py create mode 100644 apps/shark_studio/web/utils/metadata/__init__.py create mode 100644 apps/shark_studio/web/utils/metadata/csv_metadata.py create mode 100644 apps/shark_studio/web/utils/metadata/display.py create mode 100644 apps/shark_studio/web/utils/metadata/exif_metadata.py create mode 100644 apps/shark_studio/web/utils/metadata/format.py create mode 100644 apps/shark_studio/web/utils/metadata/png_metadata.py create mode 100644 apps/shark_studio/web/utils/state.py create mode 100644 apps/shark_studio/web/utils/tmp_configs.py diff --git a/apps/shark_studio/api/controlnet.py b/apps/shark_studio/api/controlnet.py new file mode 100644 index 0000000000..ea8cdf0cc9 --- /dev/null +++ b/apps/shark_studio/api/controlnet.py @@ -0,0 +1,134 @@ +# from turbine_models.custom_models.controlnet import control_adapter, preprocessors + + +class control_adapter: + def __init__( + self, + model: str, + ): + self.model = None + + def export_control_adapter_model(model_keyword): + return None + + def export_xl_control_adapter_model(model_keyword): + return None + + +class preprocessors: + def __init__( + self, + model: str, + ): + self.model = None + + def export_controlnet_model(model_keyword): + return None + + +control_adapter_map = { + "sd15": { + "canny": {"initializer": control_adapter.export_control_adapter_model}, + "openpose": { + "initializer": control_adapter.export_control_adapter_model + }, + "scribble": { + "initializer": control_adapter.export_control_adapter_model + }, + "zoedepth": { + "initializer": control_adapter.export_control_adapter_model + }, + }, + "sdxl": { + "canny": { + "initializer": control_adapter.export_xl_control_adapter_model + }, + }, +} +preprocessor_model_map = { + "canny": {"initializer": preprocessors.export_controlnet_model}, + "openpose": {"initializer": preprocessors.export_controlnet_model}, + "scribble": {"initializer": preprocessors.export_controlnet_model}, + "zoedepth": {"initializer": preprocessors.export_controlnet_model}, +} + + +class PreprocessorModel: + def __init__( + self, + hf_model_id, + device, + ): + self.model = None + + def compile(self, device): + print("compile not implemented for preprocessor.") + return + + def run(self, inputs): + print("run not implemented for preprocessor.") + return + + +def cnet_preview(model, input_img, stencils, images, preprocessed_hints): + if isinstance(input_image, PIL.Image.Image): + img_dict = { + "background": None, + "layers": [None], + "composite": input_image, + } + input_image = EditorValue(img_dict) + images[index] = input_image + if model: + stencils[index] = model + match model: + case "canny": + canny = CannyDetector() + result = canny( + np.array(input_image["composite"]), + 100, + 200, + ) + preprocessed_hints[index] = Image.fromarray(result) + return ( + Image.fromarray(result), + stencils, + images, + preprocessed_hints, + ) + case "openpose": + openpose = OpenposeDetector() + result = openpose(np.array(input_image["composite"])) + preprocessed_hints[index] = Image.fromarray(result[0]) + return ( + Image.fromarray(result[0]), + stencils, + images, + preprocessed_hints, + ) + case "zoedepth": + zoedepth = 
ZoeDetector() + result = zoedepth(np.array(input_image["composite"])) + preprocessed_hints[index] = Image.fromarray(result) + return ( + Image.fromarray(result), + stencils, + images, + preprocessed_hints, + ) + case "scribble": + preprocessed_hints[index] = input_image["composite"] + return ( + input_image["composite"], + stencils, + images, + preprocessed_hints, + ) + case _: + preprocessed_hints[index] = None + return ( + None, + stencils, + images, + preprocessed_hints, + ) diff --git a/apps/shark_studio/api/initializers.py b/apps/shark_studio/api/initializers.py new file mode 100644 index 0000000000..bbb273354c --- /dev/null +++ b/apps/shark_studio/api/initializers.py @@ -0,0 +1,87 @@ +import importlib +import logging +import os +import signal +import sys +import re +import warnings +import json +from threading import Thread + +from apps.shark_studio.modules.timer import startup_timer + + +def imports(): + import torch # noqa: F401 + + startup_timer.record("import torch") + warnings.filterwarnings( + action="ignore", category=DeprecationWarning, module="torch" + ) + warnings.filterwarnings( + action="ignore", category=UserWarning, module="torchvision" + ) + + import gradio # noqa: F401 + + startup_timer.record("import gradio") + + import apps.shark_studio.web.utils.globals as global_obj + + global_obj._init() + startup_timer.record("initialize globals") + + from apps.shark_studio.modules import ( + img_processing, + ) # noqa: F401 + from apps.shark_studio.modules.schedulers import scheduler_model_map + + startup_timer.record("other imports") + + +def initialize(): + configure_sigint_handler() + + # from apps.shark_studio.modules import modelloader + # modelloader.cleanup_models() + + # from apps.shark_studio.modules import sd_models + # sd_models.setup_model() + # startup_timer.record("setup SD model") + + # initialize_rest(reload_script_modules=False) + + +def initialize_rest(*, reload_script_modules=False): + """ + Called both from initialize() and when reloading the webui. + """ + # Keep this for adding reload options to the webUI. 
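
A minimal sketch of how these initializer helpers are expected to be driven, assuming the web entry point (apps/shark_studio/web/index.py, modified elsewhere in this patch) calls them once before the UI/API server is built; `launch_app` is a hypothetical placeholder, not part of this patch:

from apps.shark_studio.api import initializers


def launch_app():
    # Placeholder for the real entry point: build the Gradio Blocks /
    # FastAPI app and start serving. Not part of this patch.
    ...


if __name__ == "__main__":
    # Heavy imports (torch, gradio) and global-state setup are deferred to
    # imports() so startup_timer can attribute their cost.
    initializers.imports()
    # initialize() installs the SIGINT handler so Ctrl+C dumps thread
    # stacks and exits immediately instead of hanging on worker threads.
    initializers.initialize()
    launch_app()
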
+ + +def dumpstacks(): + import threading + import traceback + + id2name = {th.ident: th.name for th in threading.enumerate()} + code = [] + for threadId, stack in sys._current_frames().items(): + code.append(f"\n# Thread: {id2name.get(threadId, '')}({threadId})") + for filename, lineno, name, line in traceback.extract_stack(stack): + code.append(f"""File: "{filename}", line {lineno}, in {name}""") + if line: + code.append(" " + line.strip()) + + print("\n".join(code)) + + +def configure_sigint_handler(): + # make the program just exit at ctrl+c without waiting for anything + def sigint_handler(sig, frame): + print(f"Interrupted with signal {sig} in {frame}") + + dumpstacks() + + os._exit(0) + + signal.signal(signal.SIGINT, sigint_handler) diff --git a/apps/shark_studio/api/sd.py b/apps/shark_studio/api/sd.py new file mode 100644 index 0000000000..a601a068f7 --- /dev/null +++ b/apps/shark_studio/api/sd.py @@ -0,0 +1,308 @@ +from turbine_models.custom_models.sd_inference import clip, unet, vae +from shark.iree_utils.compile_utils import get_iree_compiled_module +from apps.shark_studio.api.utils import get_resource_path +from apps.shark_studio.api.controlnet import control_adapter_map +from apps.shark_studio.web.utils.state import status_label +from apps.shark_studio.modules.pipeline import SharkPipelineBase +import iree.runtime as ireert +import gc +import torch +import gradio as gr + +sd_model_map = { + "CompVis/stable-diffusion-v1-4": { + "clip": { + "initializer": clip.export_clip_model, + "max_tokens": 64, + }, + "vae_encode": { + "initializer": vae.export_vae_model, + "max_tokens": 64, + }, + "unet": { + "initializer": unet.export_unet_model, + "max_tokens": 512, + }, + "vae_decode": { + "initializer": vae.export_vae_model, + "max_tokens": 64, + }, + }, + "runwayml/stable-diffusion-v1-5": { + "clip": { + "initializer": clip.export_clip_model, + "max_tokens": 64, + }, + "vae_encode": { + "initializer": vae.export_vae_model, + "max_tokens": 64, + }, + "unet": { + "initializer": unet.export_unet_model, + "max_tokens": 512, + }, + "vae_decode": { + "initializer": vae.export_vae_model, + "max_tokens": 64, + }, + }, + "stabilityai/stable-diffusion-2-1-base": { + "clip": { + "initializer": clip.export_clip_model, + "max_tokens": 64, + }, + "vae_encode": { + "initializer": vae.export_vae_model, + "max_tokens": 64, + }, + "unet": { + "initializer": unet.export_unet_model, + "max_tokens": 512, + }, + "vae_decode": { + "initializer": vae.export_vae_model, + "max_tokens": 64, + }, + }, + "stabilityai/stable_diffusion-xl-1.0": { + "clip_1": { + "initializer": clip.export_clip_model, + "max_tokens": 64, + }, + "clip_2": { + "initializer": clip.export_clip_model, + "max_tokens": 64, + }, + "vae_encode": { + "initializer": vae.export_vae_model, + "max_tokens": 64, + }, + "unet": { + "initializer": unet.export_unet_model, + "max_tokens": 512, + }, + "vae_decode": { + "initializer": vae.export_vae_model, + "max_tokens": 64, + }, + }, +} + + +class StableDiffusion(SharkPipelineBase): + # This class is responsible for executing image generation and creating + # /managing a set of compiled modules to run Stable Diffusion. The init + # aims to be as general as possible, and the class will infer and compile + # a list of necessary modules or a combined "pipeline module" for a + # specified job based on the inference task. + # + # custom_model_ids: a dict of submodel + HF ID pairs for custom submodels. + # e.g. 
{"vae_decode": "madebyollin/sdxl-vae-fp16-fix"} + # + # embeddings: a dict of embedding checkpoints or model IDs to use when + # initializing the compiled modules. + + def __init__( + self, + base_model_id: str = "runwayml/stable-diffusion-v1-5", + height: int = 512, + width: int = 512, + precision: str = "fp16", + device: str = None, + custom_model_map: dict = {}, + embeddings: dict = {}, + import_ir: bool = True, + ): + super().__init__(sd_model_map[base_model_id], device, import_ir) + self.base_model_id = base_model_id + self.device = device + self.precision = precision + self.iree_module_dict = None + self.get_compiled_map() + + def prepare_pipeline(self, scheduler, custom_model_map): + return None + + def generate_images( + self, + prompt, + negative_prompt, + steps, + strength, + guidance_scale, + seed, + ondemand, + repeatable_seeds, + resample_type, + control_mode, + preprocessed_hints, + ): + return None, None, None, None, None + + +# NOTE: Each `hf_model_id` should have its own starting configuration. + +# model_vmfb_key = "" + + +def shark_sd_fn( + prompt, + negative_prompt, + image_dict, + height: int, + width: int, + steps: int, + strength: float, + guidance_scale: float, + seed: str | int, + batch_count: int, + batch_size: int, + scheduler: str, + base_model_id: str, + custom_weights: str, + custom_vae: str, + precision: str, + device: str, + lora_weights: str | list, + ondemand: bool, + repeatable_seeds: bool, + resample_type: str, + control_mode: str, + stencils: list, + images: list, + preprocessed_hints: list, + progress=gr.Progress(), +): + # Handling gradio ImageEditor datatypes so we have unified inputs to the SD API + for i, stencil in enumerate(stencils): + if images[i] is None and stencil is not None: + continue + elif stencil is None and any( + img is not None for img in [images[i], preprocessed_hints[i]] + ): + images[i] = None + preprocessed_hints[i] = None + elif images[i] is not None: + if isinstance(images[i], dict): + images[i] = images[i]["composite"] + images[i] = images[i].convert("RGB") + + if isinstance(image_dict, PIL.Image.Image): + image = image_dict.convert("RGB") + elif image_dict: + image = image_dict["image"].convert("RGB") + else: + image = None + is_img2img = False + if image: + ( + image, + _, + _, + ) = resize_stencil(image, width, height) + is_img2img = True + print("Performing Stable Diffusion Pipeline setup...") + + device_id = None + + from apps.shark_studio.modules.shared_cmd_opts import cmd_opts + import apps.shark_studio.web.utils.globals as global_obj + + custom_model_map = {} + if custom_weights != "None": + custom_model_map["unet"] = {"custom_weights": custom_weights} + if custom_vae != "None": + custom_model_map["vae"] = {"custom_weights": custom_vae} + if stencils: + for i, stencil in enumerate(stencils): + if "xl" not in base_model_id.lower(): + custom_model_map[f"control_adapter_{i}"] = stencil_adapter_map[ + "runwayml/stable-diffusion-v1-5" + ][stencil] + else: + custom_model_map[f"control_adapter_{i}"] = stencil_adapter_map[ + "stabilityai/stable-diffusion-xl-1.0" + ][stencil] + + submit_pipe_kwargs = { + "base_model_id": base_model_id, + "height": height, + "width": width, + "precision": precision, + "device": device, + "custom_model_map": custom_model_map, + "import_ir": cmd_opts.import_mlir, + "is_img2img": is_img2img, + } + submit_prep_kwargs = { + "scheduler": scheduler, + "custom_model_map": custom_model_map, + "embeddings": lora_weights, + } + submit_run_kwargs = { + "prompt": prompt, + "negative_prompt": 
negative_prompt, + "steps": steps, + "strength": strength, + "guidance_scale": guidance_scale, + "seed": seed, + "ondemand": ondemand, + "repeatable_seeds": repeatable_seeds, + "resample_type": resample_type, + "control_mode": control_mode, + "preprocessed_hints": preprocessed_hints, + } + + global sd_pipe + global sd_pipe_kwargs + + if sd_pipe_kwargs and sd_pipe_kwargs != submit_pipe_kwargs: + sd_pipe = None + sd_pipe_kwargs = submit_pipe_kwargs + gc.collect() + + if sd_pipe is None: + history[-1][-1] = "Getting the pipeline ready..." + yield history, "" + + # Initializes the pipeline and retrieves IR based on all + # parameters that are static in the turbine output format, + # which is currently MLIR in the torch dialect. + + sd_pipe = SharkStableDiffusionPipeline( + **submit_pipe_kwargs, + ) + + sd_pipe.prepare_pipe(**submit_prep_kwargs) + + for prompt, msg, exec_time in progress.tqdm( + out_imgs=sd_pipe.generate_images(**submit_run_kwargs), + desc="Generating Image...", + ): + text_output = get_generation_text_info( + seeds[: current_batch + 1], device + ) + save_output_img( + out_imgs[0], + seeds[current_batch], + extra_info, + ) + generated_imgs.extend(out_imgs) + yield generated_imgs, text_output, status_label( + "Stable Diffusion", current_batch + 1, batch_count, batch_size + ), stencils, images + + return generated_imgs, text_output, "", stencils, images + + +def cancel_sd(): + print("Inject call to cancel longer API calls.") + return + + +if __name__ == "__main__": + sd = StableDiffusion( + "runwayml/stable-diffusion-v1-5", + device="vulkan", + ) + print("model loaded") diff --git a/apps/shark_studio/api/utils.py b/apps/shark_studio/api/utils.py index 4072491cbf..a4f52dca24 100644 --- a/apps/shark_studio/api/utils.py +++ b/apps/shark_studio/api/utils.py @@ -1,12 +1,407 @@ import os import sys +import os +import numpy as np +import glob +from random import ( + randint, + seed as seed_random, + getstate as random_getstate, + setstate as random_setstate, +) + +from pathlib import Path +from safetensors.torch import load_file +from apps.shark_studio.modules.shared_cmd_opts import cmd_opts +from cpuinfo import get_cpu_info + +# TODO: migrate these utils to studio +from shark.iree_utils.vulkan_utils import ( + set_iree_vulkan_runtime_flags, + get_vulkan_target_triple, + get_iree_vulkan_runtime_flags, +) + +checkpoints_filetypes = ( + "*.ckpt", + "*.safetensors", +) def get_available_devices(): - return ["cpu-task"] + def get_devices_by_name(driver_name): + from shark.iree_utils._common import iree_device_map + + device_list = [] + try: + driver_name = iree_device_map(driver_name) + device_list_dict = get_all_devices(driver_name) + print(f"{driver_name} devices are available.") + except: + print(f"{driver_name} devices are not available.") + else: + cpu_name = get_cpu_info()["brand_raw"] + for i, device in enumerate(device_list_dict): + device_name = ( + cpu_name if device["name"] == "default" else device["name"] + ) + if "local" in driver_name: + device_list.append( + f"{device_name} => {driver_name.replace('local', 'cpu')}" + ) + else: + # for drivers with single devices + # let the default device be selected without any indexing + if len(device_list_dict) == 1: + device_list.append(f"{device_name} => {driver_name}") + else: + device_list.append( + f"{device_name} => {driver_name}://{i}" + ) + return device_list + + set_iree_runtime_flags() + + available_devices = [] + from shark.iree_utils.vulkan_utils import ( + get_all_vulkan_devices, + ) + + vulkaninfo_list = 
get_all_vulkan_devices() + vulkan_devices = [] + id = 0 + for device in vulkaninfo_list: + vulkan_devices.append(f"{device.strip()} => vulkan://{id}") + id += 1 + if id != 0: + print(f"vulkan devices are available.") + available_devices.extend(vulkan_devices) + metal_devices = get_devices_by_name("metal") + available_devices.extend(metal_devices) + cuda_devices = get_devices_by_name("cuda") + available_devices.extend(cuda_devices) + rocm_devices = get_devices_by_name("rocm") + available_devices.extend(rocm_devices) + cpu_device = get_devices_by_name("cpu-sync") + available_devices.extend(cpu_device) + cpu_device = get_devices_by_name("cpu-task") + available_devices.extend(cpu_device) + return available_devices + + +def set_init_device_flags(): + if "vulkan" in cmd_opts.device: + # set runtime flags for vulkan. + set_iree_runtime_flags() + + # set triple flag to avoid multiple calls to get_vulkan_triple_flag + device_name, cmd_opts.device = map_device_to_name_path(cmd_opts.device) + if not cmd_opts.iree_vulkan_target_triple: + triple = get_vulkan_target_triple(device_name) + if triple is not None: + cmd_opts.iree_vulkan_target_triple = triple + print( + f"Found device {device_name}. Using target triple " + f"{cmd_opts.iree_vulkan_target_triple}." + ) + elif "cuda" in cmd_opts.device: + cmd_opts.device = "cuda" + elif "metal" in cmd_opts.device: + device_name, cmd_opts.device = map_device_to_name_path(cmd_opts.device) + if not cmd_opts.iree_metal_target_platform: + triple = get_metal_target_triple(device_name) + if triple is not None: + cmd_opts.iree_metal_target_platform = triple.split("-")[-1] + print( + f"Found device {device_name}. Using target triple " + f"{cmd_opts.iree_metal_target_platform}." + ) + elif "cpu" in cmd_opts.device: + cmd_opts.device = "cpu" + + +def set_iree_runtime_flags(): + # TODO: This function should be device-agnostic and piped properly + # to general runtime driver init. + vulkan_runtime_flags = get_iree_vulkan_runtime_flags() + if cmd_opts.enable_rgp: + vulkan_runtime_flags += [ + f"--enable_rgp=true", + f"--vulkan_debug_utils=true", + ] + if cmd_opts.device_allocator_heap_key: + vulkan_runtime_flags += [ + f"--device_allocator=caching:device_local={cmd_opts.device_allocator_heap_key}", + ] + set_iree_vulkan_runtime_flags(flags=vulkan_runtime_flags) + + +def get_all_devices(driver_name): + """ + Inputs: driver_name + Returns a list of all the available devices for a given driver sorted by + the iree path names of the device as in --list_devices option in iree. + """ + from iree.runtime import get_driver + + driver = get_driver(driver_name) + device_list_src = driver.query_available_devices() + device_list_src.sort(key=lambda d: d["path"]) + return device_list_src def get_resource_path(relative_path): """Get absolute path to resource, works for dev and for PyInstaller""" base_path = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__))) return os.path.join(base_path, relative_path) + + +def get_generated_imgs_path() -> Path: + return Path( + cmd_opts.output_dir + if cmd_opts.output_dir + else get_resource_path("..\web\generated_imgs") + ) + + +def get_generated_imgs_todays_subdir() -> str: + return dt.now().strftime("%Y%m%d") + + +def create_checkpoint_folders(): + dir = ["vae", "lora"] + if not cmd_opts.ckpt_dir: + dir.insert(0, "models") + else: + if not os.path.isdir(cmd_opts.ckpt_dir): + sys.exit( + f"Invalid --ckpt_dir argument, " + f"{args.ckpt_dir} folder does not exists." 
+ ) + for root in dir: + Path(get_checkpoints_path(root)).mkdir(parents=True, exist_ok=True) + + +def get_checkpoints_path(model=""): + return get_resource_path(f"..\web\models\{model}") + + +def get_checkpoints(model="models"): + ckpt_files = [] + file_types = checkpoints_filetypes + if model == "lora": + file_types = file_types + ("*.pt", "*.bin") + for extn in file_types: + files = [ + os.path.basename(x) + for x in glob.glob(os.path.join(get_checkpoints_path(model), extn)) + ] + ckpt_files.extend(files) + return sorted(ckpt_files, key=str.casefold) + + +def get_checkpoint_pathfile(checkpoint_name, model="models"): + return os.path.join(get_checkpoints_path(model), checkpoint_name) + + +def get_device_mapping(driver, key_combination=3): + """This method ensures consistent device ordering when choosing + specific devices for execution + Args: + driver (str): execution driver (vulkan, cuda, rocm, etc) + key_combination (int, optional): choice for mapping value for + device name. + 1 : path + 2 : name + 3 : (name, path) + Defaults to 3. + Returns: + dict: map to possible device names user can input mapped to desired + combination of name/path. + """ + from shark.iree_utils._common import iree_device_map + + driver = iree_device_map(driver) + device_list = get_all_devices(driver) + device_map = dict() + + def get_output_value(dev_dict): + if key_combination == 1: + return f"{driver}://{dev_dict['path']}" + if key_combination == 2: + return dev_dict["name"] + if key_combination == 3: + return dev_dict["name"], f"{driver}://{dev_dict['path']}" + + # mapping driver name to default device (driver://0) + device_map[f"{driver}"] = get_output_value(device_list[0]) + for i, device in enumerate(device_list): + # mapping with index + device_map[f"{driver}://{i}"] = get_output_value(device) + # mapping with full path + device_map[f"{driver}://{device['path']}"] = get_output_value(device) + return device_map + + +def get_opt_flags(model, precision="fp16"): + iree_flags = [] + if len(cmd_opts.iree_vulkan_target_triple) > 0: + iree_flags.append( + f"-iree-vulkan-target-triple={cmd_opts.iree_vulkan_target_triple}" + ) + if "rocm" in cmd_opts.device: + rocm_args = get_iree_rocm_args() + iree_flags.extend(rocm_args) + if cmd_opts.iree_constant_folding == False: + iree_flags.append("--iree-opt-const-expr-hoisting=False") + iree_flags.append( + "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807" + ) + if cmd_opts.data_tiling == False: + iree_flags.append("--iree-opt-data-tiling=False") + + if "vae" not in model: + # Due to lack of support for multi-reduce, we always collapse reduction + # dims before dispatch formation right now. + iree_flags += ["--iree-flow-collapse-reduction-dims"] + return iree_flags + + +def map_device_to_name_path(device, key_combination=3): + """Gives the appropriate device data (supported name/path) for user + selected execution device + Args: + device (str): user + key_combination (int, optional): choice for mapping value for + device name. + 1 : path + 2 : name + 3 : (name, path) + Defaults to 3. 
+ Raises: + ValueError: + Returns: + str / tuple: returns the mapping str or tuple of mapping str for + the device depending on key_combination value + """ + driver = device.split("://")[0] + device_map = get_device_mapping(driver, key_combination) + try: + device_mapping = device_map[device] + except KeyError: + raise ValueError(f"Device '{device}' is not a valid device.") + return device_mapping + + def get_devices_by_name(driver_name): + from shark.iree_utils._common import iree_device_map + + device_list = [] + try: + driver_name = iree_device_map(driver_name) + device_list_dict = get_all_devices(driver_name) + print(f"{driver_name} devices are available.") + except: + print(f"{driver_name} devices are not available.") + else: + cpu_name = get_cpu_info()["brand_raw"] + for i, device in enumerate(device_list_dict): + device_name = ( + cpu_name if device["name"] == "default" else device["name"] + ) + if "local" in driver_name: + device_list.append( + f"{device_name} => {driver_name.replace('local', 'cpu')}" + ) + else: + # for drivers with single devices + # let the default device be selected without any indexing + if len(device_list_dict) == 1: + device_list.append(f"{device_name} => {driver_name}") + else: + device_list.append( + f"{device_name} => {driver_name}://{i}" + ) + return device_list + + set_iree_runtime_flags() + + available_devices = [] + from shark.iree_utils.vulkan_utils import ( + get_all_vulkan_devices, + ) + + vulkaninfo_list = get_all_vulkan_devices() + vulkan_devices = [] + id = 0 + for device in vulkaninfo_list: + vulkan_devices.append(f"{device.strip()} => vulkan://{id}") + id += 1 + if id != 0: + print(f"vulkan devices are available.") + available_devices.extend(vulkan_devices) + metal_devices = get_devices_by_name("metal") + available_devices.extend(metal_devices) + cuda_devices = get_devices_by_name("cuda") + available_devices.extend(cuda_devices) + rocm_devices = get_devices_by_name("rocm") + available_devices.extend(rocm_devices) + cpu_device = get_devices_by_name("cpu-sync") + available_devices.extend(cpu_device) + cpu_device = get_devices_by_name("cpu-task") + available_devices.extend(cpu_device) + return available_devices + + +# take a seed expression in an input format and convert it to +# a list of integers, where possible +def parse_seed_input(seed_input: str | list | int): + if isinstance(seed_input, str): + try: + seed_input = json.loads(seed_input) + except (ValueError, TypeError): + seed_input = None + + if isinstance(seed_input, int): + return [seed_input] + + if isinstance(seed_input, list) and all( + type(seed) is int for seed in seed_input + ): + return seed_input + + raise TypeError( + "Seed input must be an integer or an array of integers in JSON format" + ) + + +# Generate and return a new seed if the provided one is not in the +# supported range (including -1) +def sanitize_seed(seed: int | str): + seed = int(seed) + uint32_info = np.iinfo(np.uint32) + uint32_min, uint32_max = uint32_info.min, uint32_info.max + if seed < uint32_min or seed >= uint32_max: + seed = randint(uint32_min, uint32_max) + return seed + + +# take a seed expression in an input format and convert it to +# a list of integers, where possible +def parse_seed_input(seed_input: str | list | int): + if isinstance(seed_input, str): + try: + seed_input = json.loads(seed_input) + except (ValueError, TypeError): + seed_input = None + + if isinstance(seed_input, int): + return [seed_input] + + if isinstance(seed_input, list) and all( + type(seed) is int for seed in seed_input + ): 
+ return seed_input + + raise TypeError( + "Seed input must be an integer or an array of integers in JSON format" + ) diff --git a/apps/shark_studio/modules/checkpoint_proc.py b/apps/shark_studio/modules/checkpoint_proc.py new file mode 100644 index 0000000000..e924de4640 --- /dev/null +++ b/apps/shark_studio/modules/checkpoint_proc.py @@ -0,0 +1,66 @@ +import os +import json +import re +from pathlib import Path +from omegaconf import OmegaConf + + +def get_path_to_diffusers_checkpoint(custom_weights): + path = Path(custom_weights) + diffusers_path = path.parent.absolute() + diffusers_directory_name = os.path.join("diffusers", path.stem) + complete_path_to_diffusers = diffusers_path / diffusers_directory_name + complete_path_to_diffusers.mkdir(parents=True, exist_ok=True) + path_to_diffusers = complete_path_to_diffusers.as_posix() + return path_to_diffusers + + +def preprocessCKPT(custom_weights, is_inpaint=False): + path_to_diffusers = get_path_to_diffusers_checkpoint(custom_weights) + if next(Path(path_to_diffusers).iterdir(), None): + print("Checkpoint already loaded at : ", path_to_diffusers) + return + else: + print( + "Diffusers' checkpoint will be identified here : ", + path_to_diffusers, + ) + from_safetensors = ( + True if custom_weights.lower().endswith(".safetensors") else False + ) + # EMA weights usually yield higher quality images for inference but + # non-EMA weights have been yielding better results in our case. + # TODO: Add an option `--ema` (`--no-ema`) for users to specify if + # they want to go for EMA weight extraction or not. + extract_ema = False + print( + "Loading diffusers' pipeline from original stable diffusion checkpoint" + ) + num_in_channels = 9 if is_inpaint else 4 + pipe = download_from_original_stable_diffusion_ckpt( + checkpoint_path_or_dict=custom_weights, + extract_ema=extract_ema, + from_safetensors=from_safetensors, + num_in_channels=num_in_channels, + ) + pipe.save_pretrained(path_to_diffusers) + print("Loading complete") + + +def convert_original_vae(vae_checkpoint): + vae_state_dict = {} + for key in list(vae_checkpoint.keys()): + vae_state_dict["first_stage_model." 
+ key] = vae_checkpoint.get(key) + + config_url = ( + "https://raw.githubusercontent.com/CompVis/stable-diffusion/" + "main/configs/stable-diffusion/v1-inference.yaml" + ) + original_config_file = BytesIO(requests.get(config_url).content) + original_config = OmegaConf.load(original_config_file) + vae_config = create_vae_diffusers_config(original_config, image_size=512) + + converted_vae_checkpoint = convert_ldm_vae_checkpoint( + vae_state_dict, vae_config + ) + return converted_vae_checkpoint diff --git a/apps/shark_studio/modules/embeddings.py b/apps/shark_studio/modules/embeddings.py new file mode 100644 index 0000000000..d8cf544f81 --- /dev/null +++ b/apps/shark_studio/modules/embeddings.py @@ -0,0 +1,171 @@ +import os +import sys +import torch +import json +import safetensors +from safetensors.torch import load_file +from apps.shark_studio.api.utils import get_checkpoint_pathfile + + +def processLoRA(model, use_lora, splitting_prefix): + state_dict = "" + if ".safetensors" in use_lora: + state_dict = load_file(use_lora) + else: + state_dict = torch.load(use_lora) + alpha = 0.75 + visited = [] + + # directly update weight in model + process_unet = "te" not in splitting_prefix + for key in state_dict: + if ".alpha" in key or key in visited: + continue + + curr_layer = model + if ("text" not in key and process_unet) or ( + "text" in key and not process_unet + ): + layer_infos = ( + key.split(".")[0].split(splitting_prefix)[-1].split("_") + ) + else: + continue + + # find the target layer + temp_name = layer_infos.pop(0) + while len(layer_infos) > -1: + try: + curr_layer = curr_layer.__getattr__(temp_name) + if len(layer_infos) > 0: + temp_name = layer_infos.pop(0) + elif len(layer_infos) == 0: + break + except Exception: + if len(temp_name) > 0: + temp_name += "_" + layer_infos.pop(0) + else: + temp_name = layer_infos.pop(0) + + pair_keys = [] + if "lora_down" in key: + pair_keys.append(key.replace("lora_down", "lora_up")) + pair_keys.append(key) + else: + pair_keys.append(key) + pair_keys.append(key.replace("lora_up", "lora_down")) + + # update weight + if len(state_dict[pair_keys[0]].shape) == 4: + weight_up = ( + state_dict[pair_keys[0]] + .squeeze(3) + .squeeze(2) + .to(torch.float32) + ) + weight_down = ( + state_dict[pair_keys[1]] + .squeeze(3) + .squeeze(2) + .to(torch.float32) + ) + curr_layer.weight.data += alpha * torch.mm( + weight_up, weight_down + ).unsqueeze(2).unsqueeze(3) + else: + weight_up = state_dict[pair_keys[0]].to(torch.float32) + weight_down = state_dict[pair_keys[1]].to(torch.float32) + curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down) + # update visited list + for item in pair_keys: + visited.append(item) + return model + + +def update_lora_weight_for_unet(unet, use_lora): + extensions = [".bin", ".safetensors", ".pt"] + if not any([extension in use_lora for extension in extensions]): + # We assume if it is a HF ID with standalone LoRA weights. 
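+        # A bare identifier with no weight-file extension is treated as a
+        # HuggingFace Hub repo id; diffusers' load_attn_procs can resolve it
+        # directly and fetch the standalone LoRA weights.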
+ unet.load_attn_procs(use_lora) + return unet + + main_file_name = get_path_stem(use_lora) + if ".bin" in use_lora: + main_file_name += ".bin" + elif ".safetensors" in use_lora: + main_file_name += ".safetensors" + elif ".pt" in use_lora: + main_file_name += ".pt" + else: + sys.exit("Only .bin and .safetensors format for LoRA is supported") + + try: + dir_name = os.path.dirname(use_lora) + unet.load_attn_procs(dir_name, weight_name=main_file_name) + return unet + except: + return processLoRA(unet, use_lora, "lora_unet_") + + +def update_lora_weight(model, use_lora, model_name): + if "unet" in model_name: + return update_lora_weight_for_unet(model, use_lora) + try: + return processLoRA(model, use_lora, "lora_te_") + except: + return None + + +def get_lora_metadata(lora_filename): + # get the metadata from the file + filename = get_checkpoint_pathfile(lora_filename, "lora") + with safetensors.safe_open(filename, framework="pt", device="cpu") as f: + metadata = f.metadata() + + # guard clause for if there isn't any metadata + if not metadata: + return None + + # metadata is a dictionary of strings, the values of the keys we're + # interested in are actually json, and need to be loaded as such + tag_frequencies = json.loads(metadata.get("ss_tag_frequency", str("{}"))) + dataset_dirs = json.loads(metadata.get("ss_dataset_dirs", str("{}"))) + tag_dirs = [dir for dir in tag_frequencies.keys()] + + # gather the tag frequency information for all the datasets trained + all_frequencies = {} + for dataset in tag_dirs: + frequencies = sorted( + [entry for entry in tag_frequencies[dataset].items()], + reverse=True, + key=lambda x: x[1], + ) + + # get a figure for the total number of images processed for this dataset + # either then number actually listed or in its dataset_dir entry or + # the highest frequency's number if that doesn't exist + img_count = dataset_dirs.get(dir, {}).get( + "img_count", frequencies[0][1] + ) + + # add the dataset frequencies to the overall frequencies replacing the + # frequency counts on the tags with a percentage/ratio + all_frequencies.update( + [(entry[0], entry[1] / img_count) for entry in frequencies] + ) + + trained_model_id = " ".join( + [ + metadata.get("ss_sd_model_hash", ""), + metadata.get("ss_sd_model_name", ""), + metadata.get("ss_base_model_version", ""), + ] + ).strip() + + # return the topmost of all frequencies in all datasets + return { + "model": trained_model_id, + "frequencies": sorted( + all_frequencies.items(), reverse=True, key=lambda x: x[1] + ), + } diff --git a/apps/shark_studio/modules/img_processing.py b/apps/shark_studio/modules/img_processing.py new file mode 100644 index 0000000000..b5cf28ce47 --- /dev/null +++ b/apps/shark_studio/modules/img_processing.py @@ -0,0 +1,168 @@ +import os +import sys +from PIL import Image +from pathlib import Path + + +# save output images and the inputs corresponding to it. 
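+# Besides the image itself this records the generation parameters: a row is
+# appended to imgs_details.csv in the day's output folder, a "parameters"
+# text chunk is embedded when saving PNGs (--write_metadata_to_png), and an
+# optional per-image JSON sidecar is written (--save_metadata_to_json).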
+def save_output_img(output_img, img_seed, extra_info=None): + if extra_info is None: + extra_info = {} + generated_imgs_path = Path( + get_generated_imgs_path(), get_generated_imgs_todays_subdir() + ) + generated_imgs_path.mkdir(parents=True, exist_ok=True) + csv_path = Path(generated_imgs_path, "imgs_details.csv") + + prompt_slice = re.sub("[^a-zA-Z0-9]", "_", cmd_opts.prompts[0][:15]) + out_img_name = f"{dt.now().strftime('%H%M%S')}_{prompt_slice}_{img_seed}" + + img_model = cmd_opts.hf_model_id + if cmd_opts.ckpt_loc: + img_model = Path(os.path.basename(cmd_opts.ckpt_loc)).stem + + img_vae = None + if cmd_opts.custom_vae: + img_vae = Path(os.path.basename(cmd_opts.custom_vae)).stem + + img_lora = None + if cmd_opts.use_lora: + img_lora = Path(os.path.basename(cmd_opts.use_lora)).stem + + if cmd_opts.output_img_format == "jpg": + out_img_path = Path(generated_imgs_path, f"{out_img_name}.jpg") + output_img.save(out_img_path, quality=95, subsampling=0) + else: + out_img_path = Path(generated_imgs_path, f"{out_img_name}.png") + pngInfo = PngImagePlugin.PngInfo() + + if cmd_opts.write_metadata_to_png: + # Using a conditional expression caused problems, so setting a new + # variable for now. + if cmd_opts.use_hiresfix: + png_size_text = ( + f"{cmd_opts.hiresfix_width}x{cmd_opts.hiresfix_height}" + ) + else: + png_size_text = f"{cmd_opts.width}x{cmd_opts.height}" + + pngInfo.add_text( + "parameters", + f"{cmd_opts.prompts[0]}" + f"\nNegative prompt: {cmd_opts.negative_prompts[0]}" + f"\nSteps: {cmd_opts.steps}," + f"Sampler: {cmd_opts.scheduler}, " + f"CFG scale: {cmd_opts.guidance_scale}, " + f"Seed: {img_seed}," + f"Size: {png_size_text}, " + f"Model: {img_model}, " + f"VAE: {img_vae}, " + f"LoRA: {img_lora}", + ) + + output_img.save(out_img_path, "PNG", pnginfo=pngInfo) + + if cmd_opts.output_img_format not in ["png", "jpg"]: + print( + f"[ERROR] Format {cmd_opts.output_img_format} is not " + f"supported yet. Image saved as png instead." + f"Supported formats: png / jpg" + ) + + # To be as low-impact as possible to the existing CSV format, we append + # "VAE" and "LORA" to the end. However, it does not fit the hierarchy of + # importance for each data point. Something to consider. 
+ new_entry = { + "VARIANT": img_model, + "SCHEDULER": cmd_opts.scheduler, + "PROMPT": cmd_opts.prompts[0], + "NEG_PROMPT": cmd_opts.negative_prompts[0], + "SEED": img_seed, + "CFG_SCALE": cmd_opts.guidance_scale, + "PRECISION": cmd_opts.precision, + "STEPS": cmd_opts.steps, + "HEIGHT": cmd_opts.height + if not cmd_opts.use_hiresfix + else cmd_opts.hiresfix_height, + "WIDTH": cmd_opts.width + if not cmd_opts.use_hiresfix + else cmd_opts.hiresfix_width, + "MAX_LENGTH": cmd_opts.max_length, + "OUTPUT": out_img_path, + "VAE": img_vae, + "LORA": img_lora, + } + + new_entry.update(extra_info) + + csv_mode = "a" if os.path.isfile(csv_path) else "w" + with open(csv_path, csv_mode, encoding="utf-8") as csv_obj: + dictwriter_obj = DictWriter(csv_obj, fieldnames=list(new_entry.keys())) + if csv_mode == "w": + dictwriter_obj.writeheader() + dictwriter_obj.writerow(new_entry) + csv_obj.close() + + if cmd_opts.save_metadata_to_json: + del new_entry["OUTPUT"] + json_path = Path(generated_imgs_path, f"{out_img_name}.json") + with open(json_path, "w") as f: + json.dump(new_entry, f, indent=4) + + +resamplers = { + "Lanczos": Image.Resampling.LANCZOS, + "Nearest Neighbor": Image.Resampling.NEAREST, + "Bilinear": Image.Resampling.BILINEAR, + "Bicubic": Image.Resampling.BICUBIC, + "Hamming": Image.Resampling.HAMMING, + "Box": Image.Resampling.BOX, +} + +resampler_list = resamplers.keys() + + +# For stencil, the input image can be of any size, but we need to ensure that +# it conforms with our model constraints :- +# Both width and height should be in the range of [128, 768] and multiple of 8. +# This utility function performs the transformation on the input image while +# also maintaining the aspect ratio before sending it to the stencil pipeline. +def resize_stencil(image: Image.Image, width, height, resampler_type=None): + aspect_ratio = width / height + min_size = min(width, height) + if min_size < 128: + n_size = 128 + if width == min_size: + width = n_size + height = n_size / aspect_ratio + else: + height = n_size + width = n_size * aspect_ratio + width = int(width) + height = int(height) + n_width = width // 8 + n_height = height // 8 + n_width *= 8 + n_height *= 8 + + min_size = min(width, height) + if min_size > 768: + n_size = 768 + if width == min_size: + height = n_size + width = n_size * aspect_ratio + else: + width = n_size + height = n_size / aspect_ratio + width = int(width) + height = int(height) + n_width = width // 8 + n_height = height // 8 + n_width *= 8 + n_height *= 8 + if resampler_type in resamplers: + resampler = resamplers[resampler_type] + else: + resampler = resamplers["Nearest Neighbor"] + new_image = image.resize((n_width, n_height), resampler=resampler) + return new_image, n_width, n_height diff --git a/apps/shark_studio/modules/pipeline.py b/apps/shark_studio/modules/pipeline.py new file mode 100644 index 0000000000..c087175de4 --- /dev/null +++ b/apps/shark_studio/modules/pipeline.py @@ -0,0 +1,71 @@ +from shark.iree_utils.compile_utils import get_iree_compiled_module + + +class SharkPipelineBase: + # This class is a lightweight base for managing an + # inference API class. 
It should provide methods for: + # - compiling a set (model map) of torch IR modules + # - preparing weights for an inference job + # - loading weights for an inference job + # - utilites like benchmarks, tests + + def __init__( + self, + model_map: dict, + device: str, + import_mlir: bool = True, + ): + self.model_map = model_map + self.device = device + self.import_mlir = import_mlir + + def import_torch_ir(self, base_model_id): + for submodel in self.model_map: + hf_id = ( + submodel["custom_hf_id"] + if submodel["custom_hf_id"] + else base_model_id + ) + torch_ir = submodel["initializer"]( + hf_id, **submodel["init_kwargs"], compile_to="torch" + ) + submodel["tempfile_name"] = get_resource_path( + f"{submodel}.torch.tempfile" + ) + with open(submodel["tempfile_name"], "w+") as f: + f.write(torch_ir) + del torch_ir + gc.collect() + + def load_vmfb(self, submodel): + if self.iree_module_dict[submodel]: + print( + f".vmfb for {submodel} found at {self.iree_module_dict[submodel]['vmfb']}" + ) + elif self.model_map[submodel]["tempfile_name"]: + submodel["tempfile_name"] + + return submodel["vmfb"] + + def merge_custom_map(self, custom_model_map): + for submodel in custom_model_map: + for key in submodel: + self.model_map[submodel][key] = key + print(self.model_map) + + def get_compiled_map(self, device) -> None: + # this comes with keys: "vmfb", "config", and "temp_file_to_unlink". + for submodel in self.model_map: + if not self.iree_module_dict[submodel][vmfb]: + self.iree_module_dict[submodel] = get_iree_compiled_module( + submodel.tempfile_name, + device=self.device, + frontend="torch", + ) + # TODO: delete the temp file + + def run(self, submodel, inputs): + return + + def safe_name(name): + return name.replace("/", "_").replace("-", "_") diff --git a/apps/shark_studio/modules/schedulers.py b/apps/shark_studio/modules/schedulers.py new file mode 100644 index 0000000000..c62646f69c --- /dev/null +++ b/apps/shark_studio/modules/schedulers.py @@ -0,0 +1,30 @@ +# from shark_turbine.turbine_models.schedulers import export_scheduler_model + + +def export_scheduler_model(model): + return "None", "None" + + +scheduler_model_map = { + "EulerDiscrete": export_scheduler_model("EulerDiscreteScheduler"), + "EulerAncestralDiscrete": export_scheduler_model( + "EulerAncestralDiscreteScheduler" + ), + "LCM": export_scheduler_model("LCMScheduler"), + "LMSDiscrete": export_scheduler_model("LMSDiscreteScheduler"), + "PNDM": export_scheduler_model("PNDMScheduler"), + "DDPM": export_scheduler_model("DDPMScheduler"), + "DDIM": export_scheduler_model("DDIMScheduler"), + "DPMSolverMultistep": export_scheduler_model( + "DPMSolverMultistepScheduler" + ), + "KDPM2Discrete": export_scheduler_model("KDPM2DiscreteScheduler"), + "DEISMultistep": export_scheduler_model("DEISMultistepScheduler"), + "DPMSolverSinglestep": export_scheduler_model( + "DPMSolverSingleStepScheduler" + ), + "KDPM2AncestralDiscrete": export_scheduler_model( + "KDPM2AncestralDiscreteScheduler" + ), + "HeunDiscrete": export_scheduler_model("HeunDiscreteScheduler"), +} diff --git a/apps/shark_studio/modules/shared.py b/apps/shark_studio/modules/shared.py new file mode 100644 index 0000000000..d9dc3ea26e --- /dev/null +++ b/apps/shark_studio/modules/shared.py @@ -0,0 +1,69 @@ +import sys + +import gradio as gr + +from modules import ( + shared_cmd_options, + shared_gradio, + options, + shared_items, + sd_models_types, +) +from modules.paths_internal import ( + models_path, + script_path, + data_path, + sd_configs_path, + sd_default_config, + 
sd_model_file, + default_sd_model_file, + extensions_dir, + extensions_builtin_dir, +) # noqa: F401 +from modules import util + +cmd_opts = shared_cmd_options.cmd_opts +parser = shared_cmd_options.parser + +parallel_processing_allowed = True +styles_filename = cmd_opts.styles_file +config_filename = cmd_opts.ui_settings_file + +demo = None + +device = None + +weight_load_location = None + +state = None + +prompt_styles = None + +options_templates = None +opts = None +restricted_opts = None + +sd_model: sd_models_types.WebuiSdModel = None + +settings_components = None +"""assinged from ui.py, a mapping on setting names to gradio components repsponsible for those settings""" + +tab_names = [] + +sd_upscalers = [] + +clip_model = None + +progress_print_out = sys.stdout + +gradio_theme = gr.themes.Base() + +total_tqdm = None + +mem_mon = None + +reload_gradio_theme = shared_gradio.reload_gradio_theme + +list_checkpoint_tiles = shared_items.list_checkpoint_tiles +refresh_checkpoints = shared_items.refresh_checkpoints +list_samplers = shared_items.list_samplers diff --git a/apps/shark_studio/modules/shared_cmd_opts.py b/apps/shark_studio/modules/shared_cmd_opts.py new file mode 100644 index 0000000000..dfb166a52e --- /dev/null +++ b/apps/shark_studio/modules/shared_cmd_opts.py @@ -0,0 +1,763 @@ +import argparse +import os +from pathlib import Path + +from apps.shark_studio.modules.img_processing import resampler_list + + +def path_expand(s): + return Path(s).expanduser().resolve() + + +def is_valid_file(arg): + if not os.path.exists(arg): + return None + else: + return arg + + +p = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter +) + +############################################################################## +# Stable Diffusion Params +############################################################################## + +p.add_argument( + "-a", + "--app", + default="txt2img", + help="Which app to use, one of: txt2img, img2img, outpaint, inpaint.", +) +p.add_argument( + "-p", + "--prompts", + nargs="+", + default=[ + "a photo taken of the front of a super-car drifting on a road near " + "mountains at high speeds with smoke coming off the tires, front " + "angle, front point of view, trees in the mountains of the " + "background, ((sharp focus))" + ], + help="Text of which images to be generated.", +) + +p.add_argument( + "--negative_prompts", + nargs="+", + default=[ + "watermark, signature, logo, text, lowres, ((monochrome, grayscale)), " + "blurry, ugly, blur, oversaturated, cropped" + ], + help="Text you don't want to see in the generated image.", +) + +p.add_argument( + "--img_path", + type=str, + help="Path to the image input for img2img/inpainting.", +) + +p.add_argument( + "--steps", + type=int, + default=50, + help="The number of steps to do the sampling.", +) + +p.add_argument( + "--seed", + type=str, + default=-1, + help="The seed or list of seeds to use. 
-1 for a random one.", +) + +p.add_argument( + "--batch_size", + type=int, + default=1, + choices=range(1, 4), + help="The number of inferences to be made in a single `batch_count`.", +) + +p.add_argument( + "--height", + type=int, + default=512, + choices=range(128, 1025, 8), + help="The height of the output image.", +) + +p.add_argument( + "--width", + type=int, + default=512, + choices=range(128, 1025, 8), + help="The width of the output image.", +) + +p.add_argument( + "--guidance_scale", + type=float, + default=7.5, + help="The value to be used for guidance scaling.", +) + +p.add_argument( + "--noise_level", + type=int, + default=20, + help="The value to be used for noise level of upscaler.", +) + +p.add_argument( + "--max_length", + type=int, + default=64, + help="Max length of the tokenizer output, options are 64 and 77.", +) + +p.add_argument( + "--max_embeddings_multiples", + type=int, + default=5, + help="The max multiple length of prompt embeddings compared to the max " + "output length of text encoder.", +) + +p.add_argument( + "--strength", + type=float, + default=0.8, + help="The strength of change applied on the given input image for " + "img2img.", +) + +p.add_argument( + "--use_hiresfix", + type=bool, + default=False, + help="Use Hires Fix to do higher resolution images, while trying to " + "avoid the issues that come with it. This is accomplished by first " + "generating an image using txt2img, then running it through img2img.", +) + +p.add_argument( + "--hiresfix_height", + type=int, + default=768, + choices=range(128, 769, 8), + help="The height of the Hires Fix image.", +) + +p.add_argument( + "--hiresfix_width", + type=int, + default=768, + choices=range(128, 769, 8), + help="The width of the Hires Fix image.", +) + +p.add_argument( + "--hiresfix_strength", + type=float, + default=0.6, + help="The denoising strength to apply for the Hires Fix.", +) + +p.add_argument( + "--resample_type", + type=str, + default="Nearest Neighbor", + choices=resampler_list, + help="The resample type to use when resizing an image before being run " + "through stable diffusion.", +) + +############################################################################## +# Stable Diffusion Training Params +############################################################################## + +p.add_argument( + "--lora_save_dir", + type=str, + default="models/lora/", + help="Directory to save the lora fine tuned model.", +) + +p.add_argument( + "--training_images_dir", + type=str, + default="models/lora/training_images/", + help="Directory containing images that are an example of the prompt.", +) + +p.add_argument( + "--training_steps", + type=int, + default=2000, + help="The number of steps to train.", +) + +############################################################################## +# Inpainting and Outpainting Params +############################################################################## + +p.add_argument( + "--mask_path", + type=str, + help="Path to the mask image input for inpainting.", +) + +p.add_argument( + "--inpaint_full_res", + default=False, + action=argparse.BooleanOptionalAction, + help="If inpaint only masked area or whole picture.", +) + +p.add_argument( + "--inpaint_full_res_padding", + type=int, + default=32, + choices=range(0, 257, 4), + help="Number of pixels for only masked padding.", +) + +p.add_argument( + "--pixels", + type=int, + default=128, + choices=range(8, 257, 8), + help="Number of expended pixels for one direction for outpainting.", +) + +p.add_argument( + 
"--mask_blur", + type=int, + default=8, + choices=range(0, 65), + help="Number of blur pixels for outpainting.", +) + +p.add_argument( + "--left", + default=False, + action=argparse.BooleanOptionalAction, + help="If extend left for outpainting.", +) + +p.add_argument( + "--right", + default=False, + action=argparse.BooleanOptionalAction, + help="If extend right for outpainting.", +) + +p.add_argument( + "--up", + "--top", + default=False, + action=argparse.BooleanOptionalAction, + help="If extend top for outpainting.", +) + +p.add_argument( + "--down", + "--bottom", + default=False, + action=argparse.BooleanOptionalAction, + help="If extend bottom for outpainting.", +) + +p.add_argument( + "--noise_q", + type=float, + default=1.0, + help="Fall-off exponent for outpainting (lower=higher detail) " + "(min=0.0, max=4.0).", +) + +p.add_argument( + "--color_variation", + type=float, + default=0.05, + help="Color variation for outpainting (min=0.0, max=1.0).", +) + +############################################################################## +# Model Config and Usage Params +############################################################################## + +p.add_argument( + "--device", type=str, default="vulkan", help="Device to run the model." +) + +p.add_argument( + "--precision", type=str, default="fp16", help="Precision to run the model." +) + +p.add_argument( + "--import_mlir", + default=True, + action=argparse.BooleanOptionalAction, + help="Imports the model from torch module to shark_module otherwise " + "downloads the model from shark_tank.", +) + +p.add_argument( + "--use_tuned", + default=False, + action=argparse.BooleanOptionalAction, + help="Download and use the tuned version of the model if available.", +) + +p.add_argument( + "--use_base_vae", + default=False, + action=argparse.BooleanOptionalAction, + help="Do conversion from the VAE output to pixel space on cpu.", +) + +p.add_argument( + "--scheduler", + type=str, + default="SharkEulerDiscrete", + help="Other supported schedulers are [DDIM, PNDM, LMSDiscrete, " + "DPMSolverMultistep, DPMSolverMultistep++, DPMSolverMultistepKarras, " + "DPMSolverMultistepKarras++, EulerDiscrete, EulerAncestralDiscrete, " + "DEISMultistep, KDPM2AncestralDiscrete, DPMSolverSinglestep, DDPM, " + "HeunDiscrete].", +) + +p.add_argument( + "--output_img_format", + type=str, + default="png", + help="Specify the format in which output image is save. 
" + "Supported options: jpg / png.", +) + +p.add_argument( + "--output_dir", + type=str, + default=None, + help="Directory path to save the output images and json.", +) + +p.add_argument( + "--batch_count", + type=int, + default=1, + help="Number of batches to be generated with random seeds in " + "single execution.", +) + +p.add_argument( + "--repeatable_seeds", + default=False, + action=argparse.BooleanOptionalAction, + help="The seed of the first batch will be used as the rng seed to " + "generate the subsequent seeds for subsequent batches in that run.", +) + +p.add_argument( + "--ckpt_loc", + type=str, + default="", + help="Path to SD's .ckpt file.", +) + +p.add_argument( + "--custom_vae", + type=str, + default="", + help="HuggingFace repo-id or path to SD model's checkpoint whose VAE " + "needs to be plugged in.", +) + +p.add_argument( + "--hf_model_id", + type=str, + default="stabilityai/stable-diffusion-2-1-base", + help="The repo-id of hugging face.", +) + +p.add_argument( + "--low_cpu_mem_usage", + default=False, + action=argparse.BooleanOptionalAction, + help="Use the accelerate package to reduce cpu memory consumption.", +) + +p.add_argument( + "--attention_slicing", + type=str, + default="none", + help="Amount of attention slicing to use (one of 'max', 'auto', 'none', " + "or an integer).", +) + +p.add_argument( + "--use_stencil", + choices=["canny", "openpose", "scribble", "zoedepth"], + help="Enable the stencil feature.", +) + +p.add_argument( + "--control_mode", + choices=["Prompt", "Balanced", "Controlnet"], + default="Balanced", + help="How Controlnet injection should be prioritized.", +) + +p.add_argument( + "--use_lora", + type=str, + default="", + help="Use standalone LoRA weight using a HF ID or a checkpoint " + "file (~3 MB).", +) + +p.add_argument( + "--use_quantize", + type=str, + default="none", + help="Runs the quantized version of stable diffusion model. " + "This is currently in experimental phase. " + "Currently, only runs the stable-diffusion-2-1-base model in " + "int8 quantization.", +) + +p.add_argument( + "--lowvram", + default=False, + action=argparse.BooleanOptionalAction, + help="Load and unload models for low VRAM.", +) + +p.add_argument( + "--hf_auth_token", + type=str, + default=None, + help="Specify your own huggingface authentication tokens for models like Llama2.", +) + +p.add_argument( + "--device_allocator_heap_key", + type=str, + default="", + help="Specify heap key for device caching allocator." + "Expected form: max_allocation_size;max_allocation_capacity;max_free_allocation_count" + "Example: --device_allocator_heap_key='*;1gib' (will limit caching on device to 1 gigabyte)", +) + +p.add_argument( + "--custom_model_map", + type=str, + default="", + help="path to custom model map to import. This should be a .json file", +) +############################################################################## +# IREE - Vulkan supported flags +############################################################################## + +p.add_argument( + "--iree_vulkan_target_triple", + type=str, + default="", + help="Specify target triple for vulkan.", +) + +p.add_argument( + "--iree_metal_target_platform", + type=str, + default="", + help="Specify target triple for metal.", +) + +############################################################################## +# Misc. 
Debug and Optimization flags +############################################################################## + +p.add_argument( + "--use_compiled_scheduler", + default=True, + action=argparse.BooleanOptionalAction, + help="Use the default scheduler precompiled into the model if available.", +) + +p.add_argument( + "--local_tank_cache", + default="", + help="Specify where to save downloaded shark_tank artifacts. " + "If this is not set, the default is ~/.local/shark_tank/.", +) + +p.add_argument( + "--dump_isa", + default=False, + action="store_true", + help="When enabled call amdllpc to get ISA dumps. " + "Use with dispatch benchmarks.", +) + +p.add_argument( + "--dispatch_benchmarks", + default=None, + help="Dispatches to return benchmark data on. " + 'Use "All" for all, and None for none.', +) + +p.add_argument( + "--dispatch_benchmarks_dir", + default="temp_dispatch_benchmarks", + help="Directory where you want to store dispatch data " + 'generated with "--dispatch_benchmarks".', +) + +p.add_argument( + "--enable_rgp", + default=False, + action=argparse.BooleanOptionalAction, + help="Flag for inserting debug frames between iterations " + "for use with rgp.", +) + +p.add_argument( + "--hide_steps", + default=True, + action=argparse.BooleanOptionalAction, + help="Flag for hiding the details of iteration/sec for each step.", +) + +p.add_argument( + "--warmup_count", + type=int, + default=0, + help="Flag setting warmup count for CLIP and VAE [>= 0].", +) + +p.add_argument( + "--clear_all", + default=False, + action=argparse.BooleanOptionalAction, + help="Flag to clear all mlir and vmfb from common locations. " + "Recompiling will take several minutes.", +) + +p.add_argument( + "--save_metadata_to_json", + default=False, + action=argparse.BooleanOptionalAction, + help="Flag for whether or not to save a generation information " + "json file with the image.", +) + +p.add_argument( + "--write_metadata_to_png", + default=True, + action=argparse.BooleanOptionalAction, + help="Flag for whether or not to save generation information in " + "PNG chunk text to generated images.", +) + +p.add_argument( + "--import_debug", + default=False, + action=argparse.BooleanOptionalAction, + help="If import_mlir is True, saves mlir via the debug option " + "in shark importer. Does nothing if import_mlir is false (the default).", +) + +p.add_argument( + "--compile_debug", + default=False, + action=argparse.BooleanOptionalAction, + help="Flag to toggle debug assert/verify flags for imported IR in the" + "iree-compiler. 
Default to false.", +) + +p.add_argument( + "--iree_constant_folding", + default=True, + action=argparse.BooleanOptionalAction, + help="Controls constant folding in iree-compile for all SD models.", +) + +p.add_argument( + "--data_tiling", + default=False, + action=argparse.BooleanOptionalAction, + help="Controls data tiling in iree-compile for all SD models.", +) + +############################################################################## +# Web UI flags +############################################################################## + +p.add_argument( + "--webui", + default=True, + action=argparse.BooleanOptionalAction, + help="controls whether the webui is launched.", +) + +p.add_argument( + "--progress_bar", + default=True, + action=argparse.BooleanOptionalAction, + help="Flag for removing the progress bar animation during " + "image generation.", +) + +p.add_argument( + "--ckpt_dir", + type=str, + default="", + help="Path to directory where all .ckpts are stored in order to populate " + "them in the web UI.", +) +# TODO: replace API flag when these can be run together +p.add_argument( + "--ui", + type=str, + default="app" if os.name == "nt" else "web", + help="One of: [api, app, web].", +) + +p.add_argument( + "--share", + default=False, + action=argparse.BooleanOptionalAction, + help="Flag for generating a public URL.", +) + +p.add_argument( + "--server_port", + type=int, + default=8080, + help="Flag for setting server port.", +) + +p.add_argument( + "--api", + default=False, + action=argparse.BooleanOptionalAction, + help="Flag for enabling rest API.", +) + +p.add_argument( + "--api_accept_origin", + action="append", + type=str, + help="An origin to be accepted by the REST api for Cross Origin" + "Resource Sharing (CORS). Use multiple times for multiple origins, " + 'or use --api_accept_origin="*" to accept all origins. If no origins ' + "are set no CORS headers will be returned by the api. 
Use, for " + "instance, if you need to access the REST api from Javascript running " + "in a web browser.", +) + +p.add_argument( + "--debug", + default=False, + action=argparse.BooleanOptionalAction, + help="Flag for enabling debugging log in WebUI.", +) + +p.add_argument( + "--output_gallery", + default=True, + action=argparse.BooleanOptionalAction, + help="Flag for removing the output gallery tab, and avoid exposing " + "images under --output_dir in the UI.", +) + +p.add_argument( + "--output_gallery_followlinks", + default=False, + action=argparse.BooleanOptionalAction, + help="Flag for whether the output gallery tab in the UI should " + "follow symlinks when listing subdirectories under --output_dir.", +) + + +############################################################################## +# SD model auto-annotation flags +############################################################################## + +p.add_argument( + "--annotation_output", + type=path_expand, + default="./", + help="Directory to save the annotated mlir file.", +) + +p.add_argument( + "--annotation_model", + type=str, + default="unet", + help="Options are unet and vae.", +) + +p.add_argument( + "--save_annotation", + default=False, + action=argparse.BooleanOptionalAction, + help="Save annotated mlir file.", +) +############################################################################## +# SD model auto-tuner flags +############################################################################## + +p.add_argument( + "--tuned_config_dir", + type=path_expand, + default="./", + help="Directory to save the tuned config file.", +) + +p.add_argument( + "--num_iters", + type=int, + default=400, + help="Number of iterations for tuning.", +) + +p.add_argument( + "--search_op", + type=str, + default="all", + help="Op to be optimized, options are matmul, bmm, conv and all.", +) + +############################################################################## +# DocuChat Flags +############################################################################## + +p.add_argument( + "--run_docuchat_web", + default=False, + action=argparse.BooleanOptionalAction, + help="Specifies whether the docuchat's web version is running or not.", +) + +############################################################################## +# rocm Flags +############################################################################## + +p.add_argument( + "--iree_rocm_target_chip", + type=str, + default="", + help="Add the rocm device architecture ex gfx1100, gfx90a, etc. 
Use `hipinfo` " + "or `iree-run-module --dump_devices=rocm` or `hipinfo` to get desired arch name", +) + +cmd_opts, unknown = p.parse_known_args() +if cmd_opts.import_debug: + os.environ["IREE_SAVE_TEMPS"] = os.path.join( + os.getcwd(), cmd_opts.hf_model_id.replace("/", "_") + ) diff --git a/apps/shark_studio/modules/timer.py b/apps/shark_studio/modules/timer.py new file mode 100644 index 0000000000..8fd1e6a7df --- /dev/null +++ b/apps/shark_studio/modules/timer.py @@ -0,0 +1,111 @@ +import time +import argparse + + +class TimerSubcategory: + def __init__(self, timer, category): + self.timer = timer + self.category = category + self.start = None + self.original_base_category = timer.base_category + + def __enter__(self): + self.start = time.time() + self.timer.base_category = ( + self.original_base_category + self.category + "/" + ) + self.timer.subcategory_level += 1 + + if self.timer.print_log: + print(f"{' ' * self.timer.subcategory_level}{self.category}:") + + def __exit__(self, exc_type, exc_val, exc_tb): + elapsed_for_subcategroy = time.time() - self.start + self.timer.base_category = self.original_base_category + self.timer.add_time_to_record( + self.original_base_category + self.category, + elapsed_for_subcategroy, + ) + self.timer.subcategory_level -= 1 + self.timer.record(self.category, disable_log=True) + + +class Timer: + def __init__(self, print_log=False): + self.start = time.time() + self.records = {} + self.total = 0 + self.base_category = "" + self.print_log = print_log + self.subcategory_level = 0 + + def elapsed(self): + end = time.time() + res = end - self.start + self.start = end + return res + + def add_time_to_record(self, category, amount): + if category not in self.records: + self.records[category] = 0 + + self.records[category] += amount + + def record(self, category, extra_time=0, disable_log=False): + e = self.elapsed() + + self.add_time_to_record(self.base_category + category, e + extra_time) + + self.total += e + extra_time + + if self.print_log and not disable_log: + print( + f"{' ' * self.subcategory_level}{category}: done in {e + extra_time:.3f}s" + ) + + def subcategory(self, name): + self.elapsed() + + subcat = TimerSubcategory(self, name) + return subcat + + def summary(self): + res = f"{self.total:.1f}s" + + additions = [ + (category, time_taken) + for category, time_taken in self.records.items() + if time_taken >= 0.1 and "/" not in category + ] + if not additions: + return res + + res += " (" + res += ", ".join( + [ + f"{category}: {time_taken:.1f}s" + for category, time_taken in additions + ] + ) + res += ")" + + return res + + def dump(self): + return {"total": self.total, "records": self.records} + + def reset(self): + self.__init__() + + +parser = argparse.ArgumentParser(add_help=False) +parser.add_argument( + "--log-startup", + action="store_true", + help="print a detailed log of what's happening at startup", +) +args = parser.parse_known_args()[0] + +startup_timer = Timer(print_log=args.log_startup) + +startup_record = None diff --git a/apps/shark_studio/web/api/compat.py b/apps/shark_studio/web/api/compat.py new file mode 100644 index 0000000000..80399505c4 --- /dev/null +++ b/apps/shark_studio/web/api/compat.py @@ -0,0 +1,310 @@ +import base64 +import io +import os +import time +import datetime +import uvicorn +import ipaddress +import requests +import gradio as gr +from threading import Lock +from io import BytesIO +from fastapi import APIRouter, Depends, FastAPI, Request, Response +from fastapi.security import HTTPBasic, HTTPBasicCredentials 
+from fastapi.exceptions import HTTPException +from fastapi.responses import JSONResponse +from fastapi.encoders import jsonable_encoder + +from apps.shark_studio.modules.img_processing import sampler_list +from sdapi_v1 import shark_sd_api +from api.llm import chat_api + + +def decode_base64_to_image(encoding): + if encoding.startswith("http://") or encoding.startswith("https://"): + if not opts.api_enable_requests: + raise HTTPException(status_code=500, detail="Requests not allowed") + + if opts.api_forbid_local_requests and not verify_url(encoding): + raise HTTPException( + status_code=500, detail="Request to local resource not allowed" + ) + + headers = ( + {"user-agent": opts.api_useragent} if opts.api_useragent else {} + ) + response = requests.get(encoding, timeout=30, headers=headers) + try: + image = Image.open(BytesIO(response.content)) + return image + except Exception as e: + raise HTTPException( + status_code=500, detail="Invalid image url" + ) from e + + if encoding.startswith("data:image/"): + encoding = encoding.split(";")[1].split(",")[1] + try: + image = Image.open(BytesIO(base64.b64decode(encoding))) + return image + except Exception as e: + raise HTTPException( + status_code=500, detail="Invalid encoded image" + ) from e + + +def encode_pil_to_base64(image): + with io.BytesIO() as output_bytes: + if opts.samples_format.lower() == "png": + use_metadata = False + metadata = PngImagePlugin.PngInfo() + for key, value in image.info.items(): + if isinstance(key, str) and isinstance(value, str): + metadata.add_text(key, value) + use_metadata = True + image.save( + output_bytes, + format="PNG", + pnginfo=(metadata if use_metadata else None), + quality=opts.jpeg_quality, + ) + + elif opts.samples_format.lower() in ("jpg", "jpeg", "webp"): + if image.mode == "RGBA": + image = image.convert("RGB") + parameters = image.info.get("parameters", None) + exif_bytes = piexif.dump( + { + "Exif": { + piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump( + parameters or "", encoding="unicode" + ) + } + } + ) + if opts.samples_format.lower() in ("jpg", "jpeg"): + image.save( + output_bytes, + format="JPEG", + exif=exif_bytes, + quality=opts.jpeg_quality, + ) + else: + image.save( + output_bytes, + format="WEBP", + exif=exif_bytes, + quality=opts.jpeg_quality, + ) + + else: + raise HTTPException(status_code=500, detail="Invalid image format") + + bytes_data = output_bytes.getvalue() + + return base64.b64encode(bytes_data) + + +def api_middleware(app: FastAPI): + rich_available = False + try: + if os.environ.get("WEBUI_RICH_EXCEPTIONS", None) is not None: + import anyio # importing just so it can be placed on silent list + import starlette # importing just so it can be placed on silent list + from rich.console import Console + + console = Console() + rich_available = True + except Exception: + pass + + @app.middleware("http") + async def log_and_time(req: Request, call_next): + ts = time.time() + res: Response = await call_next(req) + duration = str(round(time.time() - ts, 4)) + res.headers["X-Process-Time"] = duration + endpoint = req.scope.get("path", "err") + if shared.cmd_opts.api_log and endpoint.startswith("/sdapi"): + print( + "API {t} {code} {prot}/{ver} {method} {endpoint} {cli} {duration}".format( + t=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"), + code=res.status_code, + ver=req.scope.get("http_version", "0.0"), + cli=req.scope.get("client", ("0:0.0.0", 0))[0], + prot=req.scope.get("scheme", "err"), + method=req.scope.get("method", "err"), + endpoint=endpoint, + 
duration=duration, + ) + ) + return res + + def handle_exception(request: Request, e: Exception): + err = { + "error": type(e).__name__, + "detail": vars(e).get("detail", ""), + "body": vars(e).get("body", ""), + "errors": str(e), + } + if not isinstance( + e, HTTPException + ): # do not print backtrace on known httpexceptions + message = f"API error: {request.method}: {request.url} {err}" + if rich_available: + print(message) + console.print_exception( + show_locals=True, + max_frames=2, + extra_lines=1, + suppress=[anyio, starlette], + word_wrap=False, + width=min([console.width, 200]), + ) + else: + errors.report(message, exc_info=True) + return JSONResponse( + status_code=vars(e).get("status_code", 500), + content=jsonable_encoder(err), + ) + + @app.middleware("http") + async def exception_handling(request: Request, call_next): + try: + return await call_next(request) + except Exception as e: + return handle_exception(request, e) + + @app.exception_handler(Exception) + async def fastapi_exception_handler(request: Request, e: Exception): + return handle_exception(request, e) + + @app.exception_handler(HTTPException) + async def http_exception_handler(request: Request, e: HTTPException): + return handle_exception(request, e) + + +class ApiCompat: + def __init__(self, queue_lock: Lock): + self.router = APIRouter() + self.app = FastAPI() + self.queue_lock = queue_lock + api_middleware(self.app) + self.add_api_route("/sdapi/v1/txt2img", shark_sd_api, methods=["post"]) + self.add_api_route("/sdapi/v1/img2img", shark_sd_api, methods=["post"]) + # self.add_api_route("/sdapi/v1/upscaler", self.upscaler_api, methods=["post"]) + # self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=models.ExtrasSingleImageResponse) + # self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=models.ExtrasBatchImagesResponse) + # self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=models.PNGInfoResponse) + # self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=models.ProgressResponse) + # self.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"]) + # self.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"]) + # self.add_api_route("/sdapi/v1/skip", self.skip, methods=["POST"]) + # self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=models.OptionsModel) + # self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"]) + # self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel) + # self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem]) + # self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem]) + # self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, methods=["GET"], response_model=List[models.LatentUpscalerModeItem]) + # self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem]) + # self.add_api_route("/sdapi/v1/sd-vae", self.get_sd_vaes, methods=["GET"], response_model=List[models.SDVaeItem]) + # self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem]) + # 
self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem]) + # self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem]) + # self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem]) + # self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse) + # self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"]) + # self.add_api_route("/sdapi/v1/refresh-vae", self.refresh_vae, methods=["POST"]) + # self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse) + # self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse) + # self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse) + # self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse) + # self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse) + # self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse) + # self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"]) + # self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"]) + # self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList) + # self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=List[models.ScriptInfo]) + + # chat APIs needed for compatibility with multiple extensions using OpenAI API + self.add_api_route("/v1/chat/completions", chat_api, methods=["post"]) + self.add_api_route("/v1/completions", chat_api, methods=["post"]) + self.add_api_route("/chat/completions", chat_api, methods=["post"]) + self.add_api_route("/completions", chat_api, methods=["post"]) + self.add_api_route( + "/v1/engines/codegen/completions", chat_api, methods=["post"] + ) + if studio.cmd_opts.api_server_stop: + self.add_api_route( + "/sdapi/v1/server-kill", self.kill_studio, methods=["POST"] + ) + self.add_api_route( + "/sdapi/v1/server-restart", + self.restart_studio, + methods=["POST"], + ) + self.add_api_route( + "/sdapi/v1/server-stop", self.stop_studio, methods=["POST"] + ) + + self.default_script_arg_txt2img = [] + self.default_script_arg_img2img = [] + + def add_api_route(self, path: str, endpoint, **kwargs): + if studio.cmd_opts.api_auth: + return self.app.add_api_route( + path, endpoint, dependencies=[Depends(self.auth)], **kwargs + ) + return self.app.add_api_route(path, endpoint, **kwargs) + + def refresh_checkpoints(self): + with self.queue_lock: + studio_data.refresh_checkpoints() + + def refresh_vae(self): + with self.queue_lock: + studio_data.refresh_vae_list() + + def unloadapi(self): + unload_model_weights() + + return {} + + def reloadapi(self): + reload_model_weights() + + return {} + + def skip(self): + studio.state.skip() + + def launch(self, server_name, port, root_path): + self.app.include_router(self.router) + uvicorn.run( + self.app, + host=server_name, + port=port, + 
timeout_keep_alive=studio.cmd_opts.timeout_keep_alive, + root_path=root_path, + ) + + def kill_studio(self): + restart.stop_program() + + def restart_studio(self): + if restart.is_restartable(): + restart.restart_program() + return Response(status_code=501) + + def preprocess(self, args: dict): + try: + studio.state.begin(job="preprocess") + preprocess(**args) + studio.state.end() + return models.PreprocessResponse(info="preprocess complete") + except: + studio.state.end() + + def stop_studio(request): + studio.state.server_command = "stop" + return Response("Stopping.") diff --git a/apps/shark_studio/web/api/sd.py b/apps/shark_studio/web/api/sd.py new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/apps/shark_studio/web/api/sd.py @@ -0,0 +1 @@ + diff --git a/apps/shark_studio/web/configs/foo.json b/apps/shark_studio/web/configs/foo.json new file mode 100644 index 0000000000..0967ef424b --- /dev/null +++ b/apps/shark_studio/web/configs/foo.json @@ -0,0 +1 @@ +{} diff --git a/apps/shark_studio/web/index.py b/apps/shark_studio/web/index.py index 3ef6bc5739..58b0c6c00b 100644 --- a/apps/shark_studio/web/index.py +++ b/apps/shark_studio/web/index.py @@ -1,20 +1,58 @@ from multiprocessing import Process, freeze_support import os +import time import sys import logging +import apps.shark_studio.api.initializers as initialize + from ui.chat import chat_element +from ui.sd import sd_element +from ui.outputgallery import outputgallery_element + +from apps.shark_studio.modules import timer + +startup_timer = timer.startup_timer +startup_timer.record("launcher") + +initialize.imports() if sys.platform == "darwin": os.environ["DYLD_LIBRARY_PATH"] = "/usr/local/lib" # import before IREE to avoid MLIR library issues import torch_mlir -# import PIL, transformers, sentencepiece # ensures inclusion in pysintaller exe generation -# from apps.stable_diffusion.src import args, clear_all -# import apps.stable_diffusion.web.utils.global_obj as global_obj +def create_api(app): + from apps.shark_studio.api.compat import ApiCompat + from modules.call_queue import queue_lock + + api = ApiCompat(app, queue_lock) + return api + + +def api_only(): + from fastapi import FastAPI + from apps.shark_studio.modules.shared_cmd_opts import cmd_opts + + initialize.initialize() + + app = FastAPI() + initialize.setup_middleware(app) + api = create_api(app) + + # from modules import script_callbacks + # script_callbacks.before_ui_callback() + # script_callbacks.app_started_callback(None, app) -def launch_app(address): + print(f"Startup time: {startup_timer.summary()}.") + api.launch( + server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", + port=cmd_opts.port if cmd_opts.port else 8080, + root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else "", + ) + + +def launch_webui(address): from tkinter import Tk import webview @@ -34,62 +72,81 @@ def launch_app(address): webview.start(private_mode=False, storage_path=os.getcwd()) -if __name__ == "__main__": - # if args.debug: +def webui(): + from apps.shark_studio.modules.shared_cmd_opts import cmd_opts + logging.basicConfig(level=logging.DEBUG) + + launch_api = cmd_opts.api + initialize.initialize() + # required to do multiprocessing in a pyinstaller freeze freeze_support() - # if args.api or "api" in args.ui.split(","): - # from apps.stable_diffusion.web.ui import ( - # txt2img_api, - # img2img_api, - # upscaler_api, - # inpaint_api, - # outpaint_api, - # llm_chat_api, - # ) + + # if args.api or "api" in args.ui.split(","): + # from apps.shark_studio.api.llm 
import ( + # chat, + # ) + # from apps.shark_studio.web.api import sdapi + # + # from fastapi import FastAPI, APIRouter + # from fastapi.middleware.cors import CORSMiddleware + # import uvicorn # - # from fastapi import FastAPI, APIRouter - # import uvicorn + # # init global sd pipeline and config + # global_obj._init() # - # # init global sd pipeline and config - # global_obj._init() + # api = FastAPI() + # api.mount("/sdapi/", sdapi) # - # app = FastAPI() - # app.add_api_route("/sdapi/v1/txt2img", txt2img_api, methods=["post"]) - # app.add_api_route("/sdapi/v1/img2img", img2img_api, methods=["post"]) - # app.add_api_route("/sdapi/v1/inpaint", inpaint_api, methods=["post"]) - # app.add_api_route("/sdapi/v1/outpaint", outpaint_api, methods=["post"]) - # app.add_api_route("/sdapi/v1/upscaler", upscaler_api, methods=["post"]) + # # chat APIs needed for compatibility with multiple extensions using OpenAI API + # api.add_api_route( + # "/v1/chat/completions", llm_chat_api, methods=["post"] + # ) + # api.add_api_route("/v1/completions", llm_chat_api, methods=["post"]) + # api.add_api_route("/chat/completions", llm_chat_api, methods=["post"]) + # api.add_api_route("/completions", llm_chat_api, methods=["post"]) + # api.add_api_route( + # "/v1/engines/codegen/completions", llm_chat_api, methods=["post"] + # ) + # api.include_router(APIRouter()) # - # # chat APIs needed for compatibility with multiple extensions using OpenAI API - # app.add_api_route( - # "/v1/chat/completions", llm_chat_api, methods=["post"] - # ) - # app.add_api_route("/v1/completions", llm_chat_api, methods=["post"]) - # app.add_api_route("/chat/completions", llm_chat_api, methods=["post"]) - # app.add_api_route("/completions", llm_chat_api, methods=["post"]) - # app.add_api_route( - # "/v1/engines/codegen/completions", llm_chat_api, methods=["post"] - # ) - # app.include_router(APIRouter()) - # uvicorn.run(app, host="0.0.0.0", port=args.server_port) - # sys.exit(0) + # # deal with CORS requests if CORS accept origins are set + # if args.api_accept_origin: + # print( + # f"API Configured for CORS. Accepting origins: { args.api_accept_origin }" + # ) + # api.add_middleware( + # CORSMiddleware, + # allow_origins=args.api_accept_origin, + # allow_methods=["GET", "POST"], + # allow_headers=["*"], + # ) + # else: + # print("API not configured for CORS") # + # uvicorn.run(api, host="0.0.0.0", port=args.server_port) + # sys.exit(0) # Setup to use shark_tmp for gradio's temporary image files and clear any # existing temporary images there if they exist. Then we can import gradio. # It has to be in this order or gradio ignores what we've set up. 
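A note on the REST surface wired up above: ApiCompat registers A1111-style routes (/sdapi/v1/txt2img, /sdapi/v1/img2img, /v1/chat/completions, ...), and api_only() serves them on 127.0.0.1:8080 unless cmd_opts.listen / cmd_opts.port say otherwise. A minimal client sketch under stated assumptions: the server is already running with the API enabled, and the request/response fields follow the A1111 convention this layer imitates rather than anything guaranteed by shark_sd_api:

# Hypothetical client for the compatibility API; the payload fields and the
# "images" response field follow the A1111 convention this layer mimics and
# are assumptions, not guarantees from shark_sd_api.
import base64
import requests

payload = {
    "prompt": "a photo of a shark, studio lighting",
    "negative_prompt": "blurry, low quality",
    "steps": 20,
    "width": 512,
    "height": 512,
}
resp = requests.post(
    "http://127.0.0.1:8080/sdapi/v1/txt2img", json=payload, timeout=600
)
resp.raise_for_status()
images = resp.json().get("images", [])
if images:
    with open("txt2img_0.png", "wb") as f:
        f.write(base64.b64decode(images[0]))

If the caller is JavaScript running in a browser, the CORS middleware sketched in the commented-out block above (driven by --api_accept_origin) is what would need to be re-enabled.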
- # from apps.stable_diffusion.web.utils.gradio_configs import ( - # config_gradio_tmp_imgs_folder, - # ) + from apps.shark_studio.web.utils.tmp_configs import ( + config_tmp, + clear_tmp_mlir, + clear_tmp_imgs, + ) + from apps.shark_studio.api.utils import ( + create_checkpoint_folders, + ) - # config_gradio_tmp_imgs_folder() import gradio as gr - # Create custom models folders if they don't exist - # from apps.stable_diffusion.web.ui.utils import create_custom_models_folders + config_tmp() + clear_tmp_mlir() + clear_tmp_imgs() - # create_custom_models_folders() + # Create custom models folders if they don't exist + create_checkpoint_folders() def resource_path(relative_path): """Get absolute path to resource, works for dev and for PyInstaller""" @@ -98,74 +155,7 @@ def resource_path(relative_path): dark_theme = resource_path("ui/css/sd_dark_theme.css") - # from apps.stable_diffusion.web.ui import ( - # txt2img_web, - # txt2img_custom_model, - # txt2img_gallery, - # txt2img_png_info_img, - # txt2img_status, - # txt2img_sendto_img2img, - # txt2img_sendto_inpaint, - # txt2img_sendto_outpaint, - # txt2img_sendto_upscaler, - ## h2ogpt_upload, - ## h2ogpt_web, - # img2img_web, - # img2img_custom_model, - # img2img_gallery, - # img2img_init_image, - # img2img_status, - # img2img_sendto_inpaint, - # img2img_sendto_outpaint, - # img2img_sendto_upscaler, - # inpaint_web, - # inpaint_custom_model, - # inpaint_gallery, - # inpaint_init_image, - # inpaint_status, - # inpaint_sendto_img2img, - # inpaint_sendto_outpaint, - # inpaint_sendto_upscaler, - # outpaint_web, - # outpaint_custom_model, - # outpaint_gallery, - # outpaint_init_image, - # outpaint_status, - # outpaint_sendto_img2img, - # outpaint_sendto_inpaint, - # outpaint_sendto_upscaler, - # upscaler_web, - # upscaler_custom_model, - # upscaler_gallery, - # upscaler_init_image, - # upscaler_status, - # upscaler_sendto_img2img, - # upscaler_sendto_inpaint, - # upscaler_sendto_outpaint, - ## lora_train_web, - ## model_web, - ## model_config_web, - # hf_models, - # modelmanager_sendto_txt2img, - # modelmanager_sendto_img2img, - # modelmanager_sendto_inpaint, - # modelmanager_sendto_outpaint, - # modelmanager_sendto_upscaler, - # stablelm_chat, - # minigpt4_web, - # outputgallery_web, - # outputgallery_tab_select, - # outputgallery_watch, - # outputgallery_filename, - # outputgallery_sendto_txt2img, - # outputgallery_sendto_img2img, - # outputgallery_sendto_inpaint, - # outputgallery_sendto_outpaint, - # outputgallery_sendto_upscaler, - # ) - - # init global sd pipeline and config - # global_obj._init() + # from apps.shark_studio.web.ui import load_ui_from_script def register_button_click(button, selectedid, inputs, outputs): button.click( @@ -177,17 +167,6 @@ def register_button_click(button, selectedid, inputs, outputs): outputs, ) - def register_modelmanager_button(button, selectedid, inputs, outputs): - button.click( - lambda x: ( - "None", - x, - gr.Tabs.update(selected=selectedid), - ), - inputs, - outputs, - ) - def register_outputgallery_button(button, selectedid, inputs, outputs): button.click( lambda x: ( @@ -200,7 +179,7 @@ def register_outputgallery_button(button, selectedid, inputs, outputs): with gr.Blocks( css=dark_theme, analytics_enabled=False, title="Shark Studio 2.0 Beta" - ) as sd_web: + ) as studio_web: with gr.Tabs() as tabs: # NOTE: If adding, removing, or re-ordering tabs, make sure that they # have a unique id that doesn't clash with any of the other tabs, @@ -211,216 +190,31 @@ def register_outputgallery_button(button, 
selectedid, inputs, outputs): # destination of one of the 'send to' buttons. If you do have to change # that id, make sure you update the relevant register_button_click calls # further down with the new id. - # with gr.TabItem(label="Text-to-Image", id=0): - # txt2img_web.render() - # with gr.TabItem(label="Image-to-Image", id=1): - # img2img_web.render() - # with gr.TabItem(label="Inpainting", id=2): - # inpaint_web.render() - # with gr.TabItem(label="Outpainting", id=3): - # outpaint_web.render() - # with gr.TabItem(label="Upscaler", id=4): - # upscaler_web.render() - # if args.output_gallery: - # with gr.TabItem(label="Output Gallery", id=5) as og_tab: - # outputgallery_web.render() - - # # extra output gallery configuration - # outputgallery_tab_select(og_tab.select) - # outputgallery_watch( - # [ - # txt2img_status, - # img2img_status, - # inpaint_status, - # outpaint_status, - # upscaler_status, - # ] - # ) - ## with gr.TabItem(label="Model Manager", id=6): - ## model_web.render() - ## with gr.TabItem(label="LoRA Training (Experimental)", id=7): - ## lora_train_web.render() - with gr.TabItem(label="Chat Bot", id=0): + with gr.TabItem(label="Stable Diffusion", id=0): + sd_element.render() + with gr.TabItem(label="Output Gallery", id=1): + outputgallery_element.render() + with gr.TabItem(label="Chat Bot", id=2): chat_element.render() - ## with gr.TabItem( - ## label="Generate Sharding Config (Experimental)", id=9 - ## ): - ## model_config_web.render() - # with gr.TabItem(label="MultiModal (Experimental)", id=10): - # minigpt4_web.render() - # with gr.TabItem(label="DocuChat Upload", id=11): - # h2ogpt_upload.render() - # with gr.TabItem(label="DocuChat(Experimental)", id=12): - # h2ogpt_web.render() - - # send to buttons - # register_button_click( - # txt2img_sendto_img2img, - # 1, - # [txt2img_gallery], - # [img2img_init_image, tabs], - # ) - # register_button_click( - # txt2img_sendto_inpaint, - # 2, - # [txt2img_gallery], - # [inpaint_init_image, tabs], - # ) - # register_button_click( - # txt2img_sendto_outpaint, - # 3, - # [txt2img_gallery], - # [outpaint_init_image, tabs], - # ) - # register_button_click( - # txt2img_sendto_upscaler, - # 4, - # [txt2img_gallery], - # [upscaler_init_image, tabs], - # ) - # register_button_click( - # img2img_sendto_inpaint, - # 2, - # [img2img_gallery], - # [inpaint_init_image, tabs], - # ) - # register_button_click( - # img2img_sendto_outpaint, - # 3, - # [img2img_gallery], - # [outpaint_init_image, tabs], - # ) - # register_button_click( - # img2img_sendto_upscaler, - # 4, - # [img2img_gallery], - # [upscaler_init_image, tabs], - # ) - # register_button_click( - # inpaint_sendto_img2img, - # 1, - # [inpaint_gallery], - # [img2img_init_image, tabs], - # ) - # register_button_click( - # inpaint_sendto_outpaint, - # 3, - # [inpaint_gallery], - # [outpaint_init_image, tabs], - # ) - # register_button_click( - # inpaint_sendto_upscaler, - # 4, - # [inpaint_gallery], - # [upscaler_init_image, tabs], - # ) - # register_button_click( - # outpaint_sendto_img2img, - # 1, - # [outpaint_gallery], - # [img2img_init_image, tabs], - # ) - # register_button_click( - # outpaint_sendto_inpaint, - # 2, - # [outpaint_gallery], - # [inpaint_init_image, tabs], - # ) - # register_button_click( - # outpaint_sendto_upscaler, - # 4, - # [outpaint_gallery], - # [upscaler_init_image, tabs], - # ) - # register_button_click( - # upscaler_sendto_img2img, - # 1, - # [upscaler_gallery], - # [img2img_init_image, tabs], - # ) - # register_button_click( - # upscaler_sendto_inpaint, 
- # 2, - # [upscaler_gallery], - # [inpaint_init_image, tabs], - # ) - # register_button_click( - # upscaler_sendto_outpaint, - # 3, - # [upscaler_gallery], - # [outpaint_init_image, tabs], - # ) - # if args.output_gallery: - # register_outputgallery_button( - # outputgallery_sendto_txt2img, - # 0, - # [outputgallery_filename], - # [txt2img_png_info_img, tabs], - # ) - # register_outputgallery_button( - # outputgallery_sendto_img2img, - # 1, - # [outputgallery_filename], - # [img2img_init_image, tabs], - # ) - # register_outputgallery_button( - # outputgallery_sendto_inpaint, - # 2, - # [outputgallery_filename], - # [inpaint_init_image, tabs], - # ) - # register_outputgallery_button( - # outputgallery_sendto_outpaint, - # 3, - # [outputgallery_filename], - # [outpaint_init_image, tabs], - # ) - # register_outputgallery_button( - # outputgallery_sendto_upscaler, - # 4, - # [outputgallery_filename], - # [upscaler_init_image, tabs], - # ) - # register_modelmanager_button( - # modelmanager_sendto_txt2img, - # 0, - # [hf_models], - # [txt2img_custom_model, tabs], - # ) - # register_modelmanager_button( - # modelmanager_sendto_img2img, - # 1, - # [hf_models], - # [img2img_custom_model, tabs], - # ) - # register_modelmanager_button( - # modelmanager_sendto_inpaint, - # 2, - # [hf_models], - # [inpaint_custom_model, tabs], - # ) - # register_modelmanager_button( - # modelmanager_sendto_outpaint, - # 3, - # [hf_models], - # [outpaint_custom_model, tabs], - # ) - # register_modelmanager_button( - # modelmanager_sendto_upscaler, - # 4, - # [hf_models], - # [upscaler_custom_model, tabs], - # ) - - sd_web.queue() + + studio_web.queue() # if args.ui == "app": # t = Process( # target=launch_app, args=[f"http://localhost:{args.server_port}"] # ) # t.start() - sd_web.launch( + studio_web.launch( share=True, inbrowser=True, server_name="0.0.0.0", server_port=11911, # args.server_port, ) + + +if __name__ == "__main__": + from apps.shark_studio.modules.shared_cmd_opts import cmd_opts + + if cmd_opts.webui == False: + api_only() + else: + webui() diff --git a/apps/shark_studio/web/ui/chat.py b/apps/shark_studio/web/ui/chat.py index 4726eef6e8..3a374eb5e2 100644 --- a/apps/shark_studio/web/ui/chat.py +++ b/apps/shark_studio/web/ui/chat.py @@ -240,9 +240,11 @@ def view_json_file(file_obj): with gr.Row(visible=False): with gr.Group(): - config_file = gr.File(label="Upload sharding configuration", visible=False) - json_view_button = gr.Button(label="View as JSON", visible=False) - json_view = gr.JSON(interactive=True, visible=False) + config_file = gr.File( + label="Upload sharding configuration", visible=False + ) + json_view_button = gr.Button("View as JSON", visible=False) + json_view = gr.JSON(visible=False) json_view_button.click( fn=view_json_file, inputs=[config_file], outputs=[json_view] ) diff --git a/apps/shark_studio/web/ui/common_events.py b/apps/shark_studio/web/ui/common_events.py new file mode 100644 index 0000000000..37555ed7ee --- /dev/null +++ b/apps/shark_studio/web/ui/common_events.py @@ -0,0 +1,55 @@ +from apps.shark_studio.web.ui.utils import ( + HSLHue, + hsl_color, +) +from apps.shark_studio.modules.embeddings import get_lora_metadata + + +# Answers HTML to show the most frequent tags used when a LoRA was trained, +# taken from the metadata of its .safetensors file. 
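lora_changed, defined just below, returns a single-element list holding one HTML fragment, so it maps onto a single gr.HTML output. A minimal wiring sketch; the component names and labels here are hypothetical (the real hookup belongs to the sd.py UI):

# Hypothetical Gradio wiring for lora_changed; component names are invented
# for illustration and are not taken from this patch.
import gradio as gr
from apps.shark_studio.web.ui.common_events import lora_changed

with gr.Blocks() as demo:
    lora_file = gr.Dropdown(
        label="Standalone LoRA weights", choices=["None"], value="None"
    )
    lora_tags = gr.HTML(elem_classes=["lora-tags"])  # styled by sd_dark_theme.css
    # lora_changed returns ["<html fragment>"], matching the single output below
    lora_file.change(fn=lora_changed, inputs=[lora_file], outputs=[lora_tags])

The .lora-tags, .lora-tag, and .lora-model rules in sd_dark_theme.css further down style whatever fragment the handler returns.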
+def lora_changed(lora_file):
+    # tag frequency percentage, that gets maximum amount of the starting hue
+    TAG_COLOR_THRESHOLD = 0.55
+    # tag frequency percentage, above which a tag is displayed
+    TAG_DISPLAY_THRESHOLD = 0.65
+    # template for the html used to display a tag
+    TAG_HTML_TEMPLATE = '<span class="lora-tag" style="background-color: {color}">{tag}</span>'
+
+    if lora_file == "None":
+        return ["<div><i>No LoRA selected</i></div>"]
+    elif not lora_file.lower().endswith(".safetensors"):
+        return [
+            "<div><i>Only metadata queries for .safetensors files are currently supported</i></div>"
+        ]
+    else:
+        metadata = get_lora_metadata(lora_file)
+        if metadata:
+            frequencies = metadata["frequencies"]
+            return [
+                "".join(
+                    [
+                        f'<div class="lora-model">Trained against weights in: {metadata["model"]}</div>'
+                    ]
+                    + [
+                        TAG_HTML_TEMPLATE.format(
+                            color=hsl_color(
+                                (tag[1] - TAG_COLOR_THRESHOLD)
+                                / (1 - TAG_COLOR_THRESHOLD),
+                                start=HSLHue.RED,
+                                end=HSLHue.GREEN,
+                            ),
+                            tag=tag[0],
+                        )
+                        for tag in frequencies
+                        if tag[1] > TAG_DISPLAY_THRESHOLD
+                    ],
+                )
+            ]
+        elif metadata is None:
+            return [
+                "<div><i>This LoRA does not publish tag frequency metadata</i></div>"
+            ]
+        else:
+            return [
+                "<div><i>This LoRA has empty tag frequency metadata, or we could not parse it</i></div>
" + ] diff --git a/apps/shark_studio/web/ui/css/sd_dark_theme.css b/apps/shark_studio/web/ui/css/sd_dark_theme.css new file mode 100644 index 0000000000..5686f0868c --- /dev/null +++ b/apps/shark_studio/web/ui/css/sd_dark_theme.css @@ -0,0 +1,324 @@ +/* +Apply Gradio dark theme to the default Gradio theme. +Procedure to upgrade the dark theme: +- Using your browser, visit http://localhost:8080/?__theme=dark +- Open your browser inspector, search for the .dark css class +- Copy .dark class declarations, apply them here into :root +*/ + +:root { + --body-background-fill: var(--background-fill-primary); + --body-text-color: var(--neutral-100); + --color-accent-soft: var(--neutral-700); + --background-fill-primary: var(--neutral-950); + --background-fill-secondary: var(--neutral-900); + --border-color-accent: var(--neutral-600); + --border-color-primary: var(--neutral-700); + --link-text-color-active: var(--secondary-500); + --link-text-color: var(--secondary-500); + --link-text-color-hover: var(--secondary-400); + --link-text-color-visited: var(--secondary-600); + --body-text-color-subdued: var(--neutral-400); + --shadow-spread: 1px; + --block-background-fill: var(--neutral-800); + --block-border-color: var(--border-color-primary); + --block_border_width: None; + --block-info-text-color: var(--body-text-color-subdued); + --block-label-background-fill: var(--background-fill-secondary); + --block-label-border-color: var(--border-color-primary); + --block_label_border_width: None; + --block-label-text-color: var(--neutral-200); + --block_shadow: None; + --block_title_background_fill: None; + --block_title_border_color: None; + --block_title_border_width: None; + --block-title-text-color: var(--neutral-200); + --panel-background-fill: var(--background-fill-secondary); + --panel-border-color: var(--border-color-primary); + --panel_border_width: None; + --checkbox-background-color: var(--neutral-800); + --checkbox-background-color-focus: var(--checkbox-background-color); + --checkbox-background-color-hover: var(--checkbox-background-color); + --checkbox-background-color-selected: var(--secondary-600); + --checkbox-border-color: var(--neutral-700); + --checkbox-border-color-focus: var(--secondary-500); + --checkbox-border-color-hover: var(--neutral-600); + --checkbox-border-color-selected: var(--secondary-600); + --checkbox-border-width: var(--input-border-width); + --checkbox-label-background-fill: linear-gradient(to top, var(--neutral-900), var(--neutral-800)); + --checkbox-label-background-fill-hover: linear-gradient(to top, var(--neutral-900), var(--neutral-800)); + --checkbox-label-background-fill-selected: var(--checkbox-label-background-fill); + --checkbox-label-border-color: var(--border-color-primary); + --checkbox-label-border-color-hover: var(--checkbox-label-border-color); + --checkbox-label-border-width: var(--input-border-width); + --checkbox-label-text-color: var(--body-text-color); + --checkbox-label-text-color-selected: var(--checkbox-label-text-color); + --error-background-fill: var(--background-fill-primary); + --error-border-color: var(--border-color-primary); + --error_border_width: None; + --error-text-color: #ef4444; + --input-background-fill: var(--neutral-800); + --input-background-fill-focus: var(--secondary-600); + --input-background-fill-hover: var(--input-background-fill); + --input-border-color: var(--border-color-primary); + --input-border-color-focus: var(--neutral-700); + --input-border-color-hover: var(--input-border-color); + --input_border_width: None; + 
--input-placeholder-color: var(--neutral-500); + --input_shadow: None; + --input-shadow-focus: 0 0 0 var(--shadow-spread) var(--neutral-700), var(--shadow-inset); + --loader_color: None; + --slider_color: None; + --stat-background-fill: linear-gradient(to right, var(--primary-400), var(--primary-600)); + --table-border-color: var(--neutral-700); + --table-even-background-fill: var(--neutral-950); + --table-odd-background-fill: var(--neutral-900); + --table-row-focus: var(--color-accent-soft); + --button-border-width: var(--input-border-width); + --button-cancel-background-fill: linear-gradient(to bottom right, #dc2626, #b91c1c); + --button-cancel-background-fill-hover: linear-gradient(to bottom right, #dc2626, #dc2626); + --button-cancel-border-color: #dc2626; + --button-cancel-border-color-hover: var(--button-cancel-border-color); + --button-cancel-text-color: white; + --button-cancel-text-color-hover: var(--button-cancel-text-color); + --button-primary-background-fill: linear-gradient(to bottom right, var(--primary-500), var(--primary-600)); + --button-primary-background-fill-hover: linear-gradient(to bottom right, var(--primary-500), var(--primary-500)); + --button-primary-border-color: var(--primary-500); + --button-primary-border-color-hover: var(--button-primary-border-color); + --button-primary-text-color: white; + --button-primary-text-color-hover: var(--button-primary-text-color); + --button-secondary-background-fill: linear-gradient(to bottom right, var(--neutral-600), var(--neutral-700)); + --button-secondary-background-fill-hover: linear-gradient(to bottom right, var(--neutral-600), var(--neutral-600)); + --button-secondary-border-color: var(--neutral-600); + --button-secondary-border-color-hover: var(--button-secondary-border-color); + --button-secondary-text-color: white; + --button-secondary-text-color-hover: var(--button-secondary-text-color); + --block-border-width: 1px; + --block-label-border-width: 1px; + --form-gap-width: 1px; + --error-border-width: 1px; + --input-border-width: 1px; +} + +/* SHARK theme */ +body { + background-color: var(--background-fill-primary); +} + +.generating.svelte-zlszon.svelte-zlszon { + border: none; +} + +.generating { + border: none !important; +} + +#chatbot { + height: 100% !important; +} + +/* display in full width for desktop devices */ +@media (min-width: 1536px) +{ + .gradio-container { + max-width: var(--size-full) !important; + } +} + +.gradio-container .contain { + padding: 0 var(--size-4) !important; +} + +#top_logo { + color: transparent; + background-color: transparent; + border-radius: 0 !important; + border: 0; +} + +#ui_title { + padding: var(--size-2) 0 0 var(--size-1); +} + +#demo_title_outer { + border-radius: 0; +} + +#prompt_box_outer div:first-child { + border-radius: 0 !important +} + +#prompt_box textarea, #negative_prompt_box textarea { + background-color: var(--background-fill-primary) !important; +} + +#prompt_examples { + margin: 0 !important; +} + +#prompt_examples svg { + display: none !important; +} + +#ui_body { + padding: var(--size-2) !important; + border-radius: 0.5em !important; +} + +#img_result+div { + display: none !important; +} + +footer { + display: none !important; +} + +#gallery + div { + border-radius: 0 !important; +} + +/* Gallery: Remove the default square ratio thumbnail and limit images height to the container */ +#gallery .thumbnail-item.thumbnail-lg { + aspect-ratio: unset; + max-height: calc(55vh - (2 * var(--spacing-lg))); +} +@media (min-width: 1921px) { + /* Force a 768px_height + 
4px_margin_height + navbar_height for the gallery */ + #gallery .grid-wrap, #gallery .preview{ + min-height: calc(768px + 4px + var(--size-14)); + max-height: calc(768px + 4px + var(--size-14)); + } + /* Limit height to 768px_height + 2px_margin_height for the thumbnails */ + #gallery .thumbnail-item.thumbnail-lg { + max-height: 770px !important; + } +} +/* Don't upscale when viewing in solo image mode */ +#gallery .preview img { + object-fit: scale-down; +} +/* Navbar images in cover mode*/ +#gallery .preview .thumbnail-item img { + object-fit: cover; +} + +/* Limit the stable diffusion text output height */ +#std_output textarea { + max-height: 215px; +} + +/* Prevent progress bar to block gallery navigation while building images (Gradio V3.19.0) */ +#gallery .wrap.default { + pointer-events: none; +} + +/* Import Png info box */ +#txt2img_prompt_image { + height: var(--size-32) !important; +} + +/* Hide "remove buttons" from ui dropdowns */ +#custom_model .token-remove.remove-all, +#lora_weights .token-remove.remove-all, +#scheduler .token-remove.remove-all, +#device .token-remove.remove-all, +#stencil_model .token-remove.remove-all { + display: none; +} + +/* Hide selected items from ui dropdowns */ +#custom_model .options .item .inner-item, +#scheduler .options .item .inner-item, +#device .options .item .inner-item, +#stencil_model .options .item .inner-item { + display:none; +} + +/* workarounds for container=false not currently working for dropdowns */ +.dropdown_no_container { + padding: 0 !important; +} + +#output_subdir_container :first-child { + border: none; +} + +/* reduced animation load when generating */ +.generating { + animation-play-state: paused !important; +} + +/* better clarity when progress bars are minimal */ +.meta-text { + background-color: var(--block-label-background-fill); +} + +/* lora tag pills */ +.lora-tags { + border: 1px solid var(--border-color-primary); + color: var(--block-info-text-color) !important; + padding: var(--block-padding); +} + +.lora-tag { + display: inline-block; + height: 2em; + color: rgb(212 212 212) !important; + margin-right: 5pt; + margin-bottom: 5pt; + padding: 2pt 5pt; + border-radius: 5pt; + white-space: nowrap; +} + +.lora-model { + margin-bottom: var(--spacing-lg); + color: var(--block-info-text-color) !important; + line-height: var(--line-sm); +} + +/* output gallery tab */ +.output_parameters_dataframe table.table { + /* works around a gradio bug that always shows scrollbars */ + overflow: clip auto; +} + +.output_parameters_dataframe tbody td { + font-size: small; + line-height: var(--line-xs); +} + +.output_icon_button { + max-width: 30px; + align-self: end; + padding-bottom: 8px; +} + +.outputgallery_sendto { + min-width: 7em !important; +} + +/* output gallery should take up most of the viewport height regardless of image size/number */ +#outputgallery_gallery .fixed-height { + min-height: 89vh !important; +} + +/* don't stretch non-square images to be square, breaking their aspect ratio */ +#outputgallery_gallery .thumbnail-item.thumbnail-lg > img { + object-fit: contain !important; +} + +/* centered logo for when there are no images */ +#top_logo.logo_centered { + height: 100%; + width: 100%; +} + +#top_logo.logo_centered img{ + object-fit: scale-down; + position: absolute; + width: 80%; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); +} diff --git a/apps/shark_studio/web/ui/logos/nod-icon.png b/apps/shark_studio/web/ui/logos/nod-icon.png new file mode 100644 index 
0000000000000000000000000000000000000000..29f7e32220bd6df9cb4cbd168c0456818c5bc994 GIT binary patch literal 16058 zcmV;rK1IQaP)Px#1ZP1_K>z@;j|==^1poj532;bRa{vGi!vFvd!vV){sAK>DK2J$RK~#8N?R^QD z996mYS*ohLr+bzxB!MJ^C6JH=LjpG<+pi$tf?fobAc*=4-V0X&LE*Y9iu@=7dKIsV zBFGX@)Q3$#R0LdzS!J?^NFZb*%Pc*;RngdKV*Q}L#V~&4tdQ%WDV)xBoJcDU|HdoLa_CJSly4lTW(oqx=cosZA0@XnE�SP46vkguM=wz) zZHx!zbvS%PURx5~w{Y=6B2X7<%|7G^F*PRzE7ONk7&5JRg>fA+&N*%BGNz^E0=pT6 znn=tzZK!EGvrRff`=@BOEeO^U*S|6IxG6uxws$^aMFhtDzTUo>;tG5@L)w{IAmva?w!AQ3Vp|=MBdA?^gwil%BM#`JCf5^FjfiJ zxikN5(y!~p?HvXcX`x%KWt}yD!J%XguOlsbVOA1Z(Z&k~!U$^qU8_Gemv?He@2w>& zjup=`Xkjz$nrEguJDtOs!@S5oquxK^Eb}CQydu;;Y;omDZ51-*g6uTYu1gO_ow&Wn zpdc-z0Eh(L3bHBv%9m*IHPmFMqvoEM=|6|aDqW&lhaWB^_WciYya(Xv;~3UL=f(@w zSC1dMIloc~OU7AqTW@J{gE>L8Q)H1CB0duW}3q}LOY!&veIVXAdSkKsRNek?~1h0gK(iOA_S0e{SnFjgGvrBk+lwQsu}w%lqm%vbHB< zJ8ZwJzkh!~cW`k(0tg4P8p6(L{W$m4B9a2|YzH}FcB*3=>{pgHX~pha zuwn!cD?cT&o|On+aX4Anwit$YYe!s*rWl`hmu1}^aIW^dX3c4RKIh6AAT7CZuF0;+ zVA9O#QylzcoiX#+DIW%5 zceiET9`Ic_XY)Y@Gf7VuCFk@&@p2A1VC8%lt87G~j&AH1qy_O34$duEmp|wT(b7>- z$YQCCD?FrGw}$!#IdbUH2!`ahpZTVyc+tNAo8SfvyM0u4<95> zmE3h0?DO&fgaz?t&0ji}g~6{{TxW)jfyUZZkfXIwOYw>Gr%jrTeLw3 zKx-3^jY3CZ8~wZw%{*@M@33s3S=UHWjQTyFMS9pGSn%Va}09h3mI` z2n)I`^5DikMOy1{-Lqg-!>l=ro~7pVVnhe%mYW=Y&T(ZV2PU6<{xL^vw0d`6uugao z(Sn?R%#`^`^ZT0|b8GxCI`dEBKg4&?}KU17LHJN!+!(U#o%va@2XEz|tRW=eGJM9&As--B{W zJdC!kVz6RB|9C42Oh}7|fA*xvm|_x3RE9 zSWSdgnj<9(4QoPLWMTgc+rsMXiw@Tt0rduW7)?!rU{B`gLNF~V#_CG_!FaPlivdMi zYhlqdmI+0KCI7<`7BvJxcpXAoXc|FS+}vo-{SVe53pu~rEa2S9N4*^0d*A$(L(sYm zR*5iRNNX)DddEs#c?wlokt7INlZP4t5O(rcV1e?vC;9$NSO6U&%~Uv#<%4h|y5j_) zu8`Fop>*X8tQ)KnVL*_!s$u6XoWPa-gC`srYgmS={!QAw)e;tcwf1+y0=6VE?pXN{ zy0T+b8{9Cd)d#T6V2#=Cb<*O-7z?{##R#V5A2U+q;wXW#PS-`pjQ=#`01Bc9Wx+_u zAHYaABoVwWWXd!<4cBY1#%%ZM54N5^EG-Ypa5xS^NgmckVXfe9>vZMvXXwFTVUw^i z+aRT$DF)$cq^r>jBj?H%x?X2u9Y)E)bTDDB{orJ(HPhJs>lW9U8ugKi3G0VXpi2xs z!1Or#y5@G(zA$afyTNGih6oEqym{{zb)=SC4-qsdW$l^+d5nhU6xK@kw zl=G*J-wG?Uy~F`-6vlV0=IRru9j3^_4p-FG(ZRD#P1SYS9@`GYhr@>UP>v~s!eUQ~ zag~51f+xIuMVq0Nc66QgN9=LZTEZe0n_}uuK|GeL+5n2=ihJs|V07+Gd#VM&RicT+>`2Ss|=Gtm5yP&nuPG0Cz$>`~I%iyMte zP=3J^Vnjum(3ZrAg{6SKmn_G_cQ1S)<%Q$SP1v3VF_uSnjAWsE5Yv1lHJkLuN0B{k zqSZP2OVeKl+a|D8GYp6<+5;}y+|XYa3g;Eh)h5ooS7F`b?Y4m>@~#>lsjsM#_8*Ji zlXdw+UB0SdD5r{2Yqj(yA}qGQCtH%(W2A+}v^N{Q4sxzIDR#U}JpoybFOt=`9)yMC zf$c0na;3saQU_D%;}jz4L~=+DqthQs9(5Yy#!3Dd&uc1v=YA%=aP|sn~Uld63QtkPGM3^Q=MBJUQ*A5z%tI|@3ixGj=p^;M`&zb$|Pj3xH{H#4D5C0?CCi;g5M*J8Y90%DdV zCF}^?Nb`ooNaAVKmDL~`HvaY4cAJRHjJYIF7>cdwAu!~QM)3ASrp3SLBL=2>R!jDb zR0qG1BnPZ8bn@<$EP!pLsvRHw&`N|`k=nM`H{N`|#vUPUx8t?wof$_w77Yg<>Wp+{ zF^C3SLpIFk>3?@ra3xY|Mc^7(a#1dhN zlY40jEPk>z+n=%9NxOX=OLi;5?L?5V)ZCv*PKF^;Atd9exliQ5I}E_IG%O(g>|15b zHj1^;x=r*m*3s2h#j7T0SBx zg?Rt~^R5N-ne2Wk2yD4aL=mv~c$m6EwV5`3ine(`yUy=x$Fkj(V}n`E8V^dcj7~G7 z6<2t01J2%zo*VXB$Ghye*zKgPT8O!e9`26EYBLBNF@kiN8x3S^U#4LeGtZI(7SpWL zj4T{qN^9qZoG%taEoUtCs+(E1X=imMv?6hOv|X6id;63iN}}cgK#Z91i$5)-AgS8-p62o-@mK0uO;q%a~J)g(GjPTPf((3ih-ex zKQLHy?QSD2E?{S{PM{D5qofRRo!hr1tOZ$AG|5C%j0l5a;}gyB--$d=Dklmi#T|9b%kMZE{2k>k#1YevwG``x(sJ zZbCJizWm3@k;HL55JKA-OK4>l#wxX{cZsmeNn3k!&>@k? 
[base85-encoded GIT binary patch data for the bundled nod logo PNG assets; raw image bytes, not human-readable]
zcDQIPSqxahjWq8hRo-qfTSYHbqKujw@nsLED;(;D|4!+-CdfUsJbVya7Y-x+uF^V& z{lq#z>9)n)FgT}%7RwA!j5*Df5a?|CRk}AMGdx&fyGqv@m?zh~|4j=?Bu^gEuVtL! zOWi<Qox+9r?Pei=2%cwx4mj=`%hxC6D!jM)7$VMf-7a^|skjUQWd+O;LdO&G(A^2VU tb68rh4voRxzIe})cyar7%5_>kwNwHFQJk2{r6G7^g7wW7wM{}081Mmzuj literal 0 HcmV?d00001 diff --git a/apps/shark_studio/web/ui/outputgallery.py b/apps/shark_studio/web/ui/outputgallery.py new file mode 100644 index 0000000000..dd58541aae --- /dev/null +++ b/apps/shark_studio/web/ui/outputgallery.py @@ -0,0 +1,416 @@ +import glob +import gradio as gr +import os +import subprocess +import sys +from PIL import Image + +from apps.shark_studio.modules.shared_cmd_opts import cmd_opts +from apps.shark_studio.api.utils import ( + get_generated_imgs_path, + get_generated_imgs_todays_subdir, +) +from apps.shark_studio.web.ui.utils import nodlogo_loc +from apps.shark_studio.web.utils.metadata import displayable_metadata + +# -- Functions for file, directory and image info querying + +output_dir = get_generated_imgs_path() + + +def outputgallery_filenames(subdir) -> list[str]: + new_dir_path = os.path.join(output_dir, subdir) + if os.path.exists(new_dir_path): + filenames = [ + glob.glob(new_dir_path + "/" + ext) + for ext in ("*.png", "*.jpg", "*.jpeg") + ] + + return sorted(sum(filenames, []), key=os.path.getmtime, reverse=True) + else: + return [] + + +def output_subdirs() -> list[str]: + # Gets a list of subdirectories of output_dir and below, as relative paths. + relative_paths = [ + os.path.relpath(entry[0], output_dir) + for entry in os.walk( + output_dir, followlinks=cmd_opts.output_gallery_followlinks + ) + ] + + # It is less confusing to always including the subdir that will take any + # images generated today even if it doesn't exist yet + if get_generated_imgs_todays_subdir() not in relative_paths: + relative_paths.append(get_generated_imgs_todays_subdir()) + + # sort subdirectories so that the date named ones we probably + # created in this or previous sessions come first, sorted with the most + # recent first. Other subdirs are listed after. + generated_paths = sorted( + [path for path in relative_paths if path.isnumeric()], reverse=True + ) + result_paths = generated_paths + sorted( + [ + path + for path in relative_paths + if (not path.isnumeric()) and path != "." 
+ ] + ) + + return result_paths + + +# --- Define UI layout for Gradio + +with gr.Blocks() as outputgallery_element: + nod_logo = Image.open(nodlogo_loc) + + with gr.Row(elem_id="outputgallery_gallery"): + # needed to workaround gradio issue: + # https://github.com/gradio-app/gradio/issues/2907 + dev_null = gr.Textbox("", visible=False) + + gallery_files = gr.State(value=[]) + subdirectory_paths = gr.State(value=[]) + + with gr.Column(scale=6): + logo = gr.Image( + label="Getting subdirectories...", + value=nod_logo, + interactive=False, + visible=True, + show_label=True, + elem_id="top_logo", + elem_classes="logo_centered", + show_download_button=False, + ) + + gallery = gr.Gallery( + label="", + value=gallery_files.value, + visible=False, + show_label=True, + columns=4, + ) + + with gr.Column(scale=4): + with gr.Group(): + with gr.Row(): + with gr.Column( + scale=15, + min_width=160, + elem_id="output_subdir_container", + ): + subdirectories = gr.Dropdown( + label=f"Subdirectories of {output_dir}", + type="value", + choices=subdirectory_paths.value, + value="", + interactive=True, + elem_classes="dropdown_no_container", + allow_custom_value=True, + ) + with gr.Column( + scale=1, + min_width=32, + elem_classes="output_icon_button", + ): + open_subdir = gr.Button( + variant="secondary", + value="\U0001F5C1", # unicode open folder + interactive=False, + size="sm", + ) + with gr.Column( + scale=1, + min_width=32, + elem_classes="output_icon_button", + ): + refresh = gr.Button( + variant="secondary", + value="\u21BB", # unicode clockwise arrow circle + size="sm", + ) + + image_columns = gr.Slider( + label="Columns shown", value=4, minimum=1, maximum=16, step=1 + ) + outputgallery_filename = gr.Textbox( + label="Filename", + value="None", + interactive=False, + show_copy_button=True, + ) + + with gr.Accordion( + label="Parameter Information", open=False + ) as parameters_accordian: + image_parameters = gr.DataFrame( + headers=["Parameter", "Value"], + col_count=2, + wrap=True, + elem_classes="output_parameters_dataframe", + value=[["Status", "No image selected"]], + interactive=True, + ) + + with gr.Accordion(label="Send To", open=True): + with gr.Row(): + outputgallery_sendto_sd = gr.Button( + value="Stable Diffusion", + interactive=False, + elem_classes="outputgallery_sendto", + size="sm", + ) + + # --- Event handlers + + def on_clear_gallery(): + return [ + gr.Gallery( + value=[], + visible=False, + ), + gr.Image( + visible=True, + ), + ] + + def on_image_columns_change(columns): + return gr.Gallery(columns=columns) + + def on_select_subdir(subdir) -> list: + # evt.value is the subdirectory name + new_images = outputgallery_filenames(subdir) + new_label = ( + f"{len(new_images)} images in {os.path.join(output_dir, subdir)}" + ) + return [ + new_images, + gr.Gallery( + value=new_images, + label=new_label, + visible=len(new_images) > 0, + ), + gr.Image( + label=new_label, + visible=len(new_images) == 0, + ), + ] + + def on_open_subdir(subdir): + subdir_path = os.path.normpath(os.path.join(output_dir, subdir)) + + if os.path.isdir(subdir_path): + if sys.platform == "linux": + subprocess.run(["xdg-open", subdir_path]) + elif sys.platform == "darwin": + subprocess.run(["open", subdir_path]) + elif sys.platform == "win32": + os.startfile(subdir_path) + + def on_refresh(current_subdir: str) -> list: + # get an up-to-date subdirectory list + refreshed_subdirs = output_subdirs() + # get the images using either the current subdirectory or the most + # recent valid one + new_subdir = ( + 
current_subdir + if current_subdir in refreshed_subdirs + else refreshed_subdirs[0] + ) + new_images = outputgallery_filenames(new_subdir) + new_label = ( + f"{len(new_images)} images in " + f"{os.path.join(output_dir, new_subdir)}" + ) + + return [ + gr.Dropdown( + choices=refreshed_subdirs, + value=new_subdir, + ), + refreshed_subdirs, + new_images, + gr.Gallery( + value=new_images, label=new_label, visible=len(new_images) > 0 + ), + gr.Image( + label=new_label, + visible=len(new_images) == 0, + ), + ] + + def on_new_image(subdir, subdir_paths, status) -> list: + # prevent error triggered when an image generates before the tab + # has even been selected + subdir_paths = ( + subdir_paths + if len(subdir_paths) > 0 + else [get_generated_imgs_todays_subdir()] + ) + + # only update if the current subdir is the most recent one as + # new images only go there + if subdir_paths[0] == subdir: + new_images = outputgallery_filenames(subdir) + new_label = ( + f"{len(new_images)} images in " + f"{os.path.join(output_dir, subdir)} - {status}" + ) + + return [ + new_images, + gr.Gallery( + value=new_images, + label=new_label, + visible=len(new_images) > 0, + ), + gr.Image( + label=new_label, + visible=len(new_images) == 0, + ), + ] + else: + # otherwise change nothing, + # (only untyped gradio gr.update() does this) + return [gr.update(), gr.update(), gr.update()] + + def on_select_image(images: list[str], evt: gr.SelectData) -> list: + # evt.index is an index into the full list of filenames for + # the current subdirectory + filename = images[evt.index] + params = displayable_metadata(filename) + + if params: + if params["source"] == "missing": + return [ + "Could not find this image file, refresh the gallery and update the images", + [["Status", "File missing"]], + ] + else: + return [ + filename, + list(map(list, params["parameters"].items())), + ] + + return [ + filename, + [["Status", "No parameters found"]], + ] + + def on_outputgallery_filename_change(filename: str) -> list: + exists = filename != "None" and os.path.exists(filename) + return [ + # disable or enable each of the sendto button based on whether + # an image is selected + gr.Button(interactive=exists), + ] + + # The time first our tab is selected we need to do an initial refresh + # to populate the subdirectory select box and the images from the most + # recent subdirectory. + # + # We do it at this point rather than setting this up in the controls' + # definitions as when you refresh the browser you always get what was + # *initially* set, which won't include any new subdirectories or images + # that might have created since the application was started. Doing it + # this way means a browser refresh/reload always gets the most + # up-to-date data. + def on_select_tab(subdir_paths, request: gr.Request): + local_client = request.headers["host"].startswith( + "127.0.0.1:" + ) or request.headers["host"].startswith("localhost:") + + if len(subdir_paths) == 0: + return on_refresh("") + [gr.update(interactive=local_client)] + else: + return ( + # Change nothing, (only untyped gr.update() does this) + gr.update(), + gr.update(), + gr.update(), + gr.update(), + gr.update(), + gr.update(), + ) + + # clearing images when we need to completely change what's in the + # gallery avoids current images being shown replacing piecemeal and + # prevents weirdness and errors if the user selects an image during the + # replacement phase. 
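+    # The same clearing step is reused below: clear_gallery is a plain dict of
+    # keyword arguments that is unpacked with ** into both
+    # subdirectories.select(...) and refresh.click(...), so each event blanks
+    # the gallery and shows the logo before its .then(...) handler refills it.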
+ clear_gallery = dict( + fn=on_clear_gallery, + inputs=None, + outputs=[gallery, logo], + queue=False, + ) + + subdirectories.select(**clear_gallery).then( + on_select_subdir, + [subdirectories], + [gallery_files, gallery, logo], + queue=False, + ) + + open_subdir.click(on_open_subdir, inputs=[subdirectories], queue=False) + + refresh.click(**clear_gallery).then( + on_refresh, + [subdirectories], + [subdirectories, subdirectory_paths, gallery_files, gallery, logo], + queue=False, + ) + + image_columns.change( + fn=on_image_columns_change, + inputs=[image_columns], + outputs=[gallery], + queue=False, + ) + + gallery.select( + on_select_image, + [gallery_files], + [outputgallery_filename, image_parameters], + queue=False, + ) + + outputgallery_filename.change( + on_outputgallery_filename_change, + [outputgallery_filename], + [ + outputgallery_sendto_sd, + ], + queue=False, + ) + + # We should have been given the .select function for our tab, so set it up + def outputgallery_tab_select(select): + select( + fn=on_select_tab, + inputs=[subdirectory_paths], + outputs=[ + subdirectories, + subdirectory_paths, + gallery_files, + gallery, + logo, + open_subdir, + ], + queue=False, + ) + + # We should have been passed a list of components on other tabs that update + # when a new image has generated on that tab, so set things up so the user + # will see that new image if they are looking at today's subdirectory + def outputgallery_watch(components: gr.Textbox): + for component in components: + component.change( + on_new_image, + inputs=[subdirectories, subdirectory_paths, component], + outputs=[gallery_files, gallery, logo], + queue=False, + ) diff --git a/apps/shark_studio/web/ui/sd.py b/apps/shark_studio/web/ui/sd.py new file mode 100644 index 0000000000..f26c7967e3 --- /dev/null +++ b/apps/shark_studio/web/ui/sd.py @@ -0,0 +1,650 @@ +import os +import time +import gradio as gr +import PIL +import json +import sys + +from math import ceil +from inspect import signature +from PIL import Image +from pathlib import Path +from datetime import datetime as dt +from gradio.components.image_editor import ( + Brush, + Eraser, + EditorValue, +) + +from apps.shark_studio.api.utils import ( + get_available_devices, + get_generated_imgs_path, + get_checkpoints_path, + get_checkpoints, +) +from apps.shark_studio.api.sd import ( + sd_model_map, + shark_sd_fn, + cancel_sd, +) +from apps.shark_studio.api.controlnet import ( + preprocessor_model_map, + PreprocessorModel, + cnet_preview, +) +from apps.shark_studio.modules.schedulers import ( + scheduler_model_map, +) +from apps.shark_studio.modules.img_processing import ( + resampler_list, + resize_stencil, +) +from apps.shark_studio.modules.shared_cmd_opts import cmd_opts +from apps.shark_studio.web.ui.utils import ( + nodlogo_loc, +) +from apps.shark_studio.web.utils.state import ( + get_generation_text_info, + status_label, +) +from apps.shark_studio.web.ui.common_events import lora_changed + + +def view_json_file(file_obj): + content = "" + with open(file_obj.name, "r") as fopen: + content = fopen.read() + return content + + +max_controlnets = 3 +max_loras = 5 + + +def show_loras(k): + k = int(k) + return gr.State( + [gr.Dropdown(visible=True)] * k + + [gr.Dropdown(visible=False, value="None")] * (max_loras - k) + ) + + +def show_controlnets(k): + k = int(k) + return [ + gr.State( + [ + [gr.Row(visible=True, render=True)] * k + + [gr.Row(visible=False)] * (max_controlnets - k) + ] + ), + gr.State([None] * k), + gr.State([None] * k), + gr.State([None] * k), + ] + 
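+# numpy is used by create_canvas below (np.zeros) but does not appear in the
+# import block at the top of this file; importing it here keeps the module
+# runnable as written.
+import numpy as np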
+ +def create_canvas(width, height): + data = Image.fromarray( + np.zeros( + shape=(height, width, 3), + dtype=np.uint8, + ) + + 255 + ) + img_dict = { + "background": data, + "layers": [data], + "composite": None, + } + return EditorValue(img_dict) + + +def import_original(original_img, width, height): + resized_img, _, _ = resize_stencil(original_img, width, height) + img_dict = { + "background": resized_img, + "layers": [resized_img], + "composite": None, + } + return gr.ImageEditor( + value=EditorValue(img_dict), + crop_size=(width, height), + ) + + +def update_cn_input( + model, + width, + height, + stencils, + images, + preprocessed_hints, +): + if model == None: + stencils[index] = None + images[index] = None + preprocessed_hints[index] = None + return [ + gr.update(), + gr.update(), + gr.update(), + gr.update(), + gr.update(), + gr.update(), + stencils, + images, + preprocessed_hints, + ] + elif model == "scribble": + return [ + gr.ImageEditor( + visible=True, + interactive=True, + show_label=False, + image_mode="RGB", + type="pil", + brush=Brush( + colors=["#000000"], + color_mode="fixed", + default_size=5, + ), + ), + gr.Image( + visible=True, + show_label=False, + interactive=True, + show_download_button=False, + ), + gr.Slider(visible=True, label="Canvas Width"), + gr.Slider(visible=True, label="Canvas Height"), + gr.Button(visible=True), + gr.Button(visible=False), + stencils, + images, + preprocessed_hints, + ] + else: + return [ + gr.ImageEditor( + visible=True, + interactive=True, + show_label=False, + image_mode="RGB", + type="pil", + ), + gr.Image( + visible=True, + show_label=False, + interactive=True, + show_download_button=False, + ), + gr.Slider(visible=True, label="Canvas Width"), + gr.Slider(visible=True, label="Canvas Height"), + gr.Button(visible=True), + gr.Button(visible=False), + stencils, + images, + preprocessed_hints, + ] + + +sd_fn_inputs = [] +sd_fn_sig = signature(shark_sd_fn).replace() +for i in sd_fn_sig.parameters: + sd_fn_inputs.append(i) + +with gr.Blocks(title="Stable Diffusion") as sd_element: + # Get a list of arguments needed for the API call, then + # initialize an empty list that will manage the corresponding + # gradio values. 
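+    # Note on the introspection above: signature(shark_sd_fn).replace() with no
+    # arguments returns an unchanged copy of the Signature, and iterating over
+    # .parameters yields the parameter *names* in declaration order, so
+    # sd_fn_inputs ends up as the ordered list of argument names that the
+    # gradio components defined below need to supply.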
+ with gr.Row(elem_id="ui_title"): + nod_logo = Image.open(nodlogo_loc) + with gr.Row(variant="compact", equal_height=True): + with gr.Column( + scale=1, + elem_id="demo_title_outer", + ): + gr.Image( + value=nod_logo, + show_label=False, + interactive=False, + elem_id="top_logo", + width=150, + height=50, + show_download_button=False, + ) + with gr.Column(elem_id="ui_body"): + with gr.Row(): + with gr.Column(scale=1, min_width=600): + with gr.Row(equal_height=True): + with gr.Column(scale=3): + sd_model_info = ( + f"Checkpoint Path: {str(get_checkpoints_path())}" + ) + sd_base = gr.Dropdown( + label="Base Model", + info="Select or enter HF model ID", + elem_id="custom_model", + value="stabilityai/stable-diffusion-2-1-base", + choices=sd_model_map.keys(), + ) # base_model_id + sd_custom_weights = gr.Dropdown( + label="Weights (Optional)", + info="Select or enter HF model ID", + elem_id="custom_model", + value="None", + allow_custom_value=True, + choices=get_checkpoints(sd_base), + ) # + with gr.Column(scale=2): + sd_vae_info = ( + str(get_checkpoints_path("vae")) + ).replace("\\", "\n\\") + sd_vae_info = f"VAE Path: {sd_vae_info}" + sd_custom_vae = gr.Dropdown( + label=f"Custom VAE Models", + info=sd_vae_info, + elem_id="custom_model", + value=os.path.basename(cmd_opts.custom_vae) + if cmd_opts.custom_vae + else "None", + choices=["None"] + get_checkpoints("vae"), + allow_custom_value=True, + scale=1, + ) + with gr.Column(scale=1): + save_sd_config = gr.Button( + value="Save Config", size="sm" + ) + clear_sd_config = gr.ClearButton( + value="Clear Config", size="sm" + ) + load_sd_config = gr.FileExplorer( + label="Load Config", + root=os.path.basename("./configs"), + ) + + with gr.Group(elem_id="prompt_box_outer"): + prompt = gr.Textbox( + label="Prompt", + value=cmd_opts.prompts[0], + lines=2, + elem_id="prompt_box", + ) + negative_prompt = gr.Textbox( + label="Negative Prompt", + value=cmd_opts.negative_prompts[0], + lines=2, + elem_id="negative_prompt_box", + ) + + with gr.Accordion(label="Input Image", open=False): + # TODO: make this import image prompt info if it exists + sd_init_image = gr.Image( + label="Input Image", + type="pil", + height=300, + interactive=True, + ) + with gr.Accordion( + label="Embeddings options", open=False, render=True + ): + sd_lora_info = ( + str(get_checkpoints_path("loras")) + ).replace("\\", "\n\\") + num_loras = gr.Slider( + 1, max_loras, value=1, step=1, label="LoRA Count" + ) + loras = gr.State([]) + for i in range(max_loras): + with gr.Row(): + lora_opt = gr.Dropdown( + allow_custom_value=True, + label=f"Standalone LoRA Weights", + info=sd_lora_info, + elem_id="lora_weights", + value="None", + choices=["None"] + get_checkpoints("lora"), + ) + with gr.Row(): + lora_tags = gr.HTML( + value="
No LoRA selected
", + elem_classes="lora-tags", + ) + gr.on( + triggers=[lora_opt.change], + fn=lora_changed, + inputs=[lora_opt], + outputs=[lora_tags], + queue=True, + ) + loras.value.append(lora_opt) + + num_loras.change(show_loras, [num_loras], [loras]) + with gr.Accordion(label="Advanced Options", open=True): + with gr.Row(): + scheduler = gr.Dropdown( + elem_id="scheduler", + label="Scheduler", + value="EulerDiscrete", + choices=scheduler_model_map.keys(), + allow_custom_value=False, + ) + with gr.Row(): + height = gr.Slider( + 384, + 768, + value=cmd_opts.height, + step=8, + label="Height", + ) + width = gr.Slider( + 384, + 768, + value=cmd_opts.width, + step=8, + label="Width", + ) + with gr.Row(): + with gr.Column(scale=3): + steps = gr.Slider( + 1, + 100, + value=cmd_opts.steps, + step=1, + label="Steps", + ) + batch_count = gr.Slider( + 1, + 100, + value=cmd_opts.batch_count, + step=1, + label="Batch Count", + interactive=True, + ) + batch_size = gr.Slider( + 1, + 4, + value=cmd_opts.batch_size, + step=1, + label="Batch Size", + interactive=True, + visible=True, + ) + repeatable_seeds = gr.Checkbox( + cmd_opts.repeatable_seeds, + label="Repeatable Seeds", + ) + with gr.Column(scale=3): + strength = gr.Slider( + 0, + 1, + value=cmd_opts.strength, + step=0.01, + label="Denoising Strength", + ) + resample_type = gr.Dropdown( + value=cmd_opts.resample_type, + choices=resampler_list, + label="Resample Type", + allow_custom_value=True, + ) + guidance_scale = gr.Slider( + 0, + 50, + value=cmd_opts.guidance_scale, + step=0.1, + label="CFG Scale", + ) + ondemand = gr.Checkbox( + value=cmd_opts.lowvram, + label="Low VRAM", + interactive=True, + ) + precision = gr.Radio( + label="Precision", + value=cmd_opts.precision, + choices=[ + "fp16", + "fp32", + ], + visible=True, + ) + with gr.Row(): + seed = gr.Textbox( + value=cmd_opts.seed, + label="Seed", + info="An integer or a JSON list of integers, -1 for random", + ) + device = gr.Dropdown( + elem_id="device", + label="Device", + value=get_available_devices()[0], + choices=get_available_devices(), + allow_custom_value=False, + ) + with gr.Accordion( + label="Controlnet Options", open=False, render=False + ): + sd_cnet_info = ( + str(get_checkpoints_path("controlnet")) + ).replace("\\", "\n\\") + num_cnets = gr.Slider( + 0, + max_controlnets, + value=0, + step=1, + label="Controlnet Count", + ) + cnet_rows = [] + stencils = gr.State([]) + images = gr.State([]) + preprocessed_hints = gr.State([]) + control_mode = gr.Radio( + choices=["Prompt", "Balanced", "Controlnet"], + value="Balanced", + label="Control Mode", + ) + + for i in range(max_controlnets): + with gr.Row(visible=False) as cnet_row: + with gr.Column(): + cnet_gen = gr.Button( + value="Preprocess controlnet input", + ) + cnet_model = gr.Dropdown( + allow_custom_value=True, + label=f"Controlnet Model", + info=sd_cnet_info, + elem_id="lora_weights", + value="None", + choices=[ + "None", + "canny", + "openpose", + "scribble", + "zoedepth", + ] + + get_checkpoints("controlnet"), + ) + canvas_width = gr.Slider( + label="Canvas Width", + minimum=256, + maximum=1024, + value=512, + step=1, + visible=False, + ) + canvas_height = gr.Slider( + label="Canvas Height", + minimum=256, + maximum=1024, + value=512, + step=1, + visible=False, + ) + make_canvas = gr.Button( + value="Make Canvas!", + visible=False, + ) + use_input_img = gr.Button( + value="Use Original Image", + visible=False, + ) + cnet_input = gr.ImageEditor( + visible=True, + image_mode="RGB", + interactive=True, + show_label=True, + label="Input 
Image", + type="pil", + ) + cnet_output = gr.Image( + value=None, + visible=True, + label="Preprocessed Hint", + interactive=True, + show_label=True, + ) + use_input_img.click( + import_original, + [sd_init_image, canvas_width, canvas_height], + [cnet_input], + ) + cnet_model.change( + fn=update_cn_input, + inputs=[ + cnet_model, + canvas_width, + canvas_height, + stencils, + images, + preprocessed_hints, + ], + outputs=[ + cnet_input, + cnet_output, + canvas_width, + canvas_height, + make_canvas, + use_input_img, + stencils, + images, + preprocessed_hints, + ], + ) + make_canvas.click( + create_canvas, + [canvas_width, canvas_height], + [ + cnet_input, + ], + ) + gr.on( + triggers=[cnet_gen.click], + fn=cnet_preview, + inputs=[ + cnet_model, + cnet_input, + stencils, + images, + preprocessed_hints, + ], + outputs=[ + cnet_output, + stencils, + images, + preprocessed_hints, + ], + ) + cnet_rows.value.append(cnet_row) + + num_cnets.change( + show_controlnets, + [num_cnets], + [cnet_rows, stencils, images, preprocessed_hints], + ) + with gr.Column(scale=1, min_width=600): + with gr.Group(): + sd_gallery = gr.Gallery( + label="Generated images", + show_label=False, + elem_id="gallery", + columns=2, + object_fit="contain", + ) + std_output = gr.Textbox( + value=f"{sd_model_info}\n" + f"Images will be saved at " + f"{get_generated_imgs_path()}", + lines=2, + elem_id="std_output", + show_label=False, + ) + sd_status = gr.Textbox(visible=False) + with gr.Row(): + stable_diffusion = gr.Button("Generate Image(s)") + random_seed = gr.Button("Randomize Seed") + random_seed.click( + lambda: -1, + inputs=[], + outputs=[seed], + queue=False, + ) + stop_batch = gr.Button("Stop Batch") + + kwargs = dict( + fn=shark_sd_fn, + inputs=[ + prompt, + negative_prompt, + sd_init_image, + height, + width, + steps, + strength, + guidance_scale, + seed, + batch_count, + batch_size, + scheduler, + sd_base, + sd_custom_weights, + sd_custom_vae, + precision, + device, + loras, + ondemand, + repeatable_seeds, + resample_type, + control_mode, + stencils, + images, + preprocessed_hints, + ], + outputs=[ + sd_gallery, + std_output, + sd_status, + stencils, + images, + ], + show_progress="minimal", + ) + + status_kwargs = dict( + fn=lambda bc, bs: status_label("Stable Diffusion", 0, bc, bs), + inputs=[batch_count, batch_size], + outputs=sd_status, + ) + + prompt_submit = prompt.submit(**status_kwargs).then(**kwargs) + neg_prompt_submit = negative_prompt.submit(**status_kwargs).then( + **kwargs + ) + generate_click = stable_diffusion.click(**status_kwargs).then(**kwargs) + stop_batch.click( + fn=cancel_sd, + cancels=[prompt_submit, neg_prompt_submit, generate_click], + ) diff --git a/apps/shark_studio/web/ui/utils.py b/apps/shark_studio/web/ui/utils.py new file mode 100644 index 0000000000..ba62e5adc0 --- /dev/null +++ b/apps/shark_studio/web/ui/utils.py @@ -0,0 +1,33 @@ +from enum import IntEnum +import math +import sys +import os + + +def resource_path(relative_path): + """Get absolute path to resource, works for dev and for PyInstaller""" + base_path = getattr( + sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__)) + ) + return os.path.join(base_path, relative_path) + + +nodlogo_loc = resource_path("logos/nod-logo.png") +nodicon_loc = resource_path("logos/nod-icon.png") + + +class HSLHue(IntEnum): + RED = 0 + YELLOW = 60 + GREEN = 120 + CYAN = 180 + BLUE = 240 + MAGENTA = 300 + + +def hsl_color(alpha: float, start, end): + b = (end - start) * (alpha if alpha > 0 else 0) + result = b + start + + # Return a CSS HSL 
string + return f"hsl({math.floor(result)}, 80%, 35%)" diff --git a/apps/shark_studio/web/utils/globals.py b/apps/shark_studio/web/utils/globals.py new file mode 100644 index 0000000000..0b5f54636a --- /dev/null +++ b/apps/shark_studio/web/utils/globals.py @@ -0,0 +1,74 @@ +import gc + +""" +The global objects include SD pipeline and config. +Maintaining the global objects would avoid creating extra pipeline objects when switching modes. +Also we could avoid memory leak when switching models by clearing the cache. +""" + + +def _init(): + global _sd_obj + global _config_obj + global _schedulers + _sd_obj = None + _config_obj = None + _schedulers = None + + +def set_sd_obj(value): + global _sd_obj + _sd_obj = value + + +def set_sd_scheduler(key): + global _sd_obj + _sd_obj.scheduler = _schedulers[key] + + +def set_sd_status(value): + global _sd_obj + _sd_obj.status = value + + +def set_cfg_obj(value): + global _config_obj + _config_obj = value + + +def set_schedulers(value): + global _schedulers + _schedulers = value + + +def get_sd_obj(): + global _sd_obj + return _sd_obj + + +def get_sd_status(): + global _sd_obj + return _sd_obj.status + + +def get_cfg_obj(): + global _config_obj + return _config_obj + + +def get_scheduler(key): + global _schedulers + return _schedulers[key] + + +def clear_cache(): + global _sd_obj + global _config_obj + global _schedulers + del _sd_obj + del _config_obj + del _schedulers + gc.collect() + _sd_obj = None + _config_obj = None + _schedulers = None diff --git a/apps/shark_studio/web/utils/metadata/__init__.py b/apps/shark_studio/web/utils/metadata/__init__.py new file mode 100644 index 0000000000..bcbcf746ca --- /dev/null +++ b/apps/shark_studio/web/utils/metadata/__init__.py @@ -0,0 +1,6 @@ +from .png_metadata import ( + import_png_metadata, +) +from .display import ( + displayable_metadata, +) diff --git a/apps/shark_studio/web/utils/metadata/csv_metadata.py b/apps/shark_studio/web/utils/metadata/csv_metadata.py new file mode 100644 index 0000000000..d617e802bf --- /dev/null +++ b/apps/shark_studio/web/utils/metadata/csv_metadata.py @@ -0,0 +1,45 @@ +import csv +import os +from .format import humanize, humanizable + + +def csv_path(image_filename: str): + return os.path.join(os.path.dirname(image_filename), "imgs_details.csv") + + +def has_csv(image_filename: str) -> bool: + return os.path.exists(csv_path(image_filename)) + + +def matching_filename(image_filename: str, row): + # we assume the final column of the csv has the original filename with full path and match that + # against the image_filename if we are given a list. Otherwise we assume a dict and and take + # the value of the OUTPUT key + return os.path.basename(image_filename) in ( + row[-1] if isinstance(row, list) else row["OUTPUT"] + ) + + +def parse_csv(image_filename: str): + csv_filename = csv_path(image_filename) + + with open(csv_filename, "r", newline="") as csv_file: + # We use a reader or DictReader here for images_details.csv depending on whether we think it + # has headers or not. Having headers means less guessing of the format. 
+ has_header = csv.Sniffer().has_header(csv_file.read(2048)) + csv_file.seek(0) + + reader = ( + csv.DictReader(csv_file) if has_header else csv.reader(csv_file) + ) + + matches = [ + # we rely on humanize and humanizable to work out the parsing of the individual .csv rows + humanize(row) + for row in reader + if row + and (has_header or humanizable(row)) + and matching_filename(image_filename, row) + ] + + return matches[0] if matches else {} diff --git a/apps/shark_studio/web/utils/metadata/display.py b/apps/shark_studio/web/utils/metadata/display.py new file mode 100644 index 0000000000..26234aab5c --- /dev/null +++ b/apps/shark_studio/web/utils/metadata/display.py @@ -0,0 +1,53 @@ +import json +import os +from PIL import Image +from .png_metadata import parse_generation_parameters +from .exif_metadata import has_exif, parse_exif +from .csv_metadata import has_csv, parse_csv +from .format import compact, humanize + + +def displayable_metadata(image_filename: str) -> dict: + if not os.path.isfile(image_filename): + return {"source": "missing", "parameters": {}} + + pil_image = Image.open(image_filename) + + # we have PNG generation parameters (preferred, as it's what the txt2img dropzone reads, + # and we go via that for SendTo, and is directly tied to the image) + if "parameters" in pil_image.info: + return { + "source": "png", + "parameters": compact( + parse_generation_parameters(pil_image.info["parameters"]) + ), + } + + # we have a matching json file (next most likely to be accurate when it's there) + json_path = os.path.splitext(image_filename)[0] + ".json" + if os.path.isfile(json_path): + with open(json_path) as params_file: + return { + "source": "json", + "parameters": compact( + humanize(json.load(params_file), includes_filename=False) + ), + } + + # we have a CSV file so try that (can be different shapes, and it usually has no + # headers/param names so of the things we we *know* have parameters, it's the + # last resort) + if has_csv(image_filename): + params = parse_csv(image_filename) + if params: # we might not have found the filename in the csv + return { + "source": "csv", + "parameters": compact(params), # already humanized + } + + # EXIF data, probably a .jpeg, may well not include parameters, but at least it's *something* + if has_exif(image_filename): + return {"source": "exif", "parameters": parse_exif(pil_image)} + + # we've got nothing + return None diff --git a/apps/shark_studio/web/utils/metadata/exif_metadata.py b/apps/shark_studio/web/utils/metadata/exif_metadata.py new file mode 100644 index 0000000000..c72da8a935 --- /dev/null +++ b/apps/shark_studio/web/utils/metadata/exif_metadata.py @@ -0,0 +1,52 @@ +from PIL import Image +from PIL.ExifTags import Base as EXIFKeys, TAGS, IFD, GPSTAGS + + +def has_exif(image_filename: str) -> bool: + return True if Image.open(image_filename).getexif() else False + + +def parse_exif(pil_image: Image) -> dict: + img_exif = pil_image.getexif() + + # See this stackoverflow answer for where most this comes from: https://stackoverflow.com/a/75357594 + # I did try to use the exif library but it broke just as much as my initial attempt at this (albeit I + # I was probably using it wrong) so I reverted back to using PIL with more filtering and saved a + # dependency + exif_tags = { + TAGS.get(key, key): str(val) + for (key, val) in img_exif.items() + if key in TAGS + and key not in (EXIFKeys.ExifOffset, EXIFKeys.GPSInfo) + and val + and (not isinstance(val, bytes)) + and (not str(val).isspace()) + } + + def try_get_ifd(ifd_id): + 
try: + return img_exif.get_ifd(ifd_id).items() + except KeyError: + return {} + + ifd_tags = { + TAGS.get(key, key): str(val) + for ifd_id in IFD + for (key, val) in try_get_ifd(ifd_id) + if ifd_id != IFD.GPSInfo + and key in TAGS + and val + and (not isinstance(val, bytes)) + and (not str(val).isspace()) + } + + gps_tags = { + GPSTAGS.get(key, key): str(val) + for (key, val) in try_get_ifd(IFD.GPSInfo) + if key in GPSTAGS + and val + and (not isinstance(val, bytes)) + and (not str(val).isspace()) + } + + return {**exif_tags, **ifd_tags, **gps_tags} diff --git a/apps/shark_studio/web/utils/metadata/format.py b/apps/shark_studio/web/utils/metadata/format.py new file mode 100644 index 0000000000..f097dab54f --- /dev/null +++ b/apps/shark_studio/web/utils/metadata/format.py @@ -0,0 +1,143 @@ +# As SHARK has evolved more columns have been added to images_details.csv. However, since +# no version of the CSV has any headers (yet) we don't actually have anything within the +# file that tells us which parameter each column is for. So this is a list of known patterns +# indexed by length which is what we're going to have to use to guess which columns are the +# right ones for the file we're looking at. + +# The same ordering is used for JSON, but these do have key names, however they are not very +# human friendly, nor do they match up with the what is written to the .png headers + +# So these are functions to try and get something consistent out the raw input from all +# these sources + +PARAMS_FORMATS = { + 9: { + "VARIANT": "Model", + "SCHEDULER": "Sampler", + "PROMPT": "Prompt", + "NEG_PROMPT": "Negative prompt", + "SEED": "Seed", + "CFG_SCALE": "CFG scale", + "PRECISION": "Precision", + "STEPS": "Steps", + "OUTPUT": "Filename", + }, + 10: { + "MODEL": "Model", + "VARIANT": "Variant", + "SCHEDULER": "Sampler", + "PROMPT": "Prompt", + "NEG_PROMPT": "Negative prompt", + "SEED": "Seed", + "CFG_SCALE": "CFG scale", + "PRECISION": "Precision", + "STEPS": "Steps", + "OUTPUT": "Filename", + }, + 12: { + "VARIANT": "Model", + "SCHEDULER": "Sampler", + "PROMPT": "Prompt", + "NEG_PROMPT": "Negative prompt", + "SEED": "Seed", + "CFG_SCALE": "CFG scale", + "PRECISION": "Precision", + "STEPS": "Steps", + "HEIGHT": "Height", + "WIDTH": "Width", + "MAX_LENGTH": "Max Length", + "OUTPUT": "Filename", + }, +} + +PARAMS_FORMAT_CURRENT = { + "VARIANT": "Model", + "VAE": "VAE", + "LORA": "LoRA", + "SCHEDULER": "Sampler", + "PROMPT": "Prompt", + "NEG_PROMPT": "Negative prompt", + "SEED": "Seed", + "CFG_SCALE": "CFG scale", + "PRECISION": "Precision", + "STEPS": "Steps", + "HEIGHT": "Height", + "WIDTH": "Width", + "MAX_LENGTH": "Max Length", + "OUTPUT": "Filename", +} + + +def compact(metadata: dict) -> dict: + # we don't want to alter the original dictionary + result = dict(metadata) + + # discard the filename because we should already have it + if result.keys() & {"Filename"}: + result.pop("Filename") + + # make showing the sizes more compact by using only one line each + if result.keys() & {"Size-1", "Size-2"}: + result["Size"] = f"{result.pop('Size-1')}x{result.pop('Size-2')}" + elif result.keys() & {"Height", "Width"}: + result["Size"] = f"{result.pop('Height')}x{result.pop('Width')}" + + if result.keys() & {"Hires resize-1", "Hires resize-1"}: + hires_y = result.pop("Hires resize-1") + hires_x = result.pop("Hires resize-2") + + if hires_x == 0 and hires_y == 0: + result["Hires resize"] = "None" + else: + result["Hires resize"] = f"{hires_y}x{hires_x}" + + # remove VAE if it exists and is empty + if 
(result.keys() & {"VAE"}) and ( + not result["VAE"] or result["VAE"] == "None" + ): + result.pop("VAE") + + # remove LoRA if it exists and is empty + if (result.keys() & {"LoRA"}) and ( + not result["LoRA"] or result["LoRA"] == "None" + ): + result.pop("LoRA") + + return result + + +def humanizable(metadata: dict | list[str], includes_filename=True) -> dict: + lookup_key = len(metadata) + (0 if includes_filename else 1) + return lookup_key in PARAMS_FORMATS.keys() + + +def humanize(metadata: dict | list[str], includes_filename=True) -> dict: + lookup_key = len(metadata) + (0 if includes_filename else 1) + + # For lists we can only work based on the length, we have no other information + if isinstance(metadata, list): + if humanizable(metadata, includes_filename): + return dict(zip(PARAMS_FORMATS[lookup_key].values(), metadata)) + else: + raise KeyError( + f"Humanize could not find the format for a parameter list of length {len(metadata)}" + ) + + # For dictionaries we try to use the matching length parameter format if + # available, otherwise we just use the current format which is assumed to + # have everything currently known about. Then we swap keys in the metadata + # that match keys in the format for the friendlier name that we have set + # in the format value + if isinstance(metadata, dict): + if humanizable(metadata, includes_filename): + format = PARAMS_FORMATS[lookup_key] + else: + format = PARAMS_FORMAT_CURRENT + + return { + format[key]: metadata[key] + for key in format.keys() + if key in metadata.keys() and metadata[key] + } + + raise TypeError("Can only humanize parameter lists or dictionaries") diff --git a/apps/shark_studio/web/utils/metadata/png_metadata.py b/apps/shark_studio/web/utils/metadata/png_metadata.py new file mode 100644 index 0000000000..cffc385ab7 --- /dev/null +++ b/apps/shark_studio/web/utils/metadata/png_metadata.py @@ -0,0 +1,222 @@ +import re +from pathlib import Path +from apps.shark_studio.api.utils import ( + get_checkpoint_pathfile, +) +from apps.shark_studio.api.sd import ( + sd_model_map, +) +from apps.shark_studio.modules.schedulers import ( + scheduler_model_map, +) + +re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)' +re_param = re.compile(re_param_code) +re_imagesize = re.compile(r"^(\d+)x(\d+)$") + + +def parse_generation_parameters(x: str): + res = {} + prompt = "" + negative_prompt = "" + done_with_prompt = False + + *lines, lastline = x.strip().split("\n") + if len(re_param.findall(lastline)) < 3: + lines.append(lastline) + lastline = "" + + for i, line in enumerate(lines): + line = line.strip() + if line.startswith("Negative prompt:"): + done_with_prompt = True + line = line[16:].strip() + + if done_with_prompt: + negative_prompt += ("" if negative_prompt == "" else "\n") + line + else: + prompt += ("" if prompt == "" else "\n") + line + + res["Prompt"] = prompt + res["Negative prompt"] = negative_prompt + + for k, v in re_param.findall(lastline): + v = v[1:-1] if v[0] == '"' and v[-1] == '"' else v + m = re_imagesize.match(v) + if m is not None: + res[k + "-1"] = m.group(1) + res[k + "-2"] = m.group(2) + else: + res[k] = v + + # Missing CLIP skip means it was set to 1 (the default) + if "Clip skip" not in res: + res["Clip skip"] = "1" + + hypernet = res.get("Hypernet", None) + if hypernet is not None: + res[ + "Prompt" + ] += f"""""" + + if "Hires resize-1" not in res: + res["Hires resize-1"] = 0 + res["Hires resize-2"] = 0 + + return res + + +def try_find_model_base_from_png_metadata( + file: str, folder: str = 
"models" +) -> str: + custom = "" + + # Remove extension from file info + if file.endswith(".safetensors") or file.endswith(".ckpt"): + file = Path(file).stem + # Check for the file name match with one of the local ckpt or safetensors files + if Path(get_checkpoint_pathfile(file + ".ckpt", folder)).is_file(): + custom = file + ".ckpt" + if Path(get_checkpoint_pathfile(file + ".safetensors", folder)).is_file(): + custom = file + ".safetensors" + + return custom + + +def find_model_from_png_metadata( + key: str, metadata: dict[str, str | int] +) -> tuple[str, str]: + png_hf_id = "" + png_custom = "" + + if key in metadata: + model_file = metadata[key] + png_custom = try_find_model_base_from_png_metadata(model_file) + # Check for a model match with one of the default model list (ex: "Linaqruf/anything-v3.0") + if model_file in sd_model_map: + png_custom = model_file + # If nothing had matched, check vendor/hf_model_id + if not png_custom and model_file.count("/"): + png_hf_id = model_file + # No matching model was found + if not png_custom and not png_hf_id: + print( + "Import PNG info: Unable to find a matching model for %s" + % model_file + ) + + return png_custom, png_hf_id + + +def find_vae_from_png_metadata( + key: str, metadata: dict[str, str | int] +) -> str: + vae_custom = "" + + if key in metadata: + vae_file = metadata[key] + vae_custom = try_find_model_base_from_png_metadata(vae_file, "vae") + + # VAE input is optional, should not print or throw an error if missing + + return vae_custom + + +def find_lora_from_png_metadata( + key: str, metadata: dict[str, str | int] +) -> tuple[str, str]: + lora_hf_id = "" + lora_custom = "" + + if key in metadata: + lora_file = metadata[key] + lora_custom = try_find_model_base_from_png_metadata(lora_file, "lora") + # If nothing had matched, check vendor/hf_model_id + if not lora_custom and lora_file.count("/"): + lora_hf_id = lora_file + + # LoRA input is optional, should not print or throw an error if missing + + return lora_custom, lora_hf_id + + +def import_png_metadata( + pil_data, + prompt, + negative_prompt, + steps, + sampler, + cfg_scale, + seed, + width, + height, + custom_model, + custom_lora, + hf_lora_id, + custom_vae, +): + try: + png_info = pil_data.info["parameters"] + metadata = parse_generation_parameters(png_info) + + (png_custom_model, png_hf_model_id) = find_model_from_png_metadata( + "Model", metadata + ) + (lora_custom_model, lora_hf_model_id) = find_lora_from_png_metadata( + "LoRA", metadata + ) + vae_custom_model = find_vae_from_png_metadata("VAE", metadata) + + negative_prompt = metadata["Negative prompt"] + steps = int(metadata["Steps"]) + cfg_scale = float(metadata["CFG scale"]) + seed = int(metadata["Seed"]) + width = float(metadata["Size-1"]) + height = float(metadata["Size-2"]) + + if "Model" in metadata and png_custom_model: + custom_model = png_custom_model + elif "Model" in metadata and png_hf_model_id: + custom_model = png_hf_model_id + + if "LoRA" in metadata and lora_custom_model: + custom_lora = lora_custom_model + hf_lora_id = "" + if "LoRA" in metadata and lora_hf_model_id: + custom_lora = "None" + hf_lora_id = lora_hf_model_id + + if "VAE" in metadata and vae_custom_model: + custom_vae = vae_custom_model + + if "Prompt" in metadata: + prompt = metadata["Prompt"] + if "Sampler" in metadata: + if metadata["Sampler"] in scheduler_model_map: + sampler = metadata["Sampler"] + else: + print( + "Import PNG info: Unable to find a scheduler for %s" + % metadata["Sampler"] + ) + + except Exception as ex: + if pil_data 
and pil_data.info.get("parameters"): + print("import_png_metadata failed with %s" % ex) + pass + + return ( + None, + prompt, + negative_prompt, + steps, + sampler, + cfg_scale, + seed, + width, + height, + custom_model, + custom_lora, + hf_lora_id, + custom_vae, + ) diff --git a/apps/shark_studio/web/utils/state.py b/apps/shark_studio/web/utils/state.py new file mode 100644 index 0000000000..626d4ce53f --- /dev/null +++ b/apps/shark_studio/web/utils/state.py @@ -0,0 +1,41 @@ +import apps.shark_studio.web.utils.globals as global_obj +import gc + + +def status_label(tab_name, batch_index=0, batch_count=1, batch_size=1): + print(f"Getting status label for {tab_name}") + if batch_index < batch_count: + bs = f"x{batch_size}" if batch_size > 1 else "" + return f"{tab_name} generating {batch_index+1}/{batch_count}{bs}" + else: + return f"{tab_name} complete" + + +def get_generation_text_info(seeds, device): + cfg_dump = {} + for cfg in global_obj.get_config_dict(): + cfg_dump[cfg] = cfg + text_output = f"prompt={cfg_dump['prompts']}" + text_output += f"\nnegative prompt={cfg_dump['negative_prompts']}" + text_output += ( + f"\nmodel_id={cfg_dump['hf_model_id']}, " + f"ckpt_loc={cfg_dump['ckpt_loc']}" + ) + text_output += f"\nscheduler={cfg_dump['scheduler']}, " f"device={device}" + text_output += ( + f"\nsteps={cfg_dump['steps']}, " + f"guidance_scale={cfg_dump['guidance_scale']}, " + f"seed={seeds}" + ) + text_output += ( + f"\nsize={cfg_dump['height']}x{cfg_dump['width']}, " + if not cfg_dump.use_hiresfix + else f"\nsize={cfg_dump['hiresfix_height']}x{cfg_dump['hiresfix_width']}, " + ) + text_output += ( + f"batch_count={cfg_dump['batch_count']}, " + f"batch_size={cfg_dump['batch_size']}, " + f"max_length={cfg_dump['max_length']}" + ) + + return text_output diff --git a/apps/shark_studio/web/utils/tmp_configs.py b/apps/shark_studio/web/utils/tmp_configs.py new file mode 100644 index 0000000000..3e6ba46bfe --- /dev/null +++ b/apps/shark_studio/web/utils/tmp_configs.py @@ -0,0 +1,77 @@ +import os +import shutil +from time import time + +shark_tmp = os.path.join(os.getcwd(), "shark_tmp/") + + +def clear_tmp_mlir(): + cleanup_start = time() + print( + "Clearing .mlir temporary files from a prior run. This may take some time..." + ) + mlir_files = [ + filename + for filename in os.listdir(shark_tmp) + if os.path.isfile(os.path.join(shark_tmp, filename)) + and filename.endswith(".mlir") + ] + for filename in mlir_files: + os.remove(shark_tmp + filename) + print( + f"Clearing .mlir temporary files took {time() - cleanup_start:.4f} seconds." + ) + + +def clear_tmp_imgs(): + # tell gradio to use a directory under shark_tmp for its temporary + # image files unless somewhere else has been set + if "GRADIO_TEMP_DIR" not in os.environ: + os.environ["GRADIO_TEMP_DIR"] = os.path.join(shark_tmp, "gradio") + + print( + f"gradio temporary image cache located at {os.environ['GRADIO_TEMP_DIR']}. " + + "You may change this by setting the GRADIO_TEMP_DIR environment variable." + ) + + # Clear all gradio tmp images from the last session + if os.path.exists(os.environ["GRADIO_TEMP_DIR"]): + cleanup_start = time() + print( + "Clearing gradio UI temporary image files from a prior run. This may take some time..." + ) + shutil.rmtree(os.environ["GRADIO_TEMP_DIR"], ignore_errors=True) + print( + f"Clearing gradio UI temporary image files took {time() - cleanup_start:.4f} seconds." 
+ ) + + # older SHARK versions had to workaround gradio bugs and stored things differently + else: + image_files = [ + filename + for filename in os.listdir(shark_tmp) + if os.path.isfile(os.path.join(shark_tmp, filename)) + and filename.startswith("tmp") + and filename.endswith(".png") + ] + if len(image_files) > 0: + print( + "Clearing temporary image files of a prior run of a previous SHARK version. This may take some time..." + ) + cleanup_start = time() + for filename in image_files: + os.remove(shark_tmp + filename) + print( + f"Clearing temporary image files took {time() - cleanup_start:.4f} seconds." + ) + else: + print("No temporary images files to clear.") + + +def config_tmp(): + # create shark_tmp if it does not exist + if not os.path.exists(shark_tmp): + os.mkdir(shark_tmp) + + clear_tmp_mlir() + clear_tmp_imgs() From cdf2eb51d7543efd00b70de840d25de8e5c2a879 Mon Sep 17 00:00:00 2001 From: Stefan Kapusniak <121311569+one-lithe-rune@users.noreply.github.com> Date: Tue, 12 Dec 2023 18:27:37 +0000 Subject: [PATCH 02/25] Studio2/SD: Use more correct LoRA alpha calculation (#2034) * Updates ProcessLoRA to use both embedded LoRA alpha, and lora_strength optional parameter (default 1.0) when applying LoRA weights. * Updates ProcessLoRA to cover more dim cases. * This bring ProcessLoRA into line with PR #2015 against Studio1 --- apps/shark_studio/modules/embeddings.py | 119 ++++++++++++++---------- 1 file changed, 69 insertions(+), 50 deletions(-) diff --git a/apps/shark_studio/modules/embeddings.py b/apps/shark_studio/modules/embeddings.py index d8cf544f81..131c9006e5 100644 --- a/apps/shark_studio/modules/embeddings.py +++ b/apps/shark_studio/modules/embeddings.py @@ -3,34 +3,56 @@ import torch import json import safetensors +from dataclasses import dataclass from safetensors.torch import load_file from apps.shark_studio.api.utils import get_checkpoint_pathfile -def processLoRA(model, use_lora, splitting_prefix): +@dataclass +class LoRAweight: + up: torch.tensor + down: torch.tensor + mid: torch.tensor + alpha: torch.float32 = 1.0 + + +def processLoRA(model, use_lora, splitting_prefix, lora_strength=0.75): state_dict = "" if ".safetensors" in use_lora: state_dict = load_file(use_lora) else: state_dict = torch.load(use_lora) - alpha = 0.75 - visited = [] - # directly update weight in model - process_unet = "te" not in splitting_prefix + # gather the weights from the LoRA in a more convenient form, assumes + # everything will have an up.weight. + weight_dict: dict[str, LoRAweight] = {} for key in state_dict: - if ".alpha" in key or key in visited: - continue - + if key.startswith(splitting_prefix) and key.endswith("up.weight"): + stem = key.split("up.weight")[0] + weight_key = stem.removesuffix(".lora_") + weight_key = weight_key.removesuffix("_lora_") + weight_key = weight_key.removesuffix(".lora_linear_layer.") + + if weight_key not in weight_dict: + weight_dict[weight_key] = LoRAweight( + state_dict[f"{stem}up.weight"], + state_dict[f"{stem}down.weight"], + state_dict.get(f"{stem}mid.weight", None), + state_dict[f"{weight_key}.alpha"] + / state_dict[f"{stem}up.weight"].shape[1] + if f"{weight_key}.alpha" in state_dict + else 1.0, + ) + + # Directly update weight in model + + # Mostly adaptions of https://github.com/kohya-ss/sd-scripts/blob/main/networks/merge_lora.py + # and similar code in https://github.com/huggingface/diffusers/issues/3064 + + # TODO: handle mid weights (how do they even work?) 
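+    # Sketch of the update applied by the loop below:
+    #   scale   = lora_weight.alpha * lora_strength
+    #           = (embedded alpha / rank) * lora_strength, with rank taken from
+    #             up.weight.shape[1], or just lora_strength when the state dict
+    #             has no ".alpha" entry for the key
+    #   delta_W = up @ down  (conv LoRA weights are squeezed or passed through
+    #             conv2d first so the delta matches the layer's weight shape)
+    #   W      += scale * delta_W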
+ for key, lora_weight in weight_dict.items(): curr_layer = model - if ("text" not in key and process_unet) or ( - "text" in key and not process_unet - ): - layer_infos = ( - key.split(".")[0].split(splitting_prefix)[-1].split("_") - ) - else: - continue + layer_infos = key.split(".")[0].split(splitting_prefix)[-1].split("_") # find the target layer temp_name = layer_infos.pop(0) @@ -47,42 +69,39 @@ def processLoRA(model, use_lora, splitting_prefix): else: temp_name = layer_infos.pop(0) - pair_keys = [] - if "lora_down" in key: - pair_keys.append(key.replace("lora_down", "lora_up")) - pair_keys.append(key) - else: - pair_keys.append(key) - pair_keys.append(key.replace("lora_up", "lora_down")) - - # update weight - if len(state_dict[pair_keys[0]].shape) == 4: - weight_up = ( - state_dict[pair_keys[0]] - .squeeze(3) - .squeeze(2) - .to(torch.float32) - ) + weight = curr_layer.weight.data + scale = lora_weight.alpha * lora_strength + if len(weight.size()) == 2: + if len(lora_weight.up.shape) == 4: + weight_up = ( + lora_weight.up.squeeze(3).squeeze(2).to(torch.float32) + ) + weight_down = ( + lora_weight.down.squeeze(3).squeeze(2).to(torch.float32) + ) + change = ( + torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3) + ) + else: + change = torch.mm(lora_weight.up, lora_weight.down) + elif lora_weight.down.size()[2:4] == (1, 1): + weight_up = lora_weight.up.squeeze(3).squeeze(2).to(torch.float32) weight_down = ( - state_dict[pair_keys[1]] - .squeeze(3) - .squeeze(2) - .to(torch.float32) + lora_weight.down.squeeze(3).squeeze(2).to(torch.float32) ) - curr_layer.weight.data += alpha * torch.mm( - weight_up, weight_down - ).unsqueeze(2).unsqueeze(3) + change = torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3) else: - weight_up = state_dict[pair_keys[0]].to(torch.float32) - weight_down = state_dict[pair_keys[1]].to(torch.float32) - curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down) - # update visited list - for item in pair_keys: - visited.append(item) + change = torch.nn.functional.conv2d( + lora_weight.down.permute(1, 0, 2, 3), + lora_weight.up, + ).permute(1, 0, 2, 3) + + curr_layer.weight.data += change * scale + return model -def update_lora_weight_for_unet(unet, use_lora): +def update_lora_weight_for_unet(unet, use_lora, lora_strength): extensions = [".bin", ".safetensors", ".pt"] if not any([extension in use_lora for extension in extensions]): # We assume if it is a HF ID with standalone LoRA weights. 
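As a usage sketch of the new signatures (the model objects and the .safetensors path are placeholders the caller would already have, not files shipped with this patch):

    from apps.shark_studio.modules.embeddings import update_lora_weight

    # UNet weights use the "lora_unet_" prefix internally; any model_name without
    # "unet" in it falls through to the "lora_te_" text-encoder path.
    pipe_unet = update_lora_weight(pipe_unet, "loras/my_style.safetensors", "unet", lora_strength=0.8)
    pipe_clip = update_lora_weight(pipe_clip, "loras/my_style.safetensors", "clip", lora_strength=0.8)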
@@ -104,14 +123,14 @@ def update_lora_weight_for_unet(unet, use_lora): unet.load_attn_procs(dir_name, weight_name=main_file_name) return unet except: - return processLoRA(unet, use_lora, "lora_unet_") + return processLoRA(unet, use_lora, "lora_unet_", lora_strength) -def update_lora_weight(model, use_lora, model_name): +def update_lora_weight(model, use_lora, model_name, lora_strength=1.0): if "unet" in model_name: - return update_lora_weight_for_unet(model, use_lora) + return update_lora_weight_for_unet(model, use_lora, lora_strength) try: - return processLoRA(model, use_lora, "lora_te_") + return processLoRA(model, use_lora, "lora_te_", lora_strength) except: return None From 7a0017df33a6bf9658f95a90a6453949d1a1c1dc Mon Sep 17 00:00:00 2001 From: Stefan Kapusniak <121311569+one-lithe-rune@users.noreply.github.com> Date: Tue, 12 Dec 2023 18:27:50 +0000 Subject: [PATCH 03/25] Studio2: Remove duplications from api/utils.py (#2035) * Remove duplicate os import * Remove duplicate parse_seed_input function Migrating to JSON requests in SD UI More UI and app flow improvements, logging, shared device cache Model loading Complete SD pipeline. Tweaks to VAE, pipeline states Pipeline tweaks, add cmd_opts parsing to sd api --- apps/shark_studio/api/controlnet.py | 109 +-- apps/shark_studio/api/initializers.py | 32 +- apps/shark_studio/api/llm.py | 2 +- apps/shark_studio/api/sd.py | 758 +++++++++++++----- apps/shark_studio/api/utils.py | 103 +-- apps/shark_studio/modules/checkpoint_proc.py | 66 -- apps/shark_studio/modules/ckpt_processing.py | 122 +++ apps/shark_studio/modules/embeddings.py | 25 +- apps/shark_studio/modules/img_processing.py | 112 ++- apps/shark_studio/modules/logger.py | 37 + apps/shark_studio/modules/pipeline.py | 221 ++++- apps/shark_studio/modules/prompt_encoding.py | 376 +++++++++ apps/shark_studio/modules/schedulers.py | 111 ++- apps/shark_studio/modules/seed.py | 66 ++ apps/shark_studio/modules/shared.py | 69 -- apps/shark_studio/modules/shared_cmd_opts.py | 49 +- apps/shark_studio/modules/timer.py | 9 +- apps/shark_studio/tests/jupiter.png | Bin 0 -> 355202 bytes apps/shark_studio/web/api/compat.py | 12 +- .../web/configs/default_sd_config.json | 1 + apps/shark_studio/web/configs/foo.json | 1 - apps/shark_studio/web/index.py | 29 +- apps/shark_studio/web/ui/chat.py | 10 +- apps/shark_studio/web/ui/common_events.py | 88 +- apps/shark_studio/web/ui/outputgallery.py | 22 +- apps/shark_studio/web/ui/sd.py | 757 +++++++++-------- apps/shark_studio/web/ui/utils.py | 4 +- apps/shark_studio/web/utils/file_utils.py | 83 ++ apps/shark_studio/web/utils/globals.py | 63 +- .../web/utils/metadata/csv_metadata.py | 4 +- .../shark_studio/web/utils/metadata/format.py | 8 +- .../web/utils/metadata/png_metadata.py | 13 +- apps/shark_studio/web/utils/state.py | 4 +- apps/shark_studio/web/utils/tmp_configs.py | 8 +- shark/iree_utils/compile_utils.py | 9 +- 35 files changed, 2236 insertions(+), 1147 deletions(-) delete mode 100644 apps/shark_studio/modules/checkpoint_proc.py create mode 100644 apps/shark_studio/modules/ckpt_processing.py create mode 100644 apps/shark_studio/modules/logger.py create mode 100644 apps/shark_studio/modules/prompt_encoding.py create mode 100644 apps/shark_studio/modules/seed.py delete mode 100644 apps/shark_studio/modules/shared.py create mode 100644 apps/shark_studio/tests/jupiter.png create mode 100644 apps/shark_studio/web/configs/default_sd_config.json delete mode 100644 apps/shark_studio/web/configs/foo.json create mode 100644 
apps/shark_studio/web/utils/file_utils.py diff --git a/apps/shark_studio/api/controlnet.py b/apps/shark_studio/api/controlnet.py index ea8cdf0cc9..2c8a8b566b 100644 --- a/apps/shark_studio/api/controlnet.py +++ b/apps/shark_studio/api/controlnet.py @@ -1,4 +1,15 @@ # from turbine_models.custom_models.controlnet import control_adapter, preprocessors +import os +import PIL +import numpy as np +from apps.shark_studio.web.utils.file_utils import ( + get_generated_imgs_path, +) +from datetime import datetime +from PIL import Image +from gradio.components.image_editor import ( + EditorValue, +) class control_adapter: @@ -29,20 +40,12 @@ def export_controlnet_model(model_keyword): control_adapter_map = { "sd15": { "canny": {"initializer": control_adapter.export_control_adapter_model}, - "openpose": { - "initializer": control_adapter.export_control_adapter_model - }, - "scribble": { - "initializer": control_adapter.export_control_adapter_model - }, - "zoedepth": { - "initializer": control_adapter.export_control_adapter_model - }, + "openpose": {"initializer": control_adapter.export_control_adapter_model}, + "scribble": {"initializer": control_adapter.export_control_adapter_model}, + "zoedepth": {"initializer": control_adapter.export_control_adapter_model}, }, "sdxl": { - "canny": { - "initializer": control_adapter.export_xl_control_adapter_model - }, + "canny": {"initializer": control_adapter.export_xl_control_adapter_model}, }, } preprocessor_model_map = { @@ -57,78 +60,48 @@ class PreprocessorModel: def __init__( self, hf_model_id, - device, + device="cpu", ): - self.model = None + self.model = hf_model_id + self.device = device - def compile(self, device): + def compile(self): print("compile not implemented for preprocessor.") return def run(self, inputs): print("run not implemented for preprocessor.") - return + return inputs -def cnet_preview(model, input_img, stencils, images, preprocessed_hints): - if isinstance(input_image, PIL.Image.Image): - img_dict = { - "background": None, - "layers": [None], - "composite": input_image, - } - input_image = EditorValue(img_dict) - images[index] = input_image - if model: - stencils[index] = model +def cnet_preview(model, input_image): + curr_datetime = datetime.now().strftime("%Y-%m-%d.%H-%M-%S") + control_imgs_path = os.path.join(get_generated_imgs_path(), "control_hints") + if not os.path.exists(control_imgs_path): + os.mkdir(control_imgs_path) + img_dest = os.path.join(control_imgs_path, model + curr_datetime + ".png") match model: case "canny": - canny = CannyDetector() + canny = PreprocessorModel("canny") result = canny( - np.array(input_image["composite"]), + np.array(input_image), 100, 200, ) - preprocessed_hints[index] = Image.fromarray(result) - return ( - Image.fromarray(result), - stencils, - images, - preprocessed_hints, - ) + Image.fromarray(result).save(fp=img_dest) + return result, img_dest case "openpose": - openpose = OpenposeDetector() - result = openpose(np.array(input_image["composite"])) - preprocessed_hints[index] = Image.fromarray(result[0]) - return ( - Image.fromarray(result[0]), - stencils, - images, - preprocessed_hints, - ) + openpose = PreprocessorModel("openpose") + result = openpose(np.array(input_image)) + Image.fromarray(result[0]).save(fp=img_dest) + return result, img_dest case "zoedepth": - zoedepth = ZoeDetector() - result = zoedepth(np.array(input_image["composite"])) - preprocessed_hints[index] = Image.fromarray(result) - return ( - Image.fromarray(result), - stencils, - images, - preprocessed_hints, - ) + zoedepth 
= PreprocessorModel("ZoeDepth") + result = zoedepth(np.array(input_image)) + Image.fromarray(result).save(fp=img_dest) + return result, img_dest case "scribble": - preprocessed_hints[index] = input_image["composite"] - return ( - input_image["composite"], - stencils, - images, - preprocessed_hints, - ) + input_image.save(fp=img_dest) + return input_image, img_dest case _: - preprocessed_hints[index] = None - return ( - None, - stencils, - images, - preprocessed_hints, - ) + return None, None diff --git a/apps/shark_studio/api/initializers.py b/apps/shark_studio/api/initializers.py index bbb273354c..ef9816cfca 100644 --- a/apps/shark_studio/api/initializers.py +++ b/apps/shark_studio/api/initializers.py @@ -1,14 +1,17 @@ import importlib -import logging import os import signal import sys -import re import warnings import json from threading import Thread from apps.shark_studio.modules.timer import startup_timer +from apps.shark_studio.web.utils.tmp_configs import ( + config_tmp, + clear_tmp_mlir, + clear_tmp_imgs, +) def imports(): @@ -18,9 +21,8 @@ def imports(): warnings.filterwarnings( action="ignore", category=DeprecationWarning, module="torch" ) - warnings.filterwarnings( - action="ignore", category=UserWarning, module="torchvision" - ) + warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision") + warnings.filterwarnings(action="ignore", category=UserWarning, module="torch") import gradio # noqa: F401 @@ -34,20 +36,28 @@ def imports(): from apps.shark_studio.modules import ( img_processing, ) # noqa: F401 - from apps.shark_studio.modules.schedulers import scheduler_model_map startup_timer.record("other imports") def initialize(): configure_sigint_handler() + # Setup to use shark_tmp for gradio's temporary image files and clear any + # existing temporary images there if they exist. Then we can import gradio. + # It has to be in this order or gradio ignores what we've set up. 
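Per the comment above, the practical constraint is simply that GRADIO_TEMP_DIR has to be in the environment before gradio is first imported; a minimal sketch of the same pattern in isolation (the shark_tmp path is just an example):

    import os

    # must run before the first `import gradio`, otherwise gradio falls back to its default temp dir
    os.environ.setdefault(
        "GRADIO_TEMP_DIR", os.path.join(os.getcwd(), "shark_tmp", "gradio")
    )

    import gradio as gr  # picks up the directory configured above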
+ + config_tmp() + clear_tmp_mlir() + clear_tmp_imgs() + + from apps.shark_studio.web.utils.file_utils import ( + create_checkpoint_folders, + ) - # from apps.shark_studio.modules import modelloader - # modelloader.cleanup_models() + # Create custom models folders if they don't exist + create_checkpoint_folders() - # from apps.shark_studio.modules import sd_models - # sd_models.setup_model() - # startup_timer.record("setup SD model") + import gradio as gr # initialize_rest(reload_script_modules=False) diff --git a/apps/shark_studio/api/llm.py b/apps/shark_studio/api/llm.py index a209d8d1ba..852f5eff58 100644 --- a/apps/shark_studio/api/llm.py +++ b/apps/shark_studio/api/llm.py @@ -4,7 +4,7 @@ get_iree_compiled_module, load_vmfb_using_mmap, ) -from apps.shark_studio.api.utils import get_resource_path +from apps.shark_studio.web.utils.file_utils import get_resource_path import iree.runtime as ireert from itertools import chain import gc diff --git a/apps/shark_studio/api/sd.py b/apps/shark_studio/api/sd.py index a601a068f7..2822d83829 100644 --- a/apps/shark_studio/api/sd.py +++ b/apps/shark_studio/api/sd.py @@ -1,90 +1,78 @@ +import gc +import torch +import time +import os +import json +import numpy as np +from tqdm.auto import tqdm + +from pathlib import Path +from random import randint from turbine_models.custom_models.sd_inference import clip, unet, vae -from shark.iree_utils.compile_utils import get_iree_compiled_module -from apps.shark_studio.api.utils import get_resource_path from apps.shark_studio.api.controlnet import control_adapter_map from apps.shark_studio.web.utils.state import status_label +from apps.shark_studio.web.utils.file_utils import ( + safe_name, + get_resource_path, + get_checkpoints_path, +) from apps.shark_studio.modules.pipeline import SharkPipelineBase -import iree.runtime as ireert -import gc -import torch -import gradio as gr +from apps.shark_studio.modules.schedulers import get_schedulers +from apps.shark_studio.modules.prompt_encoding import ( + get_weighted_text_embeddings, +) +from apps.shark_studio.modules.img_processing import ( + resize_stencil, + save_output_img, + resamplers, + resampler_list, +) + +from apps.shark_studio.modules.ckpt_processing import ( + preprocessCKPT, + process_custom_pipe_weights, +) +from transformers import CLIPTokenizer +from diffusers.image_processor import VaeImageProcessor sd_model_map = { - "CompVis/stable-diffusion-v1-4": { - "clip": { - "initializer": clip.export_clip_model, - "max_tokens": 64, - }, - "vae_encode": { - "initializer": vae.export_vae_model, - "max_tokens": 64, - }, - "unet": { - "initializer": unet.export_unet_model, - "max_tokens": 512, - }, - "vae_decode": { - "initializer": vae.export_vae_model, - "max_tokens": 64, - }, + "clip": { + "initializer": clip.export_clip_model, + "ireec_flags": [ + "--iree-flow-collapse-reduction-dims", + "--iree-opt-const-expr-hoisting=False", + "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", + "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-preprocessing-pad-linalg-ops{pad-size=16}))", + ], }, - "runwayml/stable-diffusion-v1-5": { - "clip": { - "initializer": clip.export_clip_model, - "max_tokens": 64, - }, - "vae_encode": { - "initializer": vae.export_vae_model, - "max_tokens": 64, - }, - "unet": { - "initializer": unet.export_unet_model, - "max_tokens": 512, - }, - "vae_decode": { - "initializer": vae.export_vae_model, - "max_tokens": 64, - }, + "vae_encode": { + "initializer": vae.export_vae_model, + "ireec_flags": [ + 
"--iree-flow-collapse-reduction-dims", + "--iree-opt-const-expr-hoisting=False", + "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", + "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-detach-elementwise-from-named-ops,iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-convert-conv2d-to-img2col,iree-preprocessing-pad-linalg-ops{pad-size=32},iree-linalg-ext-convert-conv2d-to-winograd))", + "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-preprocessing-pad-linalg-ops{pad-size=16}))", + ], }, - "stabilityai/stable-diffusion-2-1-base": { - "clip": { - "initializer": clip.export_clip_model, - "max_tokens": 64, - }, - "vae_encode": { - "initializer": vae.export_vae_model, - "max_tokens": 64, - }, - "unet": { - "initializer": unet.export_unet_model, - "max_tokens": 512, - }, - "vae_decode": { - "initializer": vae.export_vae_model, - "max_tokens": 64, - }, + "unet": { + "initializer": unet.export_unet_model, + "ireec_flags": [ + "--iree-flow-collapse-reduction-dims", + "--iree-opt-const-expr-hoisting=False", + "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", + "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-convert-conv2d-to-img2col,iree-preprocessing-pad-linalg-ops{pad-size=32}))", + ], }, - "stabilityai/stable_diffusion-xl-1.0": { - "clip_1": { - "initializer": clip.export_clip_model, - "max_tokens": 64, - }, - "clip_2": { - "initializer": clip.export_clip_model, - "max_tokens": 64, - }, - "vae_encode": { - "initializer": vae.export_vae_model, - "max_tokens": 64, - }, - "unet": { - "initializer": unet.export_unet_model, - "max_tokens": 512, - }, - "vae_decode": { - "initializer": vae.export_vae_model, - "max_tokens": 64, - }, + "vae_decode": { + "initializer": vae.export_vae_model, + "ireec_flags": [ + "--iree-flow-collapse-reduction-dims", + "--iree-opt-const-expr-hoisting=False", + "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", + "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-detach-elementwise-from-named-ops,iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-convert-conv2d-to-img2col,iree-preprocessing-pad-linalg-ops{pad-size=32},iree-linalg-ext-convert-conv2d-to-winograd))", + "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-preprocessing-pad-linalg-ops{pad-size=16}))", + ], }, } @@ -95,38 +83,343 @@ class StableDiffusion(SharkPipelineBase): # aims to be as general as possible, and the class will infer and compile # a list of necessary modules or a combined "pipeline module" for a # specified job based on the inference task. - # - # custom_model_ids: a dict of submodel + HF ID pairs for custom submodels. - # e.g. {"vae_decode": "madebyollin/sdxl-vae-fp16-fix"} - # - # embeddings: a dict of embedding checkpoints or model IDs to use when - # initializing the compiled modules. 
def __init__( self, - base_model_id: str = "runwayml/stable-diffusion-v1-5", - height: int = 512, - width: int = 512, - precision: str = "fp16", - device: str = None, - custom_model_map: dict = {}, - embeddings: dict = {}, + base_model_id, + height: int, + width: int, + batch_size: int, + precision: str, + device: str, + custom_vae: str = None, + num_loras: int = 0, import_ir: bool = True, + is_controlled: bool = False, ): - super().__init__(sd_model_map[base_model_id], device, import_ir) - self.base_model_id = base_model_id - self.device = device + self.model_max_length = 77 + self.batch_size = batch_size self.precision = precision - self.iree_module_dict = None - self.get_compiled_map() + self.dtype = torch.float16 if precision == "fp16" else torch.float32 + self.height = height + self.width = width + self.scheduler_obj = {} + static_kwargs = { + "pipe": { + "external_weights": "safetensors", + }, + "clip": {"hf_model_name": base_model_id}, + "unet": { + "hf_model_name": base_model_id, + "unet_model": unet.UnetModel( + hf_model_name=base_model_id, hf_auth_token=None + ), + "batch_size": batch_size, + # "is_controlled": is_controlled, + # "num_loras": num_loras, + "height": height, + "width": width, + "precision": precision, + "max_length": self.model_max_length, + }, + "vae_encode": { + "hf_model_name": base_model_id, + "vae_model": vae.VaeModel( + hf_model_name=base_model_id, + custom_vae=custom_vae, + ), + "batch_size": batch_size, + "height": height, + "width": width, + "precision": precision, + }, + "vae_decode": { + "hf_model_name": base_model_id, + "vae_model": vae.VaeModel( + hf_model_name=base_model_id, + custom_vae=custom_vae, + ), + "batch_size": batch_size, + "height": height, + "width": width, + "precision": precision, + }, + } + super().__init__(sd_model_map, base_model_id, static_kwargs, device, import_ir) + pipe_id_list = [ + safe_name(base_model_id), + str(batch_size), + str(static_kwargs["unet"]["max_length"]), + f"{str(height)}x{str(width)}", + precision, + ] + if num_loras > 0: + pipe_id_list.append(str(num_loras) + "lora") + if is_controlled: + pipe_id_list.append("controlled") + if custom_vae: + pipe_id_list.append(custom_vae) + self.pipe_id = "_".join(pipe_id_list) + print(f"\n[LOG] Pipeline initialized with pipe_id: {self.pipe_id}.") + del static_kwargs + gc.collect() + + def prepare_pipe(self, custom_weights, adapters, embeddings, is_img2img): + print(f"\n[LOG] Preparing pipeline...") + self.is_img2img = is_img2img + self.schedulers = get_schedulers(self.base_model_id) + + self.weights_path = os.path.join( + get_checkpoints_path(), self.safe_name(self.base_model_id) + ) + if not os.path.exists(self.weights_path): + os.mkdir(self.weights_path) + + for model in adapters: + self.model_map[model] = adapters[model] + + for submodel in self.static_kwargs: + if custom_weights: + custom_weights_params, _ = process_custom_pipe_weights(custom_weights) + if submodel not in ["clip", "clip2"]: + self.static_kwargs[submodel][ + "external_weight_file" + ] = custom_weights_params + else: + self.static_kwargs[submodel]["external_weight_path"] = os.path.join( + self.weights_path, submodel + ".safetensors" + ) + else: + self.static_kwargs[submodel]["external_weight_path"] = os.path.join( + self.weights_path, submodel + ".safetensors" + ) + + self.get_compiled_map(pipe_id=self.pipe_id) + print("\n[LOG] Pipeline successfully prepared for runtime.") + return + + def encode_prompts_weight( + self, + prompt, + negative_prompt, + do_classifier_free_guidance=True, + ): + # Encodes the 
prompt into text encoder hidden states. + self.load_submodels(["clip"]) + self.tokenizer = CLIPTokenizer.from_pretrained( + self.base_model_id, + subfolder="tokenizer", + ) + clip_inf_start = time.time() + + text_embeddings, uncond_embeddings = get_weighted_text_embeddings( + pipe=self, + prompt=prompt, + uncond_prompt=negative_prompt if do_classifier_free_guidance else None, + ) + + if do_classifier_free_guidance: + text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) + + pad = (0, 0) * (len(text_embeddings.shape) - 2) + pad = pad + ( + 0, + self.static_kwargs["unet"]["max_length"] - text_embeddings.shape[1], + ) + text_embeddings = torch.nn.functional.pad(text_embeddings, pad) + + # SHARK: Report clip inference time + clip_inf_time = (time.time() - clip_inf_start) * 1000 + if self.ondemand: + self.unload_submodels(["clip"]) + gc.collect() + print(f"\n[LOG] Clip Inference time (ms) = {clip_inf_time:.3f}") + + return text_embeddings.numpy().astype(np.float16) + + def prepare_latents( + self, + generator, + num_inference_steps, + image, + strength, + ): + noise = torch.randn( + ( + self.batch_size, + 4, + self.height // 8, + self.width // 8, + ), + generator=generator, + dtype=self.dtype, + ).to("cpu") + + self.scheduler.set_timesteps(num_inference_steps) + if self.is_img2img: + init_timestep = min( + int(num_inference_steps * strength), num_inference_steps + ) + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + latents = self.encode_image(image) + latents = self.scheduler.add_noise(latents, noise, timesteps[0].repeat(1)) + return latents, [timesteps] + else: + self.scheduler.is_scale_input_called = True + latents = noise * self.scheduler.init_noise_sigma + return latents, self.scheduler.timesteps + + def encode_image(self, input_image): + self.load_submodels(["vae_encode"]) + vae_encode_start = time.time() + latents = self.run("vae_encode", input_image) + vae_inf_time = (time.time() - vae_encode_start) * 1000 + if self.ondemand: + self.unload_submodels(["vae_encode"]) + print(f"\n[LOG] VAE Encode Inference time (ms): {vae_inf_time:.3f}") + + return latents + + def produce_img_latents( + self, + latents, + text_embeddings, + guidance_scale, + total_timesteps, + cpu_scheduling, + mask=None, + masked_image_latents=None, + return_all_latents=False, + ): + # self.status = SD_STATE_IDLE + step_time_sum = 0 + latent_history = [latents] + text_embeddings = torch.from_numpy(text_embeddings).to(self.dtype) + text_embeddings_numpy = text_embeddings.detach().numpy() + guidance_scale = torch.Tensor([guidance_scale]).to(self.dtype) + self.load_submodels(["unet"]) + for i, t in tqdm(enumerate(total_timesteps)): + step_start_time = time.time() + timestep = torch.tensor([t]).to(self.dtype).detach().numpy() + latent_model_input = self.scheduler.scale_model_input(latents, t).to( + self.dtype + ) + if mask is not None and masked_image_latents is not None: + latent_model_input = torch.cat( + [ + torch.from_numpy(np.asarray(latent_model_input)).to(self.dtype), + mask, + masked_image_latents, + ], + dim=1, + ).to(self.dtype) + if cpu_scheduling: + latent_model_input = latent_model_input.detach().numpy() + + # Profiling Unet. 
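The timestep window this loop consumes is fixed earlier in prepare_latents; as a worked example of the strength arithmetic there (values are illustrative):

    num_inference_steps = 50
    strength = 0.6
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 30
    t_start = max(num_inference_steps - init_timestep, 0)                          # 20
    assert (init_timestep, t_start) == (30, 20)
    # i.e. img2img runs only the last 30 of 50 scheduler timesteps, so a lower
    # strength preserves more of the encoded init image.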
+ # profile_device = start_profiling(file_path="unet.rdc") + noise_pred = self.run( + "unet", + [ + latent_model_input, + timestep, + text_embeddings_numpy, + guidance_scale, + ], + ) + # end_profiling(profile_device) + + if cpu_scheduling: + noise_pred = torch.from_numpy(noise_pred.to_host()) + latents = self.scheduler.step(noise_pred, t, latents).prev_sample + else: + latents = self.run("scheduler_step", (noise_pred, t, latents)) + + latent_history.append(latents) + step_time = (time.time() - step_start_time) * 1000 + # print( + # f"\n [LOG] step = {i} | timestep = {t} | time = {step_time:.2f}ms" + # ) + step_time_sum += step_time + + # if self.status == SD_STATE_CANCEL: + # break - def prepare_pipeline(self, scheduler, custom_model_map): - return None + if self.ondemand: + self.unload_submodels(["unet"]) + gc.collect() + + avg_step_time = step_time_sum / len(total_timesteps) + print(f"\n[LOG] Average step time: {avg_step_time}ms/it") + + if not return_all_latents: + return latents + all_latents = torch.cat(latent_history, dim=0) + return all_latents + + def decode_latents(self, latents, cpu_scheduling=True): + latents_numpy = latents.to(self.dtype) + if cpu_scheduling: + latents_numpy = latents.detach().numpy() + + # profile_device = start_profiling(file_path="vae.rdc") + vae_start = time.time() + images = self.run("vae_decode", latents_numpy).to_host() + vae_inf_time = (time.time() - vae_start) * 1000 + # end_profiling(profile_device) + print(f"\n[LOG] VAE Inference time (ms): {vae_inf_time:.3f}") + + images = torch.from_numpy(images).permute(0, 2, 3, 1).float().numpy() + pil_images = self.image_processor.numpy_to_pil(images) + return pil_images + + # def process_sd_init_image(self, sd_init_image, resample_type): + # if isinstance(sd_init_image, list): + # images = [] + # for img in sd_init_image: + # img, _ = self.process_sd_init_image(img, resample_type) + # images.append(img) + # is_img2img = True + # return images, is_img2img + # if isinstance(sd_init_image, str): + # if os.path.isfile(sd_init_image): + # sd_init_image = Image.open(sd_init_image, mode="r").convert("RGB") + # image, is_img2img = self.process_sd_init_image( + # sd_init_image, resample_type + # ) + # else: + # image = None + # is_img2img = False + # elif isinstance(sd_init_image, Image.Image): + # image = sd_init_image.convert("RGB") + # elif sd_init_image: + # image = sd_init_image["image"].convert("RGB") + # else: + # image = None + # is_img2img = False + # if image: + # resample_type = ( + # resamplers[resample_type] + # if resample_type in resampler_list + # # Fallback to Lanczos + # else Image.Resampling.LANCZOS + # ) + # image = image.resize((self.width, self.height), resample=resample_type) + # image_arr = np.stack([np.array(i) for i in (image,)], axis=0) + # image_arr = image_arr / 255.0 + # image_arr = torch.from_numpy(image_arr).permute(0, 3, 1, 2).to(self.dtype) + # image_arr = 2 * (image_arr - 0.5) + # is_img2img = True + # image = image_arr + # return image, is_img2img def generate_images( self, prompt, negative_prompt, + image, + scheduler, steps, strength, guidance_scale, @@ -135,26 +428,101 @@ def generate_images( repeatable_seeds, resample_type, control_mode, - preprocessed_hints, + hints, ): - return None, None, None, None, None + # TODO: Batched args + self.image_processor = VaeImageProcessor(do_convert_rgb=True) + self.scheduler = self.schedulers[scheduler] + self.ondemand = ondemand + if self.is_img2img: + image, _ = self.image_processor.preprocess(image, resample_type) + else: + image = None + + 
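decode_latents above leans on diffusers' VaeImageProcessor for the final NCHW-to-PIL conversion; a self-contained sketch of that hand-off, using a dummy array in place of the real vae_decode output (assumed to already be in [0, 1]):

    import numpy as np
    import torch
    from diffusers.image_processor import VaeImageProcessor

    image_processor = VaeImageProcessor(do_convert_rgb=True)
    fake_vae_out = np.random.rand(1, 3, 512, 512).astype(np.float32)  # NCHW, [0, 1]
    images = torch.from_numpy(fake_vae_out).permute(0, 2, 3, 1).float().numpy()  # NHWC
    pil_images = image_processor.numpy_to_pil(images)  # list of PIL.Image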
print("\n[LOG] Generating images...") + batched_args = [ + prompt, + negative_prompt, + image, + ] + for arg in batched_args: + if not isinstance(arg, list): + arg = [arg] * self.batch_size + if len(arg) < self.batch_size: + arg = arg * self.batch_size + else: + arg = [arg[i] for i in range(self.batch_size)] + + text_embeddings = self.encode_prompts_weight( + prompt, + negative_prompt, + ) + uint32_info = np.iinfo(np.uint32) + uint32_min, uint32_max = uint32_info.min, uint32_info.max + if seed < uint32_min or seed >= uint32_max: + seed = randint(uint32_min, uint32_max) + + generator = torch.manual_seed(seed) + + init_latents, final_timesteps = self.prepare_latents( + generator=generator, + num_inference_steps=steps, + image=image, + strength=strength, + ) + + latents = self.produce_img_latents( + latents=init_latents, + text_embeddings=text_embeddings, + guidance_scale=guidance_scale, + total_timesteps=final_timesteps, + cpu_scheduling=True, # until we have schedulers through Turbine + ) + + # Img latents -> PIL images + all_imgs = [] + self.load_submodels(["vae_decode"]) + for i in tqdm(range(0, latents.shape[0], self.batch_size)): + imgs = self.decode_latents( + latents=latents[i : i + self.batch_size], + cpu_scheduling=True, + ) + all_imgs.extend(imgs) + if self.ondemand: + self.unload_submodels(["vae_decode"]) + + return all_imgs + + +def shark_sd_fn_dict_input( + sd_kwargs: dict, +): + print("[LOG] Submitting Request...") -# NOTE: Each `hf_model_id` should have its own starting configuration. + for key in sd_kwargs: + if sd_kwargs[key] in [None, []]: + sd_kwargs[key] = None + if sd_kwargs[key] in ["None"]: + sd_kwargs[key] = "" + if key == "seed": + sd_kwargs[key] = int(sd_kwargs[key]) -# model_vmfb_key = "" + for i in range(1): + generated_imgs = yield from shark_sd_fn(**sd_kwargs) + yield generated_imgs def shark_sd_fn( prompt, negative_prompt, - image_dict, + sd_init_image: list, height: int, width: int, steps: int, strength: float, guidance_scale: float, - seed: str | int, + seed: list, batch_count: int, batch_size: int, scheduler: str, @@ -163,86 +531,75 @@ def shark_sd_fn( custom_vae: str, precision: str, device: str, - lora_weights: str | list, ondemand: bool, repeatable_seeds: bool, resample_type: str, - control_mode: str, - stencils: list, - images: list, - preprocessed_hints: list, - progress=gr.Progress(), + controlnets: dict, + embeddings: dict, ): - # Handling gradio ImageEditor datatypes so we have unified inputs to the SD API - for i, stencil in enumerate(stencils): - if images[i] is None and stencil is not None: - continue - elif stencil is None and any( - img is not None for img in [images[i], preprocessed_hints[i]] - ): - images[i] = None - preprocessed_hints[i] = None - elif images[i] is not None: - if isinstance(images[i], dict): - images[i] = images[i]["composite"] - images[i] = images[i].convert("RGB") - - if isinstance(image_dict, PIL.Image.Image): - image = image_dict.convert("RGB") - elif image_dict: - image = image_dict["image"].convert("RGB") - else: - image = None - is_img2img = False - if image: - ( - image, - _, - _, - ) = resize_stencil(image, width, height) - is_img2img = True - print("Performing Stable Diffusion Pipeline setup...") + sd_kwargs = locals() + if not isinstance(sd_init_image, list): + sd_init_image = [sd_init_image] + is_img2img = True if sd_init_image[0] is not None else False - device_id = None + print("\n[LOG] Performing Stable Diffusion Pipeline setup...") from apps.shark_studio.modules.shared_cmd_opts import cmd_opts import 
apps.shark_studio.web.utils.globals as global_obj - custom_model_map = {} - if custom_weights != "None": - custom_model_map["unet"] = {"custom_weights": custom_weights} - if custom_vae != "None": - custom_model_map["vae"] = {"custom_weights": custom_vae} - if stencils: - for i, stencil in enumerate(stencils): + adapters = {} + is_controlled = False + control_mode = None + hints = [] + num_loras = 0 + for i in embeddings: + num_loras += 1 if embeddings[i] else 0 + if "model" in controlnets: + for i, model in enumerate(controlnets["model"]): if "xl" not in base_model_id.lower(): - custom_model_map[f"control_adapter_{i}"] = stencil_adapter_map[ - "runwayml/stable-diffusion-v1-5" - ][stencil] + adapters[f"control_adapter_{model}"] = { + "hf_id": control_adapter_map["runwayml/stable-diffusion-v1-5"][ + model + ], + "strength": controlnets["strength"][i], + } else: - custom_model_map[f"control_adapter_{i}"] = stencil_adapter_map[ - "stabilityai/stable-diffusion-xl-1.0" - ][stencil] + adapters[f"control_adapter_{model}"] = { + "hf_id": control_adapter_map["stabilityai/stable-diffusion-xl-1.0"][ + model + ], + "strength": controlnets["strength"][i], + } + if model is not None: + is_controlled = True + control_mode = controlnets["control_mode"] + for i in controlnets["hint"]: + hints.append[i] submit_pipe_kwargs = { "base_model_id": base_model_id, "height": height, "width": width, + "batch_size": batch_size, "precision": precision, "device": device, - "custom_model_map": custom_model_map, + "custom_vae": custom_vae, + "num_loras": num_loras, "import_ir": cmd_opts.import_mlir, - "is_img2img": is_img2img, + "is_controlled": is_controlled, } submit_prep_kwargs = { - "scheduler": scheduler, - "custom_model_map": custom_model_map, - "embeddings": lora_weights, + "custom_weights": custom_weights, + "adapters": adapters, + "embeddings": embeddings, + "is_img2img": is_img2img, } submit_run_kwargs = { "prompt": prompt, "negative_prompt": negative_prompt, + "image": sd_init_image, "steps": steps, + "scheduler": scheduler, "strength": strength, "guidance_scale": guidance_scale, "seed": seed, @@ -250,49 +607,52 @@ def shark_sd_fn( "repeatable_seeds": repeatable_seeds, "resample_type": resample_type, "control_mode": control_mode, - "preprocessed_hints": preprocessed_hints, + "hints": hints, } - - global sd_pipe - global sd_pipe_kwargs - - if sd_pipe_kwargs and sd_pipe_kwargs != submit_pipe_kwargs: - sd_pipe = None - sd_pipe_kwargs = submit_pipe_kwargs + if ( + not global_obj.get_sd_obj() + or global_obj.get_pipe_kwargs() != submit_pipe_kwargs + ): + print("\n[LOG] Initializing new pipeline...") + global_obj.clear_cache() gc.collect() - if sd_pipe is None: - history[-1][-1] = "Getting the pipeline ready..." - yield history, "" - # Initializes the pipeline and retrieves IR based on all # parameters that are static in the turbine output format, # which is currently MLIR in the torch dialect. 
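For context on how these kwargs arrive here: the UI and API paths hand over a plain request dict (see default_sd_config.json added in this patch) and drive shark_sd_fn_dict_input as a generator, which yields the image list plus a status string per batch. A trimmed sketch mirroring the __main__ block at the end of this file (the literal config path is an assumption; the real entry point resolves it through get_resource_path):

    import json
    from apps.shark_studio.modules.shared_cmd_opts import cmd_opts
    from apps.shark_studio.api.sd import shark_sd_fn_dict_input
    import apps.shark_studio.web.utils.globals as global_obj

    global_obj._init()
    with open("apps/shark_studio/web/configs/default_sd_config.json", "r") as f:
        sd_kwargs = json.load(f)
    # any matching command-line option overrides the stored config
    for arg in vars(cmd_opts):
        if arg in sd_kwargs:
            sd_kwargs[arg] = getattr(cmd_opts, arg)
    for result in shark_sd_fn_dict_input(sd_kwargs):
        print(result)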
- sd_pipe = SharkStableDiffusionPipeline( + sd_pipe = StableDiffusion( **submit_pipe_kwargs, ) - - sd_pipe.prepare_pipe(**submit_prep_kwargs) - - for prompt, msg, exec_time in progress.tqdm( - out_imgs=sd_pipe.generate_images(**submit_run_kwargs), - desc="Generating Image...", + global_obj.set_sd_obj(sd_pipe) + global_obj.set_pipe_kwargs(submit_pipe_kwargs) + if ( + not global_obj.get_prep_kwargs() + or global_obj.get_prep_kwargs() != submit_prep_kwargs ): - text_output = get_generation_text_info( - seeds[: current_batch + 1], device - ) + global_obj.set_prep_kwargs(submit_prep_kwargs) + global_obj.get_sd_obj().prepare_pipe(**submit_prep_kwargs) + + generated_imgs = [] + for current_batch in range(batch_count): + start_time = time.time() + out_imgs = global_obj.get_sd_obj().generate_images(**submit_run_kwargs) + total_time = time.time() - start_time + text_output = f"Total image(s) generation time: {total_time:.4f}sec" + print(f"\n[LOG] {text_output}") + # if global_obj.get_sd_status() == SD_STATE_CANCEL: + # break + # else: save_output_img( - out_imgs[0], - seeds[current_batch], - extra_info, + out_imgs[current_batch], + seed, + sd_kwargs, ) generated_imgs.extend(out_imgs) - yield generated_imgs, text_output, status_label( + yield generated_imgs, status_label( "Stable Diffusion", current_batch + 1, batch_count, batch_size - ), stencils, images - - return generated_imgs, text_output, "", stencils, images + ) + return generated_imgs, "" def cancel_sd(): @@ -300,9 +660,23 @@ def cancel_sd(): return +def view_json_file(file_path): + content = "" + with open(file_path, "r") as fopen: + content = fopen.read() + return content + + if __name__ == "__main__": - sd = StableDiffusion( - "runwayml/stable-diffusion-v1-5", - device="vulkan", - ) - print("model loaded") + from apps.shark_studio.modules.shared_cmd_opts import cmd_opts + import apps.shark_studio.web.utils.globals as global_obj + + global_obj._init() + + sd_json = view_json_file(get_resource_path("../configs/default_sd_config.json")) + sd_kwargs = json.loads(sd_json) + for arg in vars(cmd_opts): + if arg in sd_kwargs: + sd_kwargs[arg] = getattr(cmd_opts, arg) + for i in shark_sd_fn_dict_input(sd_kwargs): + print(i) diff --git a/apps/shark_studio/api/utils.py b/apps/shark_studio/api/utils.py index a4f52dca24..e9268aa83b 100644 --- a/apps/shark_studio/api/utils.py +++ b/apps/shark_studio/api/utils.py @@ -1,8 +1,5 @@ -import os -import sys -import os import numpy as np -import glob +import json from random import ( randint, seed as seed_random, @@ -11,7 +8,6 @@ ) from pathlib import Path -from safetensors.torch import load_file from apps.shark_studio.modules.shared_cmd_opts import cmd_opts from cpuinfo import get_cpu_info @@ -22,11 +18,6 @@ get_iree_vulkan_runtime_flags, ) -checkpoints_filetypes = ( - "*.ckpt", - "*.safetensors", -) - def get_available_devices(): def get_devices_by_name(driver_name): @@ -55,9 +46,7 @@ def get_devices_by_name(driver_name): if len(device_list_dict) == 1: device_list.append(f"{device_name} => {driver_name}") else: - device_list.append( - f"{device_name} => {driver_name}://{i}" - ) + device_list.append(f"{device_name} => {driver_name}://{i}") return device_list set_iree_runtime_flags() @@ -109,6 +98,8 @@ def set_init_device_flags(): elif "metal" in cmd_opts.device: device_name, cmd_opts.device = map_device_to_name_path(cmd_opts.device) if not cmd_opts.iree_metal_target_platform: + from shark.iree_utils.metal_utils import get_metal_target_triple + triple = get_metal_target_triple(device_name) if triple is not 
None: cmd_opts.iree_metal_target_platform = triple.split("-")[-1] @@ -150,60 +141,6 @@ def get_all_devices(driver_name): return device_list_src -def get_resource_path(relative_path): - """Get absolute path to resource, works for dev and for PyInstaller""" - base_path = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__))) - return os.path.join(base_path, relative_path) - - -def get_generated_imgs_path() -> Path: - return Path( - cmd_opts.output_dir - if cmd_opts.output_dir - else get_resource_path("..\web\generated_imgs") - ) - - -def get_generated_imgs_todays_subdir() -> str: - return dt.now().strftime("%Y%m%d") - - -def create_checkpoint_folders(): - dir = ["vae", "lora"] - if not cmd_opts.ckpt_dir: - dir.insert(0, "models") - else: - if not os.path.isdir(cmd_opts.ckpt_dir): - sys.exit( - f"Invalid --ckpt_dir argument, " - f"{args.ckpt_dir} folder does not exists." - ) - for root in dir: - Path(get_checkpoints_path(root)).mkdir(parents=True, exist_ok=True) - - -def get_checkpoints_path(model=""): - return get_resource_path(f"..\web\models\{model}") - - -def get_checkpoints(model="models"): - ckpt_files = [] - file_types = checkpoints_filetypes - if model == "lora": - file_types = file_types + ("*.pt", "*.bin") - for extn in file_types: - files = [ - os.path.basename(x) - for x in glob.glob(os.path.join(get_checkpoints_path(model), extn)) - ] - ckpt_files.extend(files) - return sorted(ckpt_files, key=str.casefold) - - -def get_checkpoint_pathfile(checkpoint_name, model="models"): - return os.path.join(get_checkpoints_path(model), checkpoint_name) - - def get_device_mapping(driver, key_combination=3): """This method ensures consistent device ordering when choosing specific devices for execution @@ -250,6 +187,8 @@ def get_opt_flags(model, precision="fp16"): f"-iree-vulkan-target-triple={cmd_opts.iree_vulkan_target_triple}" ) if "rocm" in cmd_opts.device: + from shark.iree_utils.gpu_utils import get_iree_rocm_args + rocm_args = get_iree_rocm_args() iree_flags.extend(rocm_args) if cmd_opts.iree_constant_folding == False: @@ -318,9 +257,7 @@ def get_devices_by_name(driver_name): if len(device_list_dict) == 1: device_list.append(f"{device_name} => {driver_name}") else: - device_list.append( - f"{device_name} => {driver_name}://{i}" - ) + device_list.append(f"{device_name} => {driver_name}://{i}") return device_list set_iree_runtime_flags() @@ -352,28 +289,6 @@ def get_devices_by_name(driver_name): return available_devices -# take a seed expression in an input format and convert it to -# a list of integers, where possible -def parse_seed_input(seed_input: str | list | int): - if isinstance(seed_input, str): - try: - seed_input = json.loads(seed_input) - except (ValueError, TypeError): - seed_input = None - - if isinstance(seed_input, int): - return [seed_input] - - if isinstance(seed_input, list) and all( - type(seed) is int for seed in seed_input - ): - return seed_input - - raise TypeError( - "Seed input must be an integer or an array of integers in JSON format" - ) - - # Generate and return a new seed if the provided one is not in the # supported range (including -1) def sanitize_seed(seed: int | str): @@ -397,9 +312,7 @@ def parse_seed_input(seed_input: str | list | int): if isinstance(seed_input, int): return [seed_input] - if isinstance(seed_input, list) and all( - type(seed) is int for seed in seed_input - ): + if isinstance(seed_input, list) and all(type(seed) is int for seed in seed_input): return seed_input raise TypeError( diff --git 
a/apps/shark_studio/modules/checkpoint_proc.py b/apps/shark_studio/modules/checkpoint_proc.py deleted file mode 100644 index e924de4640..0000000000 --- a/apps/shark_studio/modules/checkpoint_proc.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -import json -import re -from pathlib import Path -from omegaconf import OmegaConf - - -def get_path_to_diffusers_checkpoint(custom_weights): - path = Path(custom_weights) - diffusers_path = path.parent.absolute() - diffusers_directory_name = os.path.join("diffusers", path.stem) - complete_path_to_diffusers = diffusers_path / diffusers_directory_name - complete_path_to_diffusers.mkdir(parents=True, exist_ok=True) - path_to_diffusers = complete_path_to_diffusers.as_posix() - return path_to_diffusers - - -def preprocessCKPT(custom_weights, is_inpaint=False): - path_to_diffusers = get_path_to_diffusers_checkpoint(custom_weights) - if next(Path(path_to_diffusers).iterdir(), None): - print("Checkpoint already loaded at : ", path_to_diffusers) - return - else: - print( - "Diffusers' checkpoint will be identified here : ", - path_to_diffusers, - ) - from_safetensors = ( - True if custom_weights.lower().endswith(".safetensors") else False - ) - # EMA weights usually yield higher quality images for inference but - # non-EMA weights have been yielding better results in our case. - # TODO: Add an option `--ema` (`--no-ema`) for users to specify if - # they want to go for EMA weight extraction or not. - extract_ema = False - print( - "Loading diffusers' pipeline from original stable diffusion checkpoint" - ) - num_in_channels = 9 if is_inpaint else 4 - pipe = download_from_original_stable_diffusion_ckpt( - checkpoint_path_or_dict=custom_weights, - extract_ema=extract_ema, - from_safetensors=from_safetensors, - num_in_channels=num_in_channels, - ) - pipe.save_pretrained(path_to_diffusers) - print("Loading complete") - - -def convert_original_vae(vae_checkpoint): - vae_state_dict = {} - for key in list(vae_checkpoint.keys()): - vae_state_dict["first_stage_model." 
+ key] = vae_checkpoint.get(key) - - config_url = ( - "https://raw.githubusercontent.com/CompVis/stable-diffusion/" - "main/configs/stable-diffusion/v1-inference.yaml" - ) - original_config_file = BytesIO(requests.get(config_url).content) - original_config = OmegaConf.load(original_config_file) - vae_config = create_vae_diffusers_config(original_config, image_size=512) - - converted_vae_checkpoint = convert_ldm_vae_checkpoint( - vae_state_dict, vae_config - ) - return converted_vae_checkpoint diff --git a/apps/shark_studio/modules/ckpt_processing.py b/apps/shark_studio/modules/ckpt_processing.py new file mode 100644 index 0000000000..08681f6c56 --- /dev/null +++ b/apps/shark_studio/modules/ckpt_processing.py @@ -0,0 +1,122 @@ +import os +import json +import re +import requests +from io import BytesIO +from pathlib import Path +from tqdm import tqdm +from omegaconf import OmegaConf +from apps.shark_studio.modules.shared_cmd_opts import cmd_opts +from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( + download_from_original_stable_diffusion_ckpt, + create_vae_diffusers_config, + convert_ldm_vae_checkpoint, +) + + +def get_path_to_diffusers_checkpoint(custom_weights): + path = Path(custom_weights) + diffusers_path = path.parent.absolute() + diffusers_directory_name = os.path.join("diffusers", path.stem) + complete_path_to_diffusers = diffusers_path / diffusers_directory_name + complete_path_to_diffusers.mkdir(parents=True, exist_ok=True) + path_to_diffusers = complete_path_to_diffusers.as_posix() + return path_to_diffusers + + +def preprocessCKPT(custom_weights, is_inpaint=False): + path_to_diffusers = get_path_to_diffusers_checkpoint(custom_weights) + if next(Path(path_to_diffusers).iterdir(), None): + print("Checkpoint already loaded at : ", path_to_diffusers) + return + else: + print( + "Diffusers' checkpoint will be identified here : ", + path_to_diffusers, + ) + from_safetensors = ( + True if custom_weights.lower().endswith(".safetensors") else False + ) + # EMA weights usually yield higher quality images for inference but + # non-EMA weights have been yielding better results in our case. + # TODO: Add an option `--ema` (`--no-ema`) for users to specify if + # they want to go for EMA weight extraction or not. + extract_ema = False + print("Loading diffusers' pipeline from original stable diffusion checkpoint") + num_in_channels = 9 if is_inpaint else 4 + pipe = download_from_original_stable_diffusion_ckpt( + checkpoint_path_or_dict=custom_weights, + extract_ema=extract_ema, + from_safetensors=from_safetensors, + num_in_channels=num_in_channels, + ) + pipe.save_pretrained(path_to_diffusers) + print("Loading complete") + + +def convert_original_vae(vae_checkpoint): + vae_state_dict = {} + for key in list(vae_checkpoint.keys()): + vae_state_dict["first_stage_model." 
+ key] = vae_checkpoint.get(key) + + config_url = ( + "https://raw.githubusercontent.com/CompVis/stable-diffusion/" + "main/configs/stable-diffusion/v1-inference.yaml" + ) + original_config_file = BytesIO(requests.get(config_url).content) + original_config = OmegaConf.load(original_config_file) + vae_config = create_vae_diffusers_config(original_config, image_size=512) + + converted_vae_checkpoint = convert_ldm_vae_checkpoint(vae_state_dict, vae_config) + return converted_vae_checkpoint + + +def process_custom_pipe_weights(custom_weights): + if custom_weights != "": + if custom_weights.startswith("https://civitai.com/api/"): + # download the checkpoint from civitai if we don't already have it + weights_path = get_civitai_checkpoint(custom_weights) + + # act as if we were given the local file as custom_weights originally + custom_weights_tgt = get_path_to_diffusers_checkpoint(weights_path) + custom_weights_params = weights_path + + else: + assert custom_weights.lower().endswith( + (".ckpt", ".safetensors") + ), "checkpoint files supported can be any of [.ckpt, .safetensors] type" + custom_weights_tgt = get_path_to_diffusers_checkpoint(custom_weights) + custom_weights_params = custom_weights + return custom_weights_params, custom_weights_tgt + + +def get_civitai_checkpoint(url: str): + with requests.get(url, allow_redirects=True, stream=True) as response: + response.raise_for_status() + + # civitai api returns the filename in the content disposition + base_filename = re.findall( + '"([^"]*)"', response.headers["Content-Disposition"] + )[0] + destination_path = Path.cwd() / (cmd_opts.ckpt_dir or "models") / base_filename + + # we don't have this model downloaded yet + if not destination_path.is_file(): + print(f"downloading civitai model from {url} to {destination_path}") + + size = int(response.headers["content-length"], 0) + progress_bar = tqdm(total=size, unit="iB", unit_scale=True) + + with open(destination_path, "wb") as f: + for chunk in response.iter_content(chunk_size=65536): + f.write(chunk) + progress_bar.update(len(chunk)) + + progress_bar.close() + + # we already have this model downloaded + else: + print(f"civitai model already downloaded to {destination_path}") + + response.close() + return destination_path.as_posix() diff --git a/apps/shark_studio/modules/embeddings.py b/apps/shark_studio/modules/embeddings.py index 131c9006e5..87924c819e 100644 --- a/apps/shark_studio/modules/embeddings.py +++ b/apps/shark_studio/modules/embeddings.py @@ -5,7 +5,10 @@ import safetensors from dataclasses import dataclass from safetensors.torch import load_file -from apps.shark_studio.api.utils import get_checkpoint_pathfile +from apps.shark_studio.web.utils.file_utils import ( + get_checkpoint_pathfile, + get_path_stem, +) @dataclass @@ -73,22 +76,14 @@ def processLoRA(model, use_lora, splitting_prefix, lora_strength=0.75): scale = lora_weight.alpha * lora_strength if len(weight.size()) == 2: if len(lora_weight.up.shape) == 4: - weight_up = ( - lora_weight.up.squeeze(3).squeeze(2).to(torch.float32) - ) - weight_down = ( - lora_weight.down.squeeze(3).squeeze(2).to(torch.float32) - ) - change = ( - torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3) - ) + weight_up = lora_weight.up.squeeze(3).squeeze(2).to(torch.float32) + weight_down = lora_weight.down.squeeze(3).squeeze(2).to(torch.float32) + change = torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3) else: change = torch.mm(lora_weight.up, lora_weight.down) elif lora_weight.down.size()[2:4] == (1, 1): weight_up = 
lora_weight.up.squeeze(3).squeeze(2).to(torch.float32) - weight_down = ( - lora_weight.down.squeeze(3).squeeze(2).to(torch.float32) - ) + weight_down = lora_weight.down.squeeze(3).squeeze(2).to(torch.float32) change = torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3) else: change = torch.nn.functional.conv2d( @@ -163,9 +158,7 @@ def get_lora_metadata(lora_filename): # get a figure for the total number of images processed for this dataset # either then number actually listed or in its dataset_dir entry or # the highest frequency's number if that doesn't exist - img_count = dataset_dirs.get(dir, {}).get( - "img_count", frequencies[0][1] - ) + img_count = dataset_dirs.get(dir, {}).get("img_count", frequencies[0][1]) # add the dataset frequencies to the overall frequencies replacing the # frequency counts on the tags with a percentage/ratio diff --git a/apps/shark_studio/modules/img_processing.py b/apps/shark_studio/modules/img_processing.py index b5cf28ce47..80062814cf 100644 --- a/apps/shark_studio/modules/img_processing.py +++ b/apps/shark_studio/modules/img_processing.py @@ -1,11 +1,33 @@ import os -import sys -from PIL import Image +import re +import json + +from csv import DictWriter +from PIL import Image, PngImagePlugin from pathlib import Path +from datetime import datetime as dt +from base64 import decode + +resamplers = { + "Lanczos": Image.Resampling.LANCZOS, + "Nearest Neighbor": Image.Resampling.NEAREST, + "Bilinear": Image.Resampling.BILINEAR, + "Bicubic": Image.Resampling.BICUBIC, + "Hamming": Image.Resampling.HAMMING, + "Box": Image.Resampling.BOX, +} + +resampler_list = resamplers.keys() # save output images and the inputs corresponding to it. def save_output_img(output_img, img_seed, extra_info=None): + from apps.shark_studio.web.utils.file_utils import ( + get_generated_imgs_path, + get_generated_imgs_todays_subdir, + ) + from apps.shark_studio.modules.shared_cmd_opts import cmd_opts + if extra_info is None: extra_info = {} generated_imgs_path = Path( @@ -14,20 +36,23 @@ def save_output_img(output_img, img_seed, extra_info=None): generated_imgs_path.mkdir(parents=True, exist_ok=True) csv_path = Path(generated_imgs_path, "imgs_details.csv") - prompt_slice = re.sub("[^a-zA-Z0-9]", "_", cmd_opts.prompts[0][:15]) + prompt_slice = re.sub("[^a-zA-Z0-9]", "_", extra_info["prompt"][0][:15]) out_img_name = f"{dt.now().strftime('%H%M%S')}_{prompt_slice}_{img_seed}" - img_model = cmd_opts.hf_model_id - if cmd_opts.ckpt_loc: - img_model = Path(os.path.basename(cmd_opts.ckpt_loc)).stem + img_model = extra_info["base_model_id"] + if extra_info["custom_weights"] not in [None, "None"]: + img_model = Path(os.path.basename(extra_info["custom_weights"])).stem img_vae = None - if cmd_opts.custom_vae: - img_vae = Path(os.path.basename(cmd_opts.custom_vae)).stem + if extra_info["custom_vae"]: + img_vae = Path(os.path.basename(extra_info["custom_vae"])).stem - img_lora = None - if cmd_opts.use_lora: - img_lora = Path(os.path.basename(cmd_opts.use_lora)).stem + img_loras = None + if extra_info["embeddings"]: + img_lora = [] + for i in extra_info["embeddings"]: + img_lora += Path(os.path.basename(cmd_opts.use_lora)).stem + img_loras = ", ".join(img_lora) if cmd_opts.output_img_format == "jpg": out_img_path = Path(generated_imgs_path, f"{out_img_name}.jpg") @@ -39,25 +64,25 @@ def save_output_img(output_img, img_seed, extra_info=None): if cmd_opts.write_metadata_to_png: # Using a conditional expression caused problems, so setting a new # variable for now. 
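The "parameters" text chunk written below is the same one png_metadata.py reads back with pil_data.info.get("parameters") when importing settings from an image. A self-contained round-trip sketch (file name and parameter string are placeholders):

    from PIL import Image, PngImagePlugin

    img = Image.new("RGB", (64, 64))
    png_info = PngImagePlugin.PngInfo()
    png_info.add_text(
        "parameters",
        "a photo of jupiter\nNegative prompt: blurry\nSteps: 20, Seed: 42",
    )
    img.save("example_metadata.png", "PNG", pnginfo=png_info)

    reread = Image.open("example_metadata.png")
    assert reread.info.get("parameters").startswith("a photo of jupiter")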
- if cmd_opts.use_hiresfix: - png_size_text = ( - f"{cmd_opts.hiresfix_width}x{cmd_opts.hiresfix_height}" - ) - else: - png_size_text = f"{cmd_opts.width}x{cmd_opts.height}" + # if cmd_opts.use_hiresfix: + # png_size_text = ( + # f"{cmd_opts.hiresfix_width}x{cmd_opts.hiresfix_height}" + # ) + # else: + png_size_text = f"{extra_info['width']}x{extra_info['height']}" pngInfo.add_text( "parameters", - f"{cmd_opts.prompts[0]}" - f"\nNegative prompt: {cmd_opts.negative_prompts[0]}" - f"\nSteps: {cmd_opts.steps}," - f"Sampler: {cmd_opts.scheduler}, " - f"CFG scale: {cmd_opts.guidance_scale}, " + f"{extra_info['prompt'][0]}" + f"\nNegative prompt: {extra_info['negative_prompt'][0]}" + f"\nSteps: {extra_info['steps']}," + f"Sampler: {extra_info['scheduler']}, " + f"CFG scale: {extra_info['guidance_scale']}, " f"Seed: {img_seed}," f"Size: {png_size_text}, " f"Model: {img_model}, " f"VAE: {img_vae}, " - f"LoRA: {img_lora}", + f"LoRA: {img_loras}", ) output_img.save(out_img_path, "PNG", pnginfo=pngInfo) @@ -72,26 +97,7 @@ def save_output_img(output_img, img_seed, extra_info=None): # To be as low-impact as possible to the existing CSV format, we append # "VAE" and "LORA" to the end. However, it does not fit the hierarchy of # importance for each data point. Something to consider. - new_entry = { - "VARIANT": img_model, - "SCHEDULER": cmd_opts.scheduler, - "PROMPT": cmd_opts.prompts[0], - "NEG_PROMPT": cmd_opts.negative_prompts[0], - "SEED": img_seed, - "CFG_SCALE": cmd_opts.guidance_scale, - "PRECISION": cmd_opts.precision, - "STEPS": cmd_opts.steps, - "HEIGHT": cmd_opts.height - if not cmd_opts.use_hiresfix - else cmd_opts.hiresfix_height, - "WIDTH": cmd_opts.width - if not cmd_opts.use_hiresfix - else cmd_opts.hiresfix_width, - "MAX_LENGTH": cmd_opts.max_length, - "OUTPUT": out_img_path, - "VAE": img_vae, - "LORA": img_lora, - } + new_entry = {} new_entry.update(extra_info) @@ -103,23 +109,9 @@ def save_output_img(output_img, img_seed, extra_info=None): dictwriter_obj.writerow(new_entry) csv_obj.close() - if cmd_opts.save_metadata_to_json: - del new_entry["OUTPUT"] - json_path = Path(generated_imgs_path, f"{out_img_name}.json") - with open(json_path, "w") as f: - json.dump(new_entry, f, indent=4) - - -resamplers = { - "Lanczos": Image.Resampling.LANCZOS, - "Nearest Neighbor": Image.Resampling.NEAREST, - "Bilinear": Image.Resampling.BILINEAR, - "Bicubic": Image.Resampling.BICUBIC, - "Hamming": Image.Resampling.HAMMING, - "Box": Image.Resampling.BOX, -} - -resampler_list = resamplers.keys() + json_path = Path(generated_imgs_path, f"{out_img_name}.json") + with open(json_path, "w") as f: + json.dump(new_entry, f, indent=4) # For stencil, the input image can be of any size, but we need to ensure that diff --git a/apps/shark_studio/modules/logger.py b/apps/shark_studio/modules/logger.py new file mode 100644 index 0000000000..bff6c933b7 --- /dev/null +++ b/apps/shark_studio/modules/logger.py @@ -0,0 +1,37 @@ +import sys + + +class Logger: + def __init__(self, filename, filter=None): + self.terminal = sys.stdout + self.log = open(filename, "w") + self.filter = filter + + def write(self, message): + for x in message.split("\n"): + if self.filter in x: + self.log.write(message) + else: + self.terminal.write(message) + + def flush(self): + self.terminal.flush() + self.log.flush() + + def isatty(self): + return False + + +def logger_test(x): + print("[LOG] This is a test") + print(f"This is another test, without the filter") + return x + + +def read_sd_logs(): + sys.stdout.flush() + with 
open("shark_tmp/sd.log", "r") as f: + return f.read() + + +sys.stdout = Logger("shark_tmp/sd.log", filter="[LOG]") diff --git a/apps/shark_studio/modules/pipeline.py b/apps/shark_studio/modules/pipeline.py index c087175de4..5dee266b13 100644 --- a/apps/shark_studio/modules/pipeline.py +++ b/apps/shark_studio/modules/pipeline.py @@ -1,4 +1,21 @@ -from shark.iree_utils.compile_utils import get_iree_compiled_module +from msvcrt import kbhit +from shark.iree_utils.compile_utils import ( + get_iree_compiled_module, + load_vmfb_using_mmap, + clean_device_info, + get_iree_target_triple, +) +from apps.shark_studio.web.utils.file_utils import ( + get_checkpoints_path, + get_resource_path, +) +from apps.shark_studio.modules.shared_cmd_opts import ( + cmd_opts, +) +from iree import runtime as ireert +from pathlib import Path +import gc +import os class SharkPipelineBase: @@ -12,60 +29,178 @@ class SharkPipelineBase: def __init__( self, model_map: dict, + base_model_id: str, + static_kwargs: dict, device: str, import_mlir: bool = True, ): self.model_map = model_map - self.device = device + self.pipe_map = {} + self.static_kwargs = static_kwargs + self.base_model_id = base_model_id + self.triple = get_iree_target_triple(device) + self.device, self.device_id = clean_device_info(device) self.import_mlir = import_mlir + self.iree_module_dict = {} + self.tmp_dir = get_resource_path(os.path.join("..", "shark_tmp")) + if not os.path.exists(self.tmp_dir): + os.mkdir(self.tmp_dir) + self.tempfiles = {} + self.pipe_vmfb_path = "" - def import_torch_ir(self, base_model_id): - for submodel in self.model_map: - hf_id = ( - submodel["custom_hf_id"] - if submodel["custom_hf_id"] - else base_model_id - ) - torch_ir = submodel["initializer"]( - hf_id, **submodel["init_kwargs"], compile_to="torch" - ) - submodel["tempfile_name"] = get_resource_path( - f"{submodel}.torch.tempfile" - ) - with open(submodel["tempfile_name"], "w+") as f: - f.write(torch_ir) - del torch_ir - gc.collect() - - def load_vmfb(self, submodel): - if self.iree_module_dict[submodel]: - print( - f".vmfb for {submodel} found at {self.iree_module_dict[submodel]['vmfb']}" - ) - elif self.model_map[submodel]["tempfile_name"]: - submodel["tempfile_name"] - - return submodel["vmfb"] + def get_compiled_map(self, pipe_id, submodel="None", init_kwargs={}) -> None: + # First checks whether we have .vmfbs precompiled, then populates the map + # with the precompiled executables and fetches executables for the rest of the map. + # The weights aren't static here anymore so this function should be a part of pipeline + # initialization. As soon as you have a pipeline ID unique to your static torch IR parameters, + # and your model map is populated with any IR - unique model IDs and their static params, + # call this method to get the artifacts associated with your map. + self.pipe_id = self.safe_name(pipe_id) + self.pipe_vmfb_path = Path( + os.path.join(get_checkpoints_path(".."), self.pipe_id) + ) + self.pipe_vmfb_path.mkdir(parents=False, exist_ok=True) + if submodel == "None": + print("\n[LOG] Gathering any pre-compiled artifacts....") + for key in self.model_map: + self.get_compiled_map(pipe_id, submodel=key) + else: + self.pipe_map[submodel] = {} + self.get_precompiled(self.pipe_id, submodel) + ireec_flags = [] + if submodel in self.iree_module_dict: + return + elif "vmfb_path" in self.pipe_map[submodel]: + return + elif submodel not in self.tempfiles: + print( + f"\n[LOG] Tempfile for {submodel} not found. Fetching torch IR..." 
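For reference, a small self-contained sketch of the stdout tee that modules/logger.py above appears to be aiming for (the FilteredTee name is illustrative). As written in the hunk, Logger.write re-emits the whole message once per split segment and raises a TypeError whenever filter is left at its default of None:

import sys

class FilteredTee:
    # Send lines containing `needle` to a log file and everything else to the
    # original terminal stream.
    def __init__(self, filename, needle=None):
        self.terminal = sys.stdout
        self.log = open(filename, "w")
        self.needle = needle

    def write(self, message):
        for line in message.splitlines(keepends=True):
            if self.needle is not None and self.needle in line:
                self.log.write(line)
            else:
                self.terminal.write(line)

    def flush(self):
        self.terminal.flush()
        self.log.flush()

    def isatty(self):
        return False

# sys.stdout = FilteredTee("shark_tmp/sd.log", needle="[LOG]")
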
+ ) + if submodel in self.static_kwargs: + init_kwargs = self.static_kwargs[submodel] + for key in self.static_kwargs["pipe"]: + if key not in init_kwargs: + init_kwargs[key] = self.static_kwargs["pipe"][key] + self.import_torch_ir(submodel, init_kwargs) + self.get_compiled_map(pipe_id, submodel) + else: + ireec_flags = ( + self.model_map[submodel]["ireec_flags"] + if "ireec_flags" in self.model_map[submodel] + else [] + ) - def merge_custom_map(self, custom_model_map): - for submodel in custom_model_map: - for key in submodel: - self.model_map[submodel][key] = key - print(self.model_map) + weights_path = self.get_io_params(submodel) - def get_compiled_map(self, device) -> None: - # this comes with keys: "vmfb", "config", and "temp_file_to_unlink". - for submodel in self.model_map: - if not self.iree_module_dict[submodel][vmfb]: self.iree_module_dict[submodel] = get_iree_compiled_module( - submodel.tempfile_name, + self.tempfiles[submodel], device=self.device, frontend="torch", + mmap=True, + external_weight_file=weights_path, + extra_args=ireec_flags, + write_to=os.path.join(self.pipe_vmfb_path, submodel + ".vmfb"), ) - # TODO: delete the temp file + return - def run(self, submodel, inputs): + def get_io_params(self, submodel): + if "external_weight_file" in self.static_kwargs[submodel]: + # we are using custom weights + weights_path = self.static_kwargs[submodel]["external_weight_file"] + elif "external_weight_path" in self.static_kwargs[submodel]: + # we are using the default weights for the HF model + weights_path = self.static_kwargs[submodel]["external_weight_path"] + else: + # assume the torch IR contains the weights. + weights_path = None + return weights_path + + def get_precompiled(self, pipe_id, submodel="None"): + if submodel == "None": + for model in self.model_map: + self.get_precompiled(pipe_id, model) + vmfbs = [] + for dirpath, dirnames, filenames in os.walk(self.pipe_vmfb_path): + vmfbs.extend(filenames) + break + for file in vmfbs: + if submodel in file: + self.pipe_map[submodel]["vmfb_path"] = os.path.join( + self.pipe_vmfb_path, file + ) return - def safe_name(name): - return name.replace("/", "_").replace("-", "_") + def import_torch_ir(self, submodel, kwargs): + torch_ir = self.model_map[submodel]["initializer"]( + **self.safe_dict(kwargs), compile_to="torch" + ) + if submodel == "clip": + # clip.export_clip_model returns (torch_ir, tokenizer) + torch_ir = torch_ir[0] + + self.tempfiles[submodel] = os.path.join( + self.tmp_dir, f"{submodel}.torch.tempfile" + ) + + with open(self.tempfiles[submodel], "w+") as f: + f.write(torch_ir) + del torch_ir + gc.collect() + return + + def load_submodels(self, submodels: list): + for submodel in submodels: + if submodel in self.iree_module_dict: + print(f"\n[LOG] {submodel} is ready for inference.") + continue + if "vmfb_path" in self.pipe_map[submodel]: + weights_path = self.get_io_params(submodel) + # print( + # f"\n[LOG] Loading .vmfb for {submodel} from {self.pipe_map[submodel]['vmfb_path']}" + # ) + self.iree_module_dict[submodel] = {} + ( + self.iree_module_dict[submodel]["vmfb"], + self.iree_module_dict[submodel]["config"], + self.iree_module_dict[submodel]["temp_file_to_unlink"], + ) = load_vmfb_using_mmap( + self.pipe_map[submodel]["vmfb_path"], + self.device, + device_idx=0, + rt_flags=[], + external_weight_file=weights_path, + ) + else: + self.get_compiled_map(self.pipe_id, submodel) + return + + def unload_submodels(self, submodels: list): + for submodel in submodels: + if submodel in self.iree_module_dict: + del 
self.iree_module_dict[submodel] + gc.collect() + return + + def run(self, submodel, inputs): + if not isinstance(inputs, list): + inputs = [inputs] + inp = [ + ireert.asdevicearray( + self.iree_module_dict[submodel]["config"].device, input + ) + for input in inputs + ] + return self.iree_module_dict[submodel]["vmfb"]["main"](*inp) + + def safe_name(self, name): + return name.replace("/", "_").replace("-", "_").replace("\\", "_") + + def safe_dict(self, kwargs: dict): + flat_args = {} + for i in kwargs: + if isinstance(kwargs[i], dict) and "pass_dict" not in kwargs[i]: + flat_args[i] = [kwargs[i][j] for j in kwargs[i]] + else: + flat_args[i] = kwargs[i] + + return flat_args diff --git a/apps/shark_studio/modules/prompt_encoding.py b/apps/shark_studio/modules/prompt_encoding.py new file mode 100644 index 0000000000..3dc61aba08 --- /dev/null +++ b/apps/shark_studio/modules/prompt_encoding.py @@ -0,0 +1,376 @@ +from typing import List, Optional, Union +from iree import runtime as ireert +import re +import torch +import numpy as np + +re_attention = re.compile( + r""" +\\\(| +\\\)| +\\\[| +\\]| +\\\\| +\\| +\(| +\[| +:([+-]?[.\d]+)\)| +\)| +]| +[^\\()\[\]:]+| +: +""", + re.X, +) + + +def parse_prompt_attention(text): + """ + Parses a string with attention tokens and returns a list of pairs: + text and its associated weight. + Accepted tokens are: + (abc) - increases attention to abc by a multiplier of 1.1 + (abc:3.12) - increases attention to abc by a multiplier of 3.12 + [abc] - decreases attention to abc by a multiplier of 1.1 + \( - literal character '(' + \[ - literal character '[' + \) - literal character ')' + \] - literal character ']' + \\ - literal character '\' + anything else - just text + >>> parse_prompt_attention('normal text') + [['normal text', 1.0]] + >>> parse_prompt_attention('an (important) word') + [['an ', 1.0], ['important', 1.1], [' word', 1.0]] + >>> parse_prompt_attention('(unbalanced') + [['unbalanced', 1.1]] + >>> parse_prompt_attention('\(literal\]') + [['(literal]', 1.0]] + >>> parse_prompt_attention('(unnecessary)(parens)') + [['unnecessaryparens', 1.1]] + >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') + [['a ', 1.0], + ['house', 1.5730000000000004], + [' ', 1.1], + ['on', 1.0], + [' a ', 1.1], + ['hill', 0.55], + [', sun, ', 1.1], + ['sky', 1.4641000000000006], + ['.', 1.1]] + """ + + res = [] + round_brackets = [] + square_brackets = [] + + round_bracket_multiplier = 1.1 + square_bracket_multiplier = 1 / 1.1 + + def multiply_range(start_position, multiplier): + for p in range(start_position, len(res)): + res[p][1] *= multiplier + + for m in re_attention.finditer(text): + text = m.group(0) + weight = m.group(1) + + if text.startswith("\\"): + res.append([text[1:], 1.0]) + elif text == "(": + round_brackets.append(len(res)) + elif text == "[": + square_brackets.append(len(res)) + elif weight is not None and len(round_brackets) > 0: + multiply_range(round_brackets.pop(), float(weight)) + elif text == ")" and len(round_brackets) > 0: + multiply_range(round_brackets.pop(), round_bracket_multiplier) + elif text == "]" and len(square_brackets) > 0: + multiply_range(square_brackets.pop(), square_bracket_multiplier) + else: + res.append([text, 1.0]) + + for pos in round_brackets: + multiply_range(pos, round_bracket_multiplier) + + for pos in square_brackets: + multiply_range(pos, square_bracket_multiplier) + + if len(res) == 0: + res = [["", 1.0]] + + # merge runs of identical weights + i = 0 + while i + 1 < len(res): + if res[i][1] == 
res[i + 1][1]: + res[i][0] += res[i + 1][0] + res.pop(i + 1) + else: + i += 1 + + return res + + +def get_prompts_with_weights(pipe, prompt: List[str], max_length: int): + r""" + Tokenize a list of prompts and return its tokens with weights of each token. + No padding, starting or ending token is included. + """ + tokens = [] + weights = [] + truncated = False + for text in prompt: + texts_and_weights = parse_prompt_attention(text) + text_token = [] + text_weight = [] + for word, weight in texts_and_weights: + # tokenize and discard the starting and the ending token + token = pipe.tokenizer(word).input_ids[1:-1] + text_token += token + # copy the weight by length of token + text_weight += [weight] * len(token) + # stop if the text is too long (longer than truncation limit) + if len(text_token) > max_length: + truncated = True + break + # truncate + if len(text_token) > max_length: + truncated = True + text_token = text_token[:max_length] + text_weight = text_weight[:max_length] + tokens.append(text_token) + weights.append(text_weight) + if truncated: + print( + "Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples" + ) + return tokens, weights + + +def pad_tokens_and_weights( + tokens, + weights, + max_length, + bos, + eos, + no_boseos_middle=True, + chunk_length=77, +): + r""" + Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length. + """ + max_embeddings_multiples = (max_length - 2) // (chunk_length - 2) + weights_length = ( + max_length if no_boseos_middle else max_embeddings_multiples * chunk_length + ) + for i in range(len(tokens)): + tokens[i] = [bos] + tokens[i] + [eos] * (max_length - 1 - len(tokens[i])) + if no_boseos_middle: + weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i])) + else: + w = [] + if len(weights[i]) == 0: + w = [1.0] * weights_length + else: + for j in range(max_embeddings_multiples): + w.append(1.0) # weight for starting token in this chunk + w += weights[i][ + j + * (chunk_length - 2) : min( + len(weights[i]), (j + 1) * (chunk_length - 2) + ) + ] + w.append(1.0) # weight for ending token in this chunk + w += [1.0] * (weights_length - len(w)) + weights[i] = w[:] + + return tokens, weights + + +def get_unweighted_text_embeddings( + pipe, + text_input, + chunk_length: int, + no_boseos_middle: Optional[bool] = True, +): + """ + When the length of tokens is a multiple of the capacity of the text encoder, + it should be split into chunks and sent to the text encoder individually. 
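The chunking above is easier to follow with concrete numbers; this small self-contained check mirrors the same arithmetic for CLIP's 77-token window (75 content tokens plus BOS/EOS), with illustrative values:

chunk_length = 77                 # CLIP window including BOS/EOS
max_embeddings_multiples = 8
max_length = (chunk_length - 2) * max_embeddings_multiples + 2
assert max_length == 602          # longest padded prompt accepted

prompt_len = 150                  # hypothetical tokenized prompt, no BOS/EOS
chunks_needed = (prompt_len - 1) // (chunk_length - 2) + 1
assert chunks_needed == 2         # two 75-token chunks through the text encoder
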
+ """ + max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2) + if max_embeddings_multiples > 1: + text_embeddings = [] + for i in range(max_embeddings_multiples): + # extract the i-th chunk + text_input_chunk = text_input[ + :, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2 + ].clone() + + # cover the head and the tail by the starting and the ending tokens + text_input_chunk[:, 0] = text_input[0, 0] + text_input_chunk[:, -1] = text_input[0, -1] + + text_embedding = pipe.run("clip", text_input_chunk)[0].to_host() + + if no_boseos_middle: + if i == 0: + # discard the ending token + text_embedding = text_embedding[:, :-1] + elif i == max_embeddings_multiples - 1: + # discard the starting token + text_embedding = text_embedding[:, 1:] + else: + # discard both starting and ending tokens + text_embedding = text_embedding[:, 1:-1] + + text_embeddings.append(text_embedding) + # SHARK: Convert the result to tensor + # text_embeddings = torch.concat(text_embeddings, axis=1) + text_embeddings_np = np.concatenate(np.array(text_embeddings)) + text_embeddings = torch.from_numpy(text_embeddings_np) + else: + text_embeddings = pipe.run("clip", text_input)[0] + text_embeddings = torch.from_numpy(text_embeddings.to_host()) + return text_embeddings + + +# This function deals with NoneType values occuring in tokens after padding +# It switches out None with 49407 as truncating None values causes matrix dimension errors, +def filter_nonetype_tokens(tokens: List[List]): + return [[49407 if token is None else token for token in tokens[0]]] + + +def get_weighted_text_embeddings( + pipe, + prompt: List[str], + uncond_prompt: List[str] = None, + max_embeddings_multiples: Optional[int] = 8, + no_boseos_middle: Optional[bool] = True, + skip_parsing: Optional[bool] = False, + skip_weighting: Optional[bool] = False, +): + max_length = (pipe.model_max_length - 2) * max_embeddings_multiples + 2 + + if not skip_parsing: + prompt_tokens, prompt_weights = get_prompts_with_weights( + pipe, prompt, max_length - 2 + ) + if uncond_prompt is not None: + uncond_tokens, uncond_weights = get_prompts_with_weights( + pipe, uncond_prompt, max_length - 2 + ) + else: + prompt_tokens = [ + token[1:-1] + for token in pipe.tokenizer( + prompt, max_length=max_length, truncation=True + ).input_ids + ] + prompt_weights = [[1.0] * len(token) for token in prompt_tokens] + if uncond_prompt is not None: + if isinstance(uncond_prompt, str): + uncond_prompt = [uncond_prompt] + uncond_tokens = [ + token[1:-1] + for token in pipe.tokenizer( + uncond_prompt, max_length=max_length, truncation=True + ).input_ids + ] + uncond_weights = [[1.0] * len(token) for token in uncond_tokens] + + # round up the longest length of tokens to a multiple of (model_max_length - 2) + max_length = max([len(token) for token in prompt_tokens]) + if uncond_prompt is not None: + max_length = max(max_length, max([len(token) for token in uncond_tokens])) + max_embeddings_multiples = min( + max_embeddings_multiples, + (max_length - 1) // (pipe.model_max_length - 2) + 1, + ) + max_embeddings_multiples = max(1, max_embeddings_multiples) + + max_length = (pipe.model_max_length - 2) * max_embeddings_multiples + 2 + + # pad the length of tokens and weights + bos = pipe.tokenizer.bos_token_id + eos = pipe.tokenizer.eos_token_id + prompt_tokens, prompt_weights = pad_tokens_and_weights( + prompt_tokens, + prompt_weights, + max_length, + bos, + eos, + no_boseos_middle=no_boseos_middle, + chunk_length=pipe.model_max_length, + ) + + # FIXME: This is a hacky 
fix caused by tokenizer padding with None values + prompt_tokens = filter_nonetype_tokens(prompt_tokens) + + # prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device) + prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device="cpu") + if uncond_prompt is not None: + uncond_tokens, uncond_weights = pad_tokens_and_weights( + uncond_tokens, + uncond_weights, + max_length, + bos, + eos, + no_boseos_middle=no_boseos_middle, + chunk_length=pipe.model_max_length, + ) + + # FIXME: This is a hacky fix caused by tokenizer padding with None values + uncond_tokens = filter_nonetype_tokens(uncond_tokens) + + # uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device) + uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device="cpu") + + # get the embeddings + text_embeddings = get_unweighted_text_embeddings( + pipe, + prompt_tokens, + pipe.model_max_length, + no_boseos_middle=no_boseos_middle, + ) + # prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=pipe.device) + prompt_weights = torch.tensor(prompt_weights, dtype=torch.float, device="cpu") + if uncond_prompt is not None: + uncond_embeddings = get_unweighted_text_embeddings( + pipe, + uncond_tokens, + pipe.model_max_length, + no_boseos_middle=no_boseos_middle, + ) + # uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=pipe.device) + uncond_weights = torch.tensor(uncond_weights, dtype=torch.float, device="cpu") + + # assign weights to the prompts and normalize in the sense of mean + # TODO: should we normalize by chunk or in a whole (current implementation)? + if (not skip_parsing) and (not skip_weighting): + previous_mean = ( + text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype) + ) + text_embeddings *= prompt_weights.unsqueeze(-1) + current_mean = ( + text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype) + ) + text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1) + if uncond_prompt is not None: + previous_mean = ( + uncond_embeddings.float() + .mean(axis=[-2, -1]) + .to(uncond_embeddings.dtype) + ) + uncond_embeddings *= uncond_weights.unsqueeze(-1) + current_mean = ( + uncond_embeddings.float() + .mean(axis=[-2, -1]) + .to(uncond_embeddings.dtype) + ) + uncond_embeddings *= ( + (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1) + ) + + if uncond_prompt is not None: + return text_embeddings, uncond_embeddings + return text_embeddings, None diff --git a/apps/shark_studio/modules/schedulers.py b/apps/shark_studio/modules/schedulers.py index c62646f69c..8c2413c638 100644 --- a/apps/shark_studio/modules/schedulers.py +++ b/apps/shark_studio/modules/schedulers.py @@ -1,4 +1,99 @@ # from shark_turbine.turbine_models.schedulers import export_scheduler_model +from diffusers import ( + LCMScheduler, + LMSDiscreteScheduler, + PNDMScheduler, + DDPMScheduler, + DDIMScheduler, + DPMSolverMultistepScheduler, + KDPM2DiscreteScheduler, + EulerDiscreteScheduler, + EulerAncestralDiscreteScheduler, + DEISMultistepScheduler, + DPMSolverSinglestepScheduler, + KDPM2AncestralDiscreteScheduler, + HeunDiscreteScheduler, +) + + +def get_schedulers(model_id): + # TODO: switch over to turbine and run all on GPU + print(f"\n[LOG] Initializing schedulers from model id: {model_id}") + schedulers = dict() + schedulers["PNDM"] = PNDMScheduler.from_pretrained( + model_id, + subfolder="scheduler", + ) + schedulers["DDPM"] = DDPMScheduler.from_pretrained( + model_id, + 
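As a quick self-contained illustration of the weighting step above (shapes and values are made up): each token embedding is scaled by its parsed prompt weight, then the tensor is rescaled so its mean matches the unweighted mean, keeping the overall magnitude roughly unchanged:

import torch

text_embeddings = torch.randn(1, 77, 768)     # (batch, tokens, dim), illustrative
prompt_weights = torch.full((1, 77), 1.1)      # e.g. every token wrapped in "(...)"

previous_mean = text_embeddings.float().mean(dim=[-2, -1])
text_embeddings = text_embeddings * prompt_weights.unsqueeze(-1)
current_mean = text_embeddings.float().mean(dim=[-2, -1])
text_embeddings = text_embeddings * (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
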
subfolder="scheduler", + ) + schedulers["KDPM2Discrete"] = KDPM2DiscreteScheduler.from_pretrained( + model_id, + subfolder="scheduler", + ) + schedulers["LMSDiscrete"] = LMSDiscreteScheduler.from_pretrained( + model_id, + subfolder="scheduler", + ) + schedulers["DDIM"] = DDIMScheduler.from_pretrained( + model_id, + subfolder="scheduler", + ) + schedulers["LCMScheduler"] = LCMScheduler.from_pretrained( + model_id, + subfolder="scheduler", + ) + schedulers["DPMSolverMultistep"] = DPMSolverMultistepScheduler.from_pretrained( + model_id, subfolder="scheduler", algorithm_type="dpmsolver" + ) + schedulers["DPMSolverMultistep++"] = DPMSolverMultistepScheduler.from_pretrained( + model_id, subfolder="scheduler", algorithm_type="dpmsolver++" + ) + schedulers[ + "DPMSolverMultistepKarras" + ] = DPMSolverMultistepScheduler.from_pretrained( + model_id, + subfolder="scheduler", + use_karras_sigmas=True, + ) + schedulers[ + "DPMSolverMultistepKarras++" + ] = DPMSolverMultistepScheduler.from_pretrained( + model_id, + subfolder="scheduler", + algorithm_type="dpmsolver++", + use_karras_sigmas=True, + ) + schedulers["EulerDiscrete"] = EulerDiscreteScheduler.from_pretrained( + model_id, + subfolder="scheduler", + ) + schedulers[ + "EulerAncestralDiscrete" + ] = EulerAncestralDiscreteScheduler.from_pretrained( + model_id, + subfolder="scheduler", + ) + schedulers["DEISMultistep"] = DEISMultistepScheduler.from_pretrained( + model_id, + subfolder="scheduler", + ) + schedulers["DPMSolverSinglestep"] = DPMSolverSinglestepScheduler.from_pretrained( + model_id, + subfolder="scheduler", + ) + schedulers[ + "KDPM2AncestralDiscrete" + ] = KDPM2AncestralDiscreteScheduler.from_pretrained( + model_id, + subfolder="scheduler", + ) + schedulers["HeunDiscrete"] = HeunDiscreteScheduler.from_pretrained( + model_id, + subfolder="scheduler", + ) + return schedulers def export_scheduler_model(model): @@ -7,24 +102,16 @@ def export_scheduler_model(model): scheduler_model_map = { "EulerDiscrete": export_scheduler_model("EulerDiscreteScheduler"), - "EulerAncestralDiscrete": export_scheduler_model( - "EulerAncestralDiscreteScheduler" - ), + "EulerAncestralDiscrete": export_scheduler_model("EulerAncestralDiscreteScheduler"), "LCM": export_scheduler_model("LCMScheduler"), "LMSDiscrete": export_scheduler_model("LMSDiscreteScheduler"), "PNDM": export_scheduler_model("PNDMScheduler"), "DDPM": export_scheduler_model("DDPMScheduler"), "DDIM": export_scheduler_model("DDIMScheduler"), - "DPMSolverMultistep": export_scheduler_model( - "DPMSolverMultistepScheduler" - ), + "DPMSolverMultistep": export_scheduler_model("DPMSolverMultistepScheduler"), "KDPM2Discrete": export_scheduler_model("KDPM2DiscreteScheduler"), "DEISMultistep": export_scheduler_model("DEISMultistepScheduler"), - "DPMSolverSinglestep": export_scheduler_model( - "DPMSolverSingleStepScheduler" - ), - "KDPM2AncestralDiscrete": export_scheduler_model( - "KDPM2AncestralDiscreteScheduler" - ), + "DPMSolverSinglestep": export_scheduler_model("DPMSolverSingleStepScheduler"), + "KDPM2AncestralDiscrete": export_scheduler_model("KDPM2AncestralDiscreteScheduler"), "HeunDiscrete": export_scheduler_model("HeunDiscreteScheduler"), } diff --git a/apps/shark_studio/modules/seed.py b/apps/shark_studio/modules/seed.py new file mode 100644 index 0000000000..d0b022a6f1 --- /dev/null +++ b/apps/shark_studio/modules/seed.py @@ -0,0 +1,66 @@ +import numpy as np +import json +from random import ( + randint, + seed as seed_random, + getstate as random_getstate, + setstate as random_setstate, +) + + +# 
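A short usage sketch of the registry built by get_schedulers() above; it needs network access (or a local Hugging Face cache) to pull the scheduler config, and the step count here is arbitrary:

from apps.shark_studio.modules.schedulers import get_schedulers

schedulers = get_schedulers("stabilityai/stable-diffusion-2-1-base")
scheduler = schedulers["EulerDiscrete"]
scheduler.set_timesteps(20)
print(scheduler.timesteps[:5])
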
Generate and return a new seed if the provided one is not in the +# supported range (including -1) +def sanitize_seed(seed: int | str): + seed = int(seed) + uint32_info = np.iinfo(np.uint32) + uint32_min, uint32_max = uint32_info.min, uint32_info.max + if seed < uint32_min or seed >= uint32_max: + seed = randint(uint32_min, uint32_max) + return seed + + +# take a seed expression in an input format and convert it to +# a list of integers, where possible +def parse_seed_input(seed_input: str | list | int): + if isinstance(seed_input, str): + try: + seed_input = json.loads(seed_input) + except (ValueError, TypeError): + seed_input = None + + if isinstance(seed_input, int): + return [seed_input] + + if isinstance(seed_input, list) and all(type(seed) is int for seed in seed_input): + return seed_input + + raise TypeError( + "Seed input must be an integer or an array of integers in JSON format" + ) + + +# Generate a set of seeds from an input expression for batch_count batches, +# optionally using that input as the rng seed for any randomly generated seeds. +def batch_seeds(seed_input: str | list | int, batch_count: int, repeatable=False): + # turn the input into a list if possible + seeds = parse_seed_input(seed_input) + + # slice or pad the list to be of batch_count length + seeds = seeds[:batch_count] + [-1] * (batch_count - len(seeds)) + + if repeatable: + if all(seed < 0 for seed in seeds): + seeds[0] = sanitize_seed(seeds[0]) + + # set seed for the rng based on what we have so far + saved_random_state = random_getstate() + seed_random(str([n for n in seeds if n > -1])) + + # generate any seeds that are unspecified + seeds = [sanitize_seed(seed) for seed in seeds] + + if repeatable: + # reset the rng back to normal + random_setstate(saved_random_state) + + return seeds diff --git a/apps/shark_studio/modules/shared.py b/apps/shark_studio/modules/shared.py deleted file mode 100644 index d9dc3ea26e..0000000000 --- a/apps/shark_studio/modules/shared.py +++ /dev/null @@ -1,69 +0,0 @@ -import sys - -import gradio as gr - -from modules import ( - shared_cmd_options, - shared_gradio, - options, - shared_items, - sd_models_types, -) -from modules.paths_internal import ( - models_path, - script_path, - data_path, - sd_configs_path, - sd_default_config, - sd_model_file, - default_sd_model_file, - extensions_dir, - extensions_builtin_dir, -) # noqa: F401 -from modules import util - -cmd_opts = shared_cmd_options.cmd_opts -parser = shared_cmd_options.parser - -parallel_processing_allowed = True -styles_filename = cmd_opts.styles_file -config_filename = cmd_opts.ui_settings_file - -demo = None - -device = None - -weight_load_location = None - -state = None - -prompt_styles = None - -options_templates = None -opts = None -restricted_opts = None - -sd_model: sd_models_types.WebuiSdModel = None - -settings_components = None -"""assinged from ui.py, a mapping on setting names to gradio components repsponsible for those settings""" - -tab_names = [] - -sd_upscalers = [] - -clip_model = None - -progress_print_out = sys.stdout - -gradio_theme = gr.themes.Base() - -total_tqdm = None - -mem_mon = None - -reload_gradio_theme = shared_gradio.reload_gradio_theme - -list_checkpoint_tiles = shared_items.list_checkpoint_tiles -refresh_checkpoints = shared_items.refresh_checkpoints -list_samplers = shared_items.list_samplers diff --git a/apps/shark_studio/modules/shared_cmd_opts.py b/apps/shark_studio/modules/shared_cmd_opts.py index dfb166a52e..93a09c6758 100644 --- a/apps/shark_studio/modules/shared_cmd_opts.py +++ 
b/apps/shark_studio/modules/shared_cmd_opts.py @@ -32,7 +32,7 @@ def is_valid_file(arg): ) p.add_argument( "-p", - "--prompts", + "--prompt", nargs="+", default=[ "a photo taken of the front of a super-car drifting on a road near " @@ -44,7 +44,7 @@ def is_valid_file(arg): ) p.add_argument( - "--negative_prompts", + "--negative_prompt", nargs="+", default=[ "watermark, signature, logo, text, lowres, ((monochrome, grayscale)), " @@ -54,7 +54,7 @@ def is_valid_file(arg): ) p.add_argument( - "--img_path", + "--sd_init_image", type=str, help="Path to the image input for img2img/inpainting.", ) @@ -130,8 +130,7 @@ def is_valid_file(arg): "--strength", type=float, default=0.8, - help="The strength of change applied on the given input image for " - "img2img.", + help="The strength of change applied on the given input image for " "img2img.", ) p.add_argument( @@ -290,9 +289,7 @@ def is_valid_file(arg): # Model Config and Usage Params ############################################################################## -p.add_argument( - "--device", type=str, default="vulkan", help="Device to run the model." -) +p.add_argument("--device", type=str, default="vulkan", help="Device to run the model.") p.add_argument( "--precision", type=str, default="fp16", help="Precision to run the model." @@ -323,7 +320,7 @@ def is_valid_file(arg): p.add_argument( "--scheduler", type=str, - default="SharkEulerDiscrete", + default="DDIM", help="Other supported schedulers are [DDIM, PNDM, LMSDiscrete, " "DPMSolverMultistep, DPMSolverMultistep++, DPMSolverMultistepKarras, " "DPMSolverMultistepKarras++, EulerDiscrete, EulerAncestralDiscrete, " @@ -350,8 +347,7 @@ def is_valid_file(arg): "--batch_count", type=int, default=1, - help="Number of batches to be generated with random seeds in " - "single execution.", + help="Number of batches to be generated with random seeds in " "single execution.", ) p.add_argument( @@ -363,10 +359,10 @@ def is_valid_file(arg): ) p.add_argument( - "--ckpt_loc", + "--custom_weights", type=str, default="", - help="Path to SD's .ckpt file.", + help="Path to a .safetensors or .ckpt file for SD pipeline weights.", ) p.add_argument( @@ -378,7 +374,7 @@ def is_valid_file(arg): ) p.add_argument( - "--hf_model_id", + "--base_model_id", type=str, default="stabilityai/stable-diffusion-2-1-base", help="The repo-id of hugging face.", @@ -416,8 +412,7 @@ def is_valid_file(arg): "--use_lora", type=str, default="", - help="Use standalone LoRA weight using a HF ID or a checkpoint " - "file (~3 MB).", + help="Use standalone LoRA weight using a HF ID or a checkpoint " "file (~3 MB).", ) p.add_argument( @@ -453,12 +448,6 @@ def is_valid_file(arg): "Example: --device_allocator_heap_key='*;1gib' (will limit caching on device to 1 gigabyte)", ) -p.add_argument( - "--custom_model_map", - type=str, - default="", - help="path to custom model map to import. This should be a .json file", -) ############################################################################## # IREE - Vulkan supported flags ############################################################################## @@ -499,8 +488,7 @@ def is_valid_file(arg): "--dump_isa", default=False, action="store_true", - help="When enabled call amdllpc to get ISA dumps. " - "Use with dispatch benchmarks.", + help="When enabled call amdllpc to get ISA dumps. 
" "Use with dispatch benchmarks.", ) p.add_argument( @@ -521,8 +509,7 @@ def is_valid_file(arg): "--enable_rgp", default=False, action=argparse.BooleanOptionalAction, - help="Flag for inserting debug frames between iterations " - "for use with rgp.", + help="Flag for inserting debug frames between iterations " "for use with rgp.", ) p.add_argument( @@ -608,8 +595,7 @@ def is_valid_file(arg): "--progress_bar", default=True, action=argparse.BooleanOptionalAction, - help="Flag for removing the progress bar animation during " - "image generation.", + help="Flag for removing the progress bar animation during " "image generation.", ) p.add_argument( @@ -675,6 +661,13 @@ def is_valid_file(arg): "images under --output_dir in the UI.", ) +p.add_argument( + "--configs_path", + default=None, + type=str, + help="Path to .json config directory.", +) + p.add_argument( "--output_gallery_followlinks", default=False, diff --git a/apps/shark_studio/modules/timer.py b/apps/shark_studio/modules/timer.py index 8fd1e6a7df..d6918e9c8c 100644 --- a/apps/shark_studio/modules/timer.py +++ b/apps/shark_studio/modules/timer.py @@ -11,9 +11,7 @@ def __init__(self, timer, category): def __enter__(self): self.start = time.time() - self.timer.base_category = ( - self.original_base_category + self.category + "/" - ) + self.timer.base_category = self.original_base_category + self.category + "/" self.timer.subcategory_level += 1 if self.timer.print_log: @@ -82,10 +80,7 @@ def summary(self): res += " (" res += ", ".join( - [ - f"{category}: {time_taken:.1f}s" - for category, time_taken in additions - ] + [f"{category}: {time_taken:.1f}s" for category, time_taken in additions] ) res += ")" diff --git a/apps/shark_studio/tests/jupiter.png b/apps/shark_studio/tests/jupiter.png new file mode 100644 index 0000000000000000000000000000000000000000..e479e20548c9844fcb2514c286c160f5392cb662 GIT binary patch literal 355202 zcmV)$K#sqOP)?_B zL2hJCZ*OjME@EtDZe$C3TrB_q02p*dSad{TbY%cCFfubQFf=bQGaxZGIx#RhH8M}O zmBs)701$LVSaeirZDjy3Fgh?WIxsLgFfr`ICc*#!010$OSae2mVQpmqFfcK}Q57Np z000eiMObu0VQpn{VE{v6ZDn#{X#el&0000BbVXQnQ)6XrWdKuSWo~8VAzhXL000wo zMObuFWo~3;a#U$;WdJZRIxsLUGd5y*sJ8$BAOJ~3K~#7FoW1LgCA*fLxAIa|ySoL$ zP|KEN140XyAH>#+^@~ad^v4a@za*SKdskIvYJ6kNL2^Z|>LZC&D@e}kn3D`LBGx6J zZnxXV)6?tiFG)Ji%4nTZZEfw+tcm#^k&}e<6YAF|16T4us+^DZZBV6Zf`#% z^D6nDZ(@k`<@xsUNM#N-upf2_1zkt?P{FAef@fSS6+Vn{@p59><~*k^`T$; zSK43VSDqRl#`K_l%F%AQ;;KIU@+w;_l{x)Y=BfkzDu0zZ`L%P_zv4r`VmSM4pZ==z z$bMpSLj;I*6>N~><= z8Xx>q{wjC!%d5@SrMHHDZ9`)%eA3!?^#_k~VhUwrJMEQs*6=B{(YiG1M>5b`>f@Ay z_o#o`M}|J7Hrh7vIQ%8OQ~ywNvpDFS{;qQ6KeAOD)kpTERj2ipf9;>T@Xm6z(b((z z)U9l5?6m!;hW{%&@F?Ze{_^dcUw{77fAPL^<Kh*Imf9|@{pS#7gTH;15*s*+ z4LcrNpNz2hV97Mq+Jtr0kj?s88}3i->TLA0`J=+-`=njHn?E!*cP||nL*}4yZ4oE( z2yp((HaheH>^2t>6e_(MNG}rJf1>w%dlptMI2y1T!W|{s5(ap&Eu7P@?WZ|Gz+}u^ zxR`d>v~cL~Y7cU8E&n6is`OK@wAPiaE&7&M;FckeDzvXU;e^+AYiyi0t6v-CwJ!Zx zde6GjV)HMrHY)?Y_9}DokZ+%M<=dt{`H%Y0DCpKl`)17H$}jJ#L%*1jpXlb| z>k~_9zmhHgv{~8OIpd(+Qa*j!ClA_HZ&C*9ldOqzxZ1t)YWI{w=Iqnvg=}&C_~Vb; zt8S=$(|Ld?|MYXVAKAU~PZ=^a+sGsV5T@vj36b!&$^I!dt(tmR`Bje!iN z(H$mtKiD!cz!2N9kyzoOdh&K$C)VP^XRrp>rUyd%wyH5ru7UyH)6*BxCbt17TKu1r zGqUmJaRTKbckhSs+Y4!YH>O|fopQAe?JOs~_=*j=wjb5H4wuA_)7)LVw$J)bK%K@} z8TiFF^@4t<}dRq zSKHTE;a_R(6T_8n!hgqMuc!axwoV*vpSY6iJ;$&3ud@GC{|xQwtNqhH?UUYi?VL}; z8Fo@y{mNA5v{j7ouJYBRF8{Q#`LE*dr%#V$tB207{G58HOzGJ<$E$--q)grqPI$c) z@S(@OAJ4iI&?9{ogWIbz@uF>&Y-oeztpoI^eZpw*h;B{*g{jj>ZN``cvY<#G+Q+kq z>icA2j$t2tE&#-fj4lF@u{@izI^z57gcoh@$iDDa1G;ZOlC1DP^R|X)ii5W(DDKYY z?|uZ7XyRzh7S-W>Hh*c!1y2?HM8b9?KZ~`c!EWo4^NEK78*w&ccqUs|hsux}V~|8^ 
z;v6~0hHUM#FxxaaO&c}Z%f9&NJ0{h(OU_KgA1w}f**rR?qgUJznFA^$R|4YPzUWTy z#TN#n5DDvL9)#WrX7vTmJ$b&@?P6(Da=S~gB);UT_#(S$_=cy$iUFDC)p+ncfwtbh zc5B(v=5K$*%v(Y#_A#Qh(cJBR{A@d;aOlH3Uq~bmi6beSn9SSQi{WvdoD>UoDUbTq zFVE@Ixs!3%v4VL;1IypX)mmMTKQloj<%^!{$YC2ORFyO)i3W_BU8KZs$03UPy4iawzr?TY1Hz` zo8i@|kJhDA*Kc|BX`g)y%>jD#DwjHIZ}sY^ZR#U(A}KJC-%UNeiHo7HHF);#ho9Rhh2%rr)pYUQ{*Bd zeII2z(*YmcNnTwvlW=ZoEpeDO$Z_`y*-w!8cwl@h$ob!f56SmP&n zMtm^c)8GZuZ^Fm&<_zTB z4z?goJN&fq3#R)ySUYE3UC$BcN^{Bp(dT@|E+xJzZ;AoJ%AAANl5tuCx$`3dY&`2J zpE0uj=^sTIcYXRWAygfcx$=S57)#soJE!GQH+Fne(!a-9`P$k#pjIL5(O{(RSg%A6fc+*=YsETP@~uyMEx|b%>Ny?b?q%CJ#HeN z^xbAGBF8Z{6{jzrp=7M^9i5S}EHv;T^WCo-A9_PgMeqEp{Zkiv$OOGVSL4*{z!(hu zY;-=7qZMmqZCoE3){{G98%5+=GN;oIT}tW_+aaYkHk3ETO4|KGhBd*G8PniN9>C-r zjlY+{h$UX|p|#wPb8=wX$TYriqTg5=QfpMgYZSqUZtYa}-Y+Ah7^U{1#nqpwvQbtg zY!z?oqKAW1>B@O=lv`i)>z_84e!Rt(H-Lo^AETTbf#8q+_;i&yjA-%iH3u3Q-n_Q! zd)F58A$G=}6?m$fex;D9KZ`il<`&9sSFXOymA%@8zuQ_3wFdA{+rRCI?G>?h(5Il2 zSK_OU@+>rRXS;Tg-@ecMMtbwM{nN*lk5+BhAN|U8TJ@BMJVoiCW1o@z;I$q^|-ADW0-^LTm=|_%Z zZJA>Hw1O#l@=2n2k!1_~q4wvoYCdW&ACzX5XijJ)Zd#AZ)`fc?2b5rM?zqeE*p`<0 z-!ToQo+o>5m5)uziM{P`PM?e?pYb6k{Nj_jFC2V0+z^H)m#2^9t`_(ChW;p;+4wp? zi{+f_wbHsewYB?^s4aTMwC(PE)gCoAD5w0k8_ekw3wg;nN51R)){iahCU*KQ!IE$PollPX>w`xY%L=aLw11f&Cs?CL~P+= zqsEWy=VZW_4TuiT_->xZxTy6j2`-YH3)OwP0R)v~2OP6#K*SLjQwoi^>2tA^M*#?= zc(~95gGh-hymLae4aXOSm}J}{3JANTOB|GiXK-1_9OBIdjpgJyIjDc}8FMa96adx; zCxyJ?jGht4Y{~Ye9Elg5AUTfH(PXY-Ymu_>x}X z)n=I|RcIlbd4;vi?_wK0Np({}1}IKposN^ngH5Nvgo}`|Y`;0e_a$f$#OJ!<`3N?Ojs% z8enJ47OfLpZ28%wWK<&hw7zUp-nkQIUOe*%e=cIQ7~Um-E82{pMQQUDxBJI`(&erV zxnfmA`*Y$9(MLl>&KD_rkiiywnZQ~6uqT6rEp2iJ-6Y>)Di3#?Kk||Si&)vv$Z7QX zpcX-7A+)H*Lo@`qxKf?RrFN%-9k2L1QGx>{d<3s!w@;0r4W0bPR9Y5&7Mfi55PQ8H zbHM)5QQTbYuu+ja6uG{sU&_+AH~6Ypy(MJSjJYcT6^KfW_=|$!CV^tVk`54 ze&h^ecxm6;3axg~nbAqmW^;bmZtc|;V$DnPkX(o_I=1LLZradvQH@SxkG$f?*~X?UHn$3wy@uJdGWa? 
z*OlKs^@*?g)XJ(1`2A5%EVVTenFde#0NC?0kAo0nhtXyt9($^dX|WL`(e>YZZ%<4X zIt`G18{qxt4?q6*<7q8`nBq~s-%IvRD!Bo7E|RwIiOVOFB}0sw0KPZ?GP-&(81OCKnWR#&5&2~y2LpGn ztduHXhtCG>J_jZ&l|;CUHcn#kf(FtGx9$xiGm zGU6^a*yO_*k#H|qB{R5bNy@Ko3fRx!`o{%y}Z=1N$h(-!tOKJcj<0J=Wt2Y>8$ z@+~6d#=_L^VvfovnU9RoImU5@#Pn;!YD0&IjmVUfv9Ek&VyFHp8JYVWRwi>l<4_xx zADqk{P`;{9`$gy4vjPm3;CIf{JZbhlF_81zA#8(^^JPyf!^3r&#R~_~tp7s?fH-PSs{||itaLJCp z!-bpho0rO-GG{KzZ;f2qpU3aCflTet?klt{8C$zgt9sN9^jKryKfQka_VMQT@eyD` zvM}S>`&Wzce>ImuvWmyjNx0DO9vJWV|gd)Ft%b8CU3WZ1absAfZi z_Uxw(EZhnbec3Bkfyf4ic@+kV_xrmMp6?%X!oGvkxK;NA78qIavdre~UV+QLi#Gdb zJ&M-<6k@UrnzL|3Q1K{sPIlpSOsedY9Go~uKQ3fP{bF;1yt9%H7FeD9{@S8GcIS-) zXR+_Wr)ztm=MG=UNGA@&12PLXmp$a%JOI(wg+P1@gQOahRoLXvQCU`G^F=lZhezS& z$pLG^^OtY8ulj|!fBgN=QrH_@Cz^5dv_d9Fb)YAYu@$tm`-qmm7 zbMD6m-Xn`<%jVTrIjwu1!#~^byZ$VEn*WV;fg3|$Tyl5uAh6?f&x2035}@-2V{#w) zGoBsC_7VBo-nkW};t&1W(K^(gtqtxnuRQ3Dvoa^G(_CBSbn3Y|B0FR3gaThkk26r@wLxTVm`Pmx?C$sGGL|_&G>;TM1ow%;agFlET zU2@67PCmHU0T&67i1GLRV>W%>Lcu_Ti0h*-|JZN}I3O$`UnwOfn4WadDX@vx`O-jo zJmX)%ST=&vl*zcBc*N*iAzu8|Ul%*}jW>g_iA;8EWPZdh7%9b@I%+|{LKS)YCCZl<~TaBK}rnBkHBDS zFEuvF8TPn)LoYrWCvw*|G|G+>{l*rD+yCxA!dTk9z3ubrdt*eScw3WG<|6Z^cs5Th z(=!K|iy2GXIX1*%+VLTgi@1BcsQ5gF$}$+HBM87F>~5I z88BvbT(ce$l1go-#}RCb4?JO}?{>#4(#ZohyLRI<=iYt1@f)oi&m3Wga?;Mc!K*LY zl5{_9^x`*mCNGY8U({DNb|T|N&wBx5&w0eS#y-c`yT?_Ur9R~cYD(pr2dHOz#8!15 zd&spxuew(`WFFB@onN(U6aHB~+w1tomwbLznz-BG>rYCaepE*v?bn>-@jn^VMF6*c zx;^%L>MvR(e$F7c_AbRwD^wR_8bZ8h8FLT_w;P4X;*#W&LmFJx< ze3TzgEcNY1*mCj0>4h!)qoW@F6c3ww^AV-UIxoZz7Kvgn7Yu51H#m4C7 z$Zc3%)mxV~a?p`w^Or2pVhM)xm1iQzM29o&jdUQ>nK+4!dB4gU!8e6~D%}PA?W|7B@aAtIf~3 zM^<#dPp!`kcmQODuO71b`>q>5Nx$dDhc?FKtdQjiwL;tYDWXOcTKBB5K z@N{DyS=s+h^jG=(0#3(t_>e4mnS8{EHk((*ka=PnbBh?;7d1|x;~xU16q6a?OmWgX~N9i2#Cm^Wh9UsqDzcSXN`fF z*|_nvz?zTE#J}mD=YeGzUkHN98&`bTsJIprrsO)}9AlFZ%4-cXa>W{5?ACV1Cpj76 zg*@>#48JYboR9}{09O3^A6T$E^I2`|o_OgK3ADa~u*c5J$YqXV+OQ+Aww?{UsLrtkIa<|)a9y~pN5se?d zAtdhd@la9?zH2kY&Jt|KO*$@^k96V=O*uX4%1c)vENg%dNqb;^J7I}PXY-H;JB1rG5hX=SYe3l$9b_26iGTBpFHYYP60b+Vdt%l zTu=my<%Jphj!6qb7#RQnAOJ~3K~!!3E;~J9()ie_1#Qia`7DKXgvU=Uw2G~_R=~i( zNhf!Nq=me5SE28^i{W2;CYSohhyOrhPv}iap4G<`SS~y>ws^Jd7yp&_A_qQBe2?FF zQ{GeTV95{B6}cjL4oBjJAUH1VPcJ^WD3OcK4`hJpCm!l#e4*@^ut%yR6Rc=?PG|yJ zdgf2)(k^`2p%5>$)X-`ldSWJiz(uzPf(5(itl~JB%b^ALXc8+!lTPTiiww9RV^cU-l)&W#ifu2dz2OTNbo}~< zl+?7H;9eWO4Eh6;616)vgUQio3=EB{Vk76|w-*2&dHjJ(F)MnSNEv4*+nQkF`9p7> z3z8!K@po?2W8@wSSS6dq33Q3YHD(BRXqFLShay?$8huHjx{5l0*1>ZDLkLVv=Ablv zpGc0dxssmsmqFvKp6BWKOFk;hoP`nn=E#l^xdRCSe^@2!7N8^F{{)Vaw4e@FKkQaF zHMWqEluuZ4Ri9Y6?>lCRGKT(IZJnsDUA1BppWR3g{|y6qip9q89&4j^uC`pTL??XO z;Wr;piiOD$AD9n>iw)6*cH}ZPr@w9M-d`|B&34V^fUVO9Sn4}%Y$E4aIge~Wlli#d z@Ej2fY&8w8ZeVcm_H zzb=C8b4Z=yzOSK2zx2~aVrd>WPH8Q@&+_-a;AdZ=_QF_|g=rBUG=hu0lr_}1n?lUv zCU1^xK6-hPkYjD2^m(l2z}QfE^h&Z>beSCf#|9X^4?S{5=M5bOO6HwEOgu*DhxyX@_ zbo|7FZw^c>h9n7|f>1a&7;wiyh-&lVZ4z`GmnTIY+Yz_%kPE~qsBrL!mfRqOEF1cI z%ffgFMz+AC*q`_Y25Cm#7~QcC2GZli=IywIlqWA!JlQz^=>z6C;UJmrBbN;F$%P*> za`~q31%B1V#iYS#ZBITs>GEM6EIB)l7#UxrkQmI(KaVRUXu6nCB{kvarMRr;{8-I$ zY3WEvD9o#GGvETE$CnEf+fNNA`3eqn{b*R@mbsE~!oLf)r!7Qw6&^X2E(ew1935vR zZDb>YTx1;^?$rgO`3&FKiaj57V4j|O$xTG#<7}hV0wzU0cG2@h6)m|5_Shb1k^%3~ zEEgZxBn`0OQu}Q@5t9#S)r*WI#500IQlIk>n|W0wy0O)HWTlnAnAmbN-$f41sEh@i zML-?g0M`G*9=QjL-_GGY5tzoG92F0`HX0Ddp7C$q;wSPM)18aNx_uLKWAq)qt!52N zED+?+a%$QXhu}-TO^lq@PiT#^ITh38jLSAyOZ8;WvHGGMY{_TiA5G^3eXk>OnVU6k zn56G@CzA2sd{Jwzt$Yvmi2C^14`lo_mdwN2-Fa!;V2(d)I%s&@Fr8y%vp$^k`oX7x zay<0vkNSL7hME_=XZDYYRc2NU@G2iaa5tZJZP30J&rha-W82(oALr0Qzt0a^8^g9y z8g*VhpV+S=Cr+MWbE0c?)JrD`8xwcx-lTA?hvsa0PNaFm(uE z5wt*9UMleYe!JhxmqqTVXMkpYiiWRU8m?&=ussr=%>!XD+LndH(?tzWANYFBPhX4( 
ztj|A$X^_wa;k$OzgmZ4fY|oB`#ha9BzA&%PK=4=&H?UFjpm+h74=?7y;?2;R2B$7u zbn%pUgEtm1QtbLjhBjXRlA(+XjboIOeZ}E8W^Cl#zIh+IIRh&fHQ2O*4dD<+cIiAj zwqVcSIUpu|(9yr0fEXFjxI0#Ig)C5I12gj1VqcCk{N#g8Kr+Z$PxFgpp$A54+45UA zilvXF^0tI{QpD9+usNnrC0+48_o_j~{r&{2Z}Jhk50@fU$>6#@c}B+O#B%FZ$RC z-e6MB(YG^jP?{9U^3)7p)*&&2SN%wHDZlj8$l0Xp%AER@lN97-_ukINk<>T~!6OGdr}1S&>kzx`p8jZ4kVxq9 zlQ{3=Lhi&w-}&)6Bntp*upn~W*yB9%B*p?-sJi6589RZJm@1(9#0z9{d;+2Ya@Bt{mm)(U;-4J*SN6EE(4z3dqbUAskoUtNT&oPgBdf7XxS@E5N<-qLY|;399fK~~T~A8sP-WW#CO!ATsFL6?P# zrwr2bLIi$t&1T0xuDT@a6M>|TZR+Gnq|vc|<5BEJX;k842i?KA6rl<4GD{v3zsCgm zD2Yx>FW0IPB3#=JRvmqw;13RQ#~wZUW6?b}>MY8Nbz#CbI(jKLU-XwMzrE^*sJ!`S z{KDTk#h7!Z+IF0?cnW?bC;=@^%O&6O$Nb>J7#Pku4L>o!f~^Rb#$_(rXBI4;WhREq zOYS71=Sk@K6a=OU3Yb0aTu3s%m;=leLrvadLU`*5J!CupEIjGsAAd_;3djV5=$6a@ zTJ+)Dg6q&Yk0wu}UpuMg$x5;@`IIM!GL#q1F|lZtaU!sdUjiYO9#9OvEr;pY!9`vND1y=H8k?W%SSH{D$LVe-!_?C+g!;?)MzQVI&$xE)s4OV)X9+(BSPWU#+GePc78j4m8Jew*)pu( zWVuHLHu^7npUo+}%yiO$!(eD~}VmOED|nK_#!VlvA4?qA)5sSOaPn6>k1 zb9XI*X_#L=Sd#}B4IRe&X9mGYJO$$*C)SJFp0FgUl z-?i(P4fD6`-+Xa^tT%YSr!HNd6rc>A{v0ohJ@$Zwf1L1?0Kb4^uj& z$&YXrbmEi^?$}jG4WC!=9iSA9hdjXa;z64eT1?^c#l~1BE-0ZP4?x9^t_quPaz+Qr z#_9a{ceQ1s(7C9wO%a$NW7*-M?!m@PB5ORJOxnc2TU&_7r|QNC)AjSh z7;_S1K1QJtY31B^yb`ff8Jd!Q7S%WhfL``Lb7PA~fBd6sec%kF*sJxk-bRq=T#iI} zW3R@{gqOT~T;-d;iuuML%rXm|`T;~cswE3g5)m-Q01a-nCx)C)ocvv|#3N_b!345{ zt#MK(o)ZJ2p_huTUmjp=)3tFAli65zZgJL_?Kox}5B-=*tG3XM&8`jV$p_~{gJFoS5?koeOSbIyPT)y?oqR*M*Rh=ew_`z&TqJkFv2EJS z-GOs*%fhOyJzd1-#=&!hdK+jgt)stf)E4z4EYK^ntwx`f`CtC8f2#|C|LyjF{onuo z_8624#g6`I_NaVt;wi;hlT><+#hg7uLa}S;P|Qu@1Mtg?$qieo3PC*IT}>%U~HKUL`93d z?`UaLyiu|NB$;04p?g4Rz+iHyh?YT5U+IwkAeSGy-W<6YFfZ)blCDG`d*i0IUHyEp z+Am*!)4r=%*n||BpGI)eGmm2Vs^N?NDi#wkKhG&S^v9?}G>O9=D0!88e5=-igl#8| zxE&09%#dIK^xcq}Dp{x%i!eFAYcb_686D}`j-@$FerUKWJ)I~7&BVe{{=1pnk~a~> z@Y@G&azGCs^YhB;u}3==Hg?SG0>KLlecM2Th#Aj`2RZdSglg(yi37HYo!m&@7YuAn z%BhUIFP`rfi4jHnpi67@>E=G8jUH$a$o)v5M@)92y?TYy4Cu z_R$KelOCE;U=^XIUwO~((QU3QI0w_vvu?+W+{wZ@C`+j^jO;oeu>dV&RsGZ_F6^e? 
z*b-A6bljMqX@jnDSL*D?PUEky<|P{S;c;h-99!nGo8u$&+DbhZT){tIcX(#(?r77g zf96|!kcUo;2zAcHOk*D&Yx3Y`J)-1-CgX(7=A^#J+gVDdu6(h?-j+Y<@Ls<>-~Rvq z??2!E(|`O=`VREh+qbX!iEM7zzUWE0O48Y}YmAIROWLuUcJcTLv&1qEio@$>;;M{# zgg>v$L2PZD$5XeuoZ9QZl(AKN_(He+)JOdKqQCZR{pn9X7G`40Deryg?)gQ(#>e+f zeUa}C4Np?WDwPK7ox5x<8l>mP)^k-upHC3`=UTm}6B_Es>Vm=>iUMPd8FUJf(?q@} zoVt9@0LV!KSA*CXIVu)+3vl(C;LQg3iH*mbT8OxC$)}s8q&r*BPsyd&keg1H2A|Et zk55M=5~8t@d6LSTY#I~EW+C^2hkfW^20xpqOb47~lm|W=P#x$L%NGxXD1j5(bgdIn zVUSPDAPSR3n|kcqf|T{&pV{z9CTAF&lZ-`;Vtls6q@yo8N9km0EhENaP}V1Q6otT8 zkO)_jX~nP@E`+jl(+gX6QS~|PM`QRAHZVZ9jmd@e zY{?j#+xmucVywUN%c~hrFCZ!xp8p<0#i)NP4#15dqLjr`yFSc7(?aM+nsb&Yobw2+ zW9*=Vf1StUH#QNQv7d7sN-^`4HGgXaYUcIE2QKK~=sE!57(duk8bfQyv|F0D_)aVU zseENChu&_-?;~5_*(JG4PM}`#!Y3a5#(rcD3&yNW19VDt+E2Z0mwxJ$*7~uHa(9iK z-i^DxYRhBR|D!%f{_%(KYCT&wFbf-F8=Q#=OuKx>ll&l29{$>IJAKx)jQf4AGG48(u^0wQlb-LK69?~qvmttt z>c|zc`(iTLXCAW{b8hj6h8(n#+u+G93*~x$Qr6g@j_DAdDLBQbUI?h`jhYE&KspLe z78zs%lQ4NIAwwZppVOPg95OMac;nEspV!x~x@bt8{9WwaO=<$5K%xNPMF1OV8gymz0$Ls7-yEB83GZ^jUL>COa2ER!|4^bakO z2rca7nkXpoPfKHNK6*mpw&V|5WkZSuyN57~ry4eDI`##YHj5g0^5-dhvA~$wZ4apt zrZ7jEiWqRb$pdo$o6UV-Ms?<(=M;IxUTiYr^kXA+d9hdCjJq%!Bg|E>6>9R7eC4g6 z2zj|>z9IS9fa$l93O4OL81LE`op8hZi@q>GD)7F}c66ZE44%8w9-M!(qvBBxp0wnV(<;7o6ma z&m6@*=j37M)@!6wI2B$wRz z0>d9>j6X!4^#RHEdEeQ%6&r<+Xga4(9H8ZW`L>u3nS{faV)S4Ik6UBUHjuF`<^!$q zDZ#YT07GpUhZCnDwgd5X5~=T8V%ND0{^&B+imdsc3sU=GE~>cgi(cK~^4?HUxQQ;5 zb6oLLyEfqib_{tev<(x;>q1QJr35#Ue`0^x)8i%}K zbsi~-09m~7P#Y#;-c2NbW9nQf4gM7IKu@PTM4?QqVcg4dF2<3=fv< z0VO&+h=1iMF++^=cTb&A-OD`_%I_}IcqE@?}?3`yZ)1}y~$6q zzXv-8PCotGIQ_NlHJ`Z1(_pPWx|H2#Y=iwCUufu?9ku0mx}U0z;WI{r`9Qu5GEsHH zP7O?dSWX*V@2Wq35`YSKr{2)rL)q(_AJeme%|dIs%=%B&GB|A9JSJn)!j8&VP)mH> z-O7@#MdSw=pR~y61eo*4cQW|E9OKR6$(_th^bSI523RuVj1L+TpEtsKX5#ZFkNved z`NSrhuQtnf?vB;|`Sa)P>o74m`g779+qhG&U{0M3mup7JpnT9&ds{A?T(r45sb=I!Ac&$oOzo%0;ezhm}fh-EWMcsHE3QP1P?lK7$5k$mimbt zzL_a3?Bf&XF63&Os|qiE!t|58=6U?#BYv>K;uG6r)?-OjJAUlOcJZ;;?*(7_tQTO5 z7zV9xEDRhLRN3Z?2{ihanKm{`vuw{1$rW>*q%S)EFs@?#!7l6WLo?kesjU1k+`6ft zB$o&@fZ$+UYa5==S7XLk^=x9|=-k>_qE6j_O4h9bRa34kD59 zH^|8kF=ou-w|S49#O}5)BxKV8rmDT-rGMJmb799IP3NRK9aqqurGNUoG&X)ok-cYnbl5?Ek@Qn{|9X6vVf=5z|uWRW_!+Wq!ES?znvA7pm+2igL z2{suvVEw6J?soGr0Va&f+|6_Kn1y^Mz0@p$A2rAY%^awbbWCJJL;yaMpZC5|g3IKD z#v&ox{=`41p}}J#K``>f>2EfeX9TxDe*fM4-~agi_Vz<>5DiwoGjCmd#$39@Pam7v zr<5ZT;tCdpfVvDA>K|b;SNQA(|LiABG2+L#(WqnO z#L8P*kOvSnoE^m+^YJa7`Kp(>=oW9)ZC@R9N{d*{}ETe6*R0L!1V24OaY3G9D)Um(Sx=1$Vcd`p}dmw`g9C@?&z>+3lp^YgYTZ+g=Z^z z4omh~9}>qWCB7Lu))xAWX9Pq`Zs>1X_0h>U(%g~+GN9#^{GPJ)6X_hm)i_Qc9q01s zzi8aN`pam1#fJYRxG1kr-}3u$KlC-gcf~o5n%C1W?W+%bWvo;i{ytYe=O)%AX|vJI z7$$!4?>=0+{o(iYU0f%Pc72vlU%vtK`uA}v$81tmT3omh3KWS$Cb7O#(%_0|-ekm^ zy2J#-i>nPY(X`ldhshnAf854KCF8q%90-&$<45{Bfg~p9np7;%digzLHWE%Q7VhEu z1X0_AEwsEXkpUDc1JeOx*auf9q&7|akWlCx6^=GnN|T@WswJbC!S2Q1_9VpKf;d<2 z)K-=En6M3=91H`a_GQ1!0xpDXSMicZ0-jWX-WCMgy`M zKBDf1Xw4ZWv6b3f9f${c=*dso0ZD$MgEBHKK(uuN(b62;clO~!x2)&Zv{7+W^7biN zEbKrW!}2ZJ-A>L=S+s4=$9*7{nA|$CgUry>id>MwK_(}12L2d=J3VAmh&32Gw&_mv zGl5nNBU>Hb-G1}m>|hh#xN4mmmFT-?7X9FiZ%LS*1MT|1r`Hy8!GIio6EF1QNbJ$C zzwn7!QW7XVw!pOYfyDTP7tQksz538kx!O(`tSPe1)5x(IJHb|)%a&siHhy@`2zFkP zsT}pSlm|VsZmFGkBj1?N=N0?a755?^t?RM=m)?PA(HsMs@pF!esTZC4So*_{ugo#a z@LMx1_LyBUsO5g@=nTP1#Kqdcxu1f3eQnyk4x`{{fv`RUKp~EoFMoCW{`9BY4?YK{ ze{cHim-m91SCPb4=UFH{7VB7dg>-NyhMbetxYkCs@k{wtpHRGy4~wG-dFvc2A(=e=YWcy*t+ljgVRc{D;_!VZYR)15!c z1?!)dM2S0N{k$#vEgwXKpZD`M0AE0$zYOA$c-CV-g6B>ctmqgUN|S8pfMd@1jvkTt zchK6jvgwHrJWje!4j$_fgYQsfK(g#{+4R})U=-bA@uBdP0H`XcXHyjc zwtQjBcf~%}kcBZho!pAxl0|OtfzvKHWL$|OInQ0V@%+L@Y>i#%C+vyCafeqdK~DtW 
zO(aO9kVoDC03ZNKL_t&}=LX{ZkpfEWpLv4_K6uevISs)K&r9bkJ}hI4;|u-CtuNMa zZTB3u{Sec)I_CfvcI1PLyxLIUJ|_k^UeD4vpruXz?a+Pb%+W#CkwjSYkymAggf8I@tH#DvT z_#xz$>Yg>c}5Y{$mJEMMLvxe%}} zexPTP#R?k_j#vgY5XXYrBs-Bze1hZ&gzho{Ckp#kY~!0a9E0pTN6`3sxFS-oOJc_)J^X#_0JO zx%3e@e5rln3eQIltMo!?r(*mKp*Av}@lgymq7GDx^Ms7mE**jkcFxg}L+8YszK=TC z(yE)s$aC)8k1bkYIZJ()USB6Y?Ne*Fo$Dj^t8Do4%X;JSQf#w^8q6N{bk&cJC%)T_ z^B&th271dkxl^28wkF>AjZdtk|OkJ7s>GN3tRzRKzkYzvYmT9c>Mla>m+imFK z_gK5tF+$l#r@XAK_j8<_)E08}5xYXg+TqVn1xsaWv;O0E&z0Ia<%-3(V`j*+2UK_T zjHSm@mbfx{;jW#DIh%hr88Ov|&xi5Y3%&6&(RqkjmEhRMU9%2bpBe__^pvtT|bwI)%7Nvj;tGxw&%heyh)98d*MaWRsH!dz6b&GF&(=!DmK zryg-3&-oNbwjH_22mHm`xZ>NPAZ{#f=#c!iM=e`ifE!0_)Mjh!;nDL-H5aO&b3W8Z zm$K(eFo(}MWF`l;NYuid3JN`h4-}(V~e3Luo!)Y7ijXi$qyLjoxFIi74%eMNJ%Ur8=b&B=T zJc(@MiOtyC_P4#W1wS&|ZuyQ+WbiPa{BK?}FCgqOAvVhef#g59&Eqdem?ysUtj%!_}Yoz?I*Ft zUhP&kweTglL^wG(NW#KcM#{N8nQ8~C=GAQeu#1D7RQFWtx&*H_X;-%T`lQ3FzS>Rz z+!5bb zfM!1|IR>$nuNg{+izaZ819Ii$>Cmx&FjqwWop*1nSnz6@KVbAzee{-&+T#l#!38a4 zpTwn5lLPFIrkfmu4dWey@>QRuKJpXCo7QVFwxKE@r2G?Gc~(y3laU_>-SC zhw)us3%qnZHj&$~&i;ut7M4vG;uVj0`EM`5SdX${pUmy#=8OS_ZFBo8jSK6XMfY;y zGp5ChEjO|OY(ijq?%qw@=w~WT|v^P02 zwxL&N=NNk#jK{QUv_5lDT&npGu6fIs^DX#4>+QJU^}OBl8~G-`b0F{&0}{VF zC&TlRgNyBh-D_*|m%oO~>{hJ$e`3H38SqUY?K9sqem1Pbsno72!aQchfLQ|qPsGc;{qi!p}O)@4Ed2#^-{Jld29lgo>ZPGHuL zuKVcv(f&O8f=*uJz@WfpG1bf$o|v=d#jTE9gdii`%GwEbpu>>8K$W~4_&faa>tt9j z3p9`Z8A|I&8GTBB%!d;bZ}!zYQ6cfymiUc)Ph?%deEhKOD95&wY-6Sde*~BzdW59k z3t+>tS4WrlmCyLo#gcQ1e}R^NwPb;W3L^(y1o3mkxfFs3w&|;_4*wxrS6q8zK%&Dq zDai;diP(;>!?rXsnJ^op#tg4;e&XQi(inoe_v<*gRQ0U)<)^A%cp;~+aCyn0cvnG*6OrIBL24=yp)W6 z`pp--TwI#7LBOR6$O{3yku9K(WwUdk;%RS z4?QJcz<6(-_wRZ!!(=ue+!p1aws&%x6Kmx}vOkHD7aaPcgGI)Dw4*~l*{*}nS;du# zsK|=P;G1ukYMYLh{>hASdr@JNln}`!|5Eoa3yOj}{}zMtin{%iM{Jtu=IE@;!p-%ja)@ zt>0=8@o)dtzr6kYv%a<|Q~oLH!5Zxy&sb9xpXvYl=$M_aB{<*Ds!s_60A9e?;&cA!u) zCiRh=LEjRV;_#rL7u&OHt6R$v@0eG4_wVJ(Cm}@>*~m2?GllFIKKV+Gg>4g>zr}(v z%~6WafhB{U$>F)Pt{VaL_z|};>I+jYve9c=bIX_9`YV7k#M#7Yk|(ih&`!0E!Tu;eCr;MG zErV0~!3sU*QWMKEzZ&*4jQYGEE9{|rjEjQ{Vsyg4HxAwC1S!#BM2iSNRzu5=QmexS z0l)jtBBS@1^FBKcv~`O2=G%I>lGB999jmz7X#-k1=z0y)K}~=1U5h9jEOGI`Zv{{s zFYe5Ns|{L`thxRjfAHuE&mVY;GQyr@B*K5df!c=Tj~Xl((1+4I#F~)ajPz9p7IZ?! 
z0dHZ*P@v8c+t}i}%1;D|{va_GFj44{(wj$@wi9xqJW z;q?bLY1t6gDFCJR!L{=}G`>=~VnJ`ojUDxKw*@cx>SV4geSUHoSKP(hzog-t2j5KD zKt}O?*AF)R-M{@mZvW=*{!Sk-)BNR|Eh>&Dt=%^+@N7)mM`@{{^NL^D^W#pP0`jMa zR;KHCMCs(gfb1GwO5756+O!r1cg*skc>ILGD#Boc)uF~$>@mhE;~!30qc;K(6%T$s zH-RvE;EYmn`9ht1>s3t4&VdKr=wLVb!WQl7wXUtRUz?@1pPKm@beoP3ZX`%?u%X*r zHxAbbshBtE5vsw-xtH;&PWrKR;@NVsRk_->?8KXM0>AhjC?lXupEdzoOb3CA;W&_3;7^{NA1 zaeUEv#QUoXQhD=9*Gp zZ{PIka>>8{&gP$!J|~9ENH03Oc5YTu!7@M{OXNVlb=9%Fs$2yc-m|WDOWRMKwh(R%Ap9{J45wfUcQcFOM0k-@h#Km{cg~nzaHkNT=oS1ja z3S#n~C9c^|b$!4OS*jY&Wjc#?~ipLlB14T%gnT z8tKIYKlFnsenz&mqYkIX`yA9pxWU?Xe8g9^i6}*xkz;z!5s?n2G|(e|5YMNoTR$=H zy!xh*xml}zO2Yib)_A4kaQ=y|b^cs{6BZV9=WS#C~QUJAatVSdizj&%@;( z8ylF2)9W`k0UC#3YR-=M1JLoub_mJOjwQVMARcm>{#i%6Ht^RRoE+M*Q7ZX8%T*7) zUi#5P?s%n~tNkWOL2~}`K?woln*eWm0>C-XW~H5qh6l$D{Z4=^gvOdJHs2kP73Et& zsrI0d;5gR%w9?VSO)`Cw@SBT#7Jw@^7ZxT>0JpIa`yN0RN`dhjWG4uTrW4IlPEzD^ z&ddf2ml=UBn=D%H;#Zh8Vf;4=tTcj2tOY0z9G7-JB&6(u+fKMFtKH$lRD8|cf}j%owXyr*{+cch z+GCT`jCmr;Tu2)Czs^kaMHMjnuM@&y>>^MzZm89%p^!ZDBu+9sR9&j{|G9AIo== zBDF8OYI=QjO)&d>ME6r(keJ*h7uX^<$t60gXi50`{`L0#<2#qnJlnX_Md#jD{cRuE ziVUqs;}v?(5qgx`P0th^c6{{>=MnZ(pZ23t8`T$v7v+*gLgG%68k#$P`_&7zI!cN+ zDKADD06*r|Zq_aXtCv$Od%jzj{n(VeA63gBG!@G^snK^oEb}eHT;}V6-tkNc zj{#(&ftwAHjoh0$6ILR`p~`w=lcxf{q)@@pF`WoR?UFlaWxZZxFw)0m=$`mA@;#70VRuO3&f ztS_t&+~jEfdf6OXi6dAXAL~o^11B?iN)C{tfP;;Q3z7DvXSAU;uW+xiU;C#_eh@4l z`u_UzO~3y4hhuHNL12B9D1kl=2+GNfBfVWJ*tsA{VGkD0)nCsA>@CcSn^}Y(%GFoo z%-K4Y%mKc+W3z2)R%j{F#IOLdFif9uG|yPoVL;hX5EjimBH}+C^rhra99vhi=t_^r zjQp>jB-OzdhSXHyM@Mo6nZ9>cn%=~;z+(XC!e|uK*fdc_VEge5w$#z2vKMspLZ3(8 zafe^9!qAl-zwyrn1ix&^M&|dRXR}3NHZmT`r=-R-Ek5x~nRKYdrmuW)VIt>ugJ#gB zrS^bJk~A`4w&v7GpEzVEpUnWj`dmnPJXHHL4M4;vnW=5?h)@1};Q$sJ4IaRfKk|qk zZ2YJ;2ICxiB+#gYOgZC1isJDGv3dIg-OJgz_#LHXWWZ#*g}baztJjGvZ*rvIt6FJub>j2j+o2P1*I^ zOGGHy2I-!VVMDLPvh|1U%6^=_%HdaS;i1GQV={3KtynCii0Zp?>4y$1=<$UgCaRrU zJ+XuzxjXiDy!C>3=7AAY!SmJl*&AyAPOzSZJ2txlge`}wy+OgGd zoFE?)8ym98OJr*Y`e><2XJcMW#|Vh#ulm#;vjekj;tz{H#3!dKQ>+*Jkswzfpgzw( z5fwAGvu;V`cnU}?+)%`A#wF*&jvcF%U`{)Jv_NB=Blu;@H$fq|4?i&?1MrmQuzhOl zulV)tq33Vf0`!+(^|bpp{aBTY+65AvO50m`B<@0 zB4t9L%L%6q#T&O|>)dv*DG~03x%*}_1ESb?O7;>HaovBPO|J{2Br%$ za{ABTCwD@;*5XM1!7G~xmtPH@uC8v2;<+$dfm08SuC|0LiL07Wx(+J{{>p z;^M{QK-`jp=0(JQ85?eSu|uVX;v{~5UcmqoR~H`SB8esJQa;4V{l`y8!@M{jUl39eB6#PcPgLcneQB^)wyB2(}%_jgF%Q27h1>*962$b z`y9p(dM&Y&vD@_KZbn1GS1&9kn7yzwuCor=)b<@2h?y((4t`^D%1BU3aPE16 z93$fMD1lYfR5?~X{+j;DX#|<$%wx&vg3aqHcD#@}Q8p335|eK8UfSW%d2T;L7T+D0 z=x#jj2cgq3 z?XPaX>&L1rCqwyQVCPDlI{pCwW}3B&4@z!UwqJB2o7{NrLQvUbK0!^coFB<-+u`GH z>)6EVN49eKihgZWkG{rMtECoK!3D7c>0OA=Yl8ElB_rB-ym@(*&j2x1ISKXy>etDaiX%Xg9r8`#}^~QKn}g=I5#+*#H(feV^7_r z3klg^Gq*SNVNyJsoLLEr$^b%EO#bjAF*+`5CRW6>(0Gt>sA2Ud|0U=}Z1N;2{{zl- z5_N(A;e4bF1eO8Ja`3ncdoE-!G;KgRIS9%jPbrc<2rludrqyPgXM)DQV$~h0&t2$@ zBO8G4B5*EU!giS)(?`ZvZ{cL_!Pp86nY$5Dled2KmJktIRj68a)CUMxmE2P8#~5K_VJ^B10W2?2NWQXH3}$v%FL61%bq4$&Qz@Cu57(RZm&6A+!6Tx6~%XW}cuh`(jG|uRm;4tC1VF`n#TJaw-jZJKqf3{By zrBk0+eTp&9RvgNWPSoJG76%nDBFBGlwrGBEW5&5*8{DJ?PbKba?C{EX1q=C@@iTYw zfF~{hH1E&&uy^f4YcbDyStsAQK$Cg)C_nL#Abz>0@IzkUP$sBM6FOp;$2?q>X*XigN=$PU>ek;KoMXn@G8b5^aEUBUZT_yEH0iL}8l89!N(C^X&5TW& zg%?o9BftD-J`#uQ?@#KQAhHAiWw11p$K+CEY`G8veEBzab(}v0GN}37h5Gn&k*sRV z^A$~W!2ne`&4!~~5Vv_*u;U2>z=ve{htVl+qf37u!u4i_su<+XK1Wd?yHS<` zBIIzF--{53IWWbzz@bIPd3KWIn@uv#=NJVq1QQH0Z6oEuz-U|%o(^{p?@Pn!|ny1^5hSV@An3ydX@P(t&m zs&s|Mcn62)iDKcLH6G?E7vY?1!fV^Hf}`XY{KP6_{+rF_jO~KC`e?)k9#a6cT8^K-;}h8sR42!&;h8T}ZXI~ARM}vHlHy;7 zRlag3oH&N#D)NafHo%^L;F!o{Uv#b_yfDlYTKKZvT%{fjMA7kBjYrFq7c&(HW#rZR z^4o9vOMc%_bPm9VJ~$dzYiq*eL*Xhsnc#+w8xa0Bz!&kh?|5Pk+|L7CAvY)@rao&& 
zi=8k3vAHpW2YeLsz+{ynmTrk0kc-}ZekxMlHP-T5%Z@@Na25{+O$m3Ojh>D0gHM5S zVrU7Q->&!cF+zeCQ~F+u=CLNm6C{fCh(U*qVGhtVEIZAD%B3*wi#(^R+|e z#Jew^d9etpr36`x4)H=(yaRA@fnV$*4`vo@_^g9JY-3`dB+kF|y)l}n12IZR z8i>cf7i1O3qc>AWY{Q3I*Yfz!9i{r z1obS@^pGBG=#gtM!EN3PkJufO2x}g20e~Ly z#C80Def482b}c@v=oG7sn=MIvxSc%MuP+8P2Cm5orIU4HmQsd{c*)(w?K&DcCa=gG znQ;E!H1GcBfAP<4`uG3*r`sRD{~o-v~>bNkgoR zG1xV+$z^asSKE_4w(YKNkVkhih3%Hao%o^M*>fPK+J5*PQ`Xo@4u8R@2LB7dq9RP$ z_Csjj=!k(hLF#sJZd*H-TQ7FD%)MQEzOx0NTrtPl3wG?*WS;t*OFW*hldg=(84Gbv zZfUq6oc1-A6Jz|2hV4f$GPTh<;}RFMmTDfnynYdWZ6WjB|GS(3K|Jhziczeb4SGvj zPXXAi@-$8=cH&F;7|^;+IH&F4NQ^{q@^IR92>(PtL8sH{is_2)>_2W}^**l3QO95K zophf;42m}rpFk4zL;#!?Xg^A38PrLpi7+M{pv|(0;aKTgC!)PcR*Cdxz;`d#nnRjZ zg4dId_g$O`62d=V!71D<14g?OIbc7CA{AJEj*J~*?rbuHe}=ELnp+<=8_FO znRi}vc%QF9dQ>F=7JdeZlY#iz`~w8wvg3H6j9oCX(99~2UB`f$<*A|Ru1vL0GB!Rs zdS&oO2R=N#954FFh)xmNsmw);I>wg8VT?{{{@M64bI4);O@kz%q*+UNNBx771Cr3dPULd#Fg*yIvEFB!FHVorN z63@8dBB3QTFC0d1iJ2DYSBH9Dodi(83@&&T8Pi~2#aI{*geb91A5xw&*$PG#8-tUD zJ=8hhG6`a#@u_DV%O8GcepwRxo;*vhQylW^x3BtbkmuX~`S1Rn=fv0Fz7EdB78BST zV^d{*CdY|m65xPH2jjVOh?>u3op=5xIc#q1q7pZH2s6g6bjsBaVIfS%(a28%`>n$2<2iBXKMMX4>vL2kS* z#i+gaoYh-3eAulYQg)o2f5@xEp4#L9-1-jy zUptXa4q^oK!BSbrD3WiaoLhd!;>YbLALw>Levd8c=uleIwtukM`A?q}IfqA9IpOb~ z1}DnaTCvcBnE4ia5*A-%+swSBsYoL&CjEh9+vbYc_*z@RrcT|o;VoRmV7r@F-O9CJ z+mh5n4;>woKlp~l0QL#S-L^JCZ{(hfNhk^!@dQ_L@~3U*5&nPW>s@ptIkqgjVpWms z)<96t3`Y9h`?&)M0tQeEnoF{ZT5D}HkH{iho|)nHIFEfE_HYl6NOIl>Ae3tUyEuDX zgSVqXfYz4?EAyIra*A`5w*XzNj#H*fNh}U7Og;5ZRd$;V9fhvkmDEB=lj@uFIWqu; z23oOssHcTVRdXUGJL>L*s!8jBxep}ncIzl554S8Xz78~REj3W(buuvgocU?DEdVjZ zpoFw>lBiRgFG`Rvo*lp?(ZVUXqpJz}i4A8Hml~L*?KF*W~$%tqJobeJCL1j%Jqv8 zT4ZnE>Q%Z7!`S<@H^0I*jFTMt@MePo52mkQfBE#sAOHOHZ~x`Lefl5&g)6-w03FKao|+xu1EeuqWSNgwIe$%;d;qf zEYxA|j*EM`M>{_{7m*oVA-~Ibhj!?dC-#$zfJ^v~_8pZ^pM|U6^34;=Z-zoMdG{m9 z^+%%4Q*-_*pJ*Ti{ukA{4uM!at`BZmM-)TTC|j>*B43|;O5^v2C3%u2UmV7J$LxNU zZ~GlXtB3A(eXw1T2kz7Vj@!oTlZPNf#5nx#{C~&$qd%^^VIQ_@$83Jbo72?zhND_= zu53Mf{qinXqFcY`AKMbd%f)r;m;Rn_yXV{HnLi(GcY-<-Q1~`sg}ezFfNlcJ80dDp zJZ9<(g5Lo4jUS!hSX0c78En#i?{+t>^e-RWO=>(eBt{Fj9n_yi$9^{X;B6!qz2{c-4$6a74w3_l&# zgI0&~>*{5F5Ks{FLn~q`ZhE!=O=uI4^6Ym3N2fhkuP$j~(LS(=vFAH**d{ODza^$U z?C0A`GV{{MZPX_ZKMjfm)2m(jgrBHesFrBa&WXp5+IsB(r7|)2o{z##$pdd2E>Rp> z3e-+aEW@AgMPS#V3(YAD;gCXrJ~3y%HWi)m+Is_#F#6(!PSN~5$6Nok!9MAFZ6cG| z>aab1<%V7O2`tx!Y(~P9bCblYH1iu3$d!i;Jk=-9)ql@NX_B6eXLz9%&&f+S?Cs8Y zef+D$H$$`A4I+itrcj+=|G;-;aL%NVMKhl&S3j1WwR`6>Kq;XKT%*0R=CSe7|Ky7O zLCXG>Ci051W+xq_C-DVSm+c`^WQjjCA9l-=s2+F07Gt>Ce|VXd_@f9ZSzDOulUm~4y?v#;g@;B z_ZtQM=0s)IJpKZtgOC%4KC4WVm@T_$fREbogYD?c?zF&bFJ*kt1dmRS^+tZ?Zql$!`!+@1lArA4XSdz6Nlt;zHWqh@J>GqW{;G*lMU;2nYE6XWCa4mR7nNWnwdmannEd6HhQlS#7}eAa{n5l>${-B%;qQKG@RoFQy)`HVomu*|J|6=dLVo#jsJz?PFm9g1pSh3 zDOWbeduC%YC-@~jN&#@Z^M(!|H!hzixYZ$DBDM3)_IGUWc=K6;{?3lmQ4DYSp4D@GHQ&5+T)6N4lW%pRDpdQ5m30z@T14Y;_zy$nuc2n`Z~l&4c+~mhe;M_ z3B)t;y&tZe@LM?8?}DT1e34*PN`jkwf7{#Z*g1F!^s3lr4{|MJUP2=3VGtX! 
[GIT binary patch data omitted]
zS3f`g@{`Ns!zbSzH{X0aH?}cRac7JD5TMm~^*s)XX3?!{*@i6j4;id&v)w=lp6$fQ z?9z|m#)K1_>_-$7V<2NfEFR5XN6aKj|H>0;g82=FF$3(gbAwc+0R(3@NNuJclnnyL zk7rAVT0&d7C=09q#RitQJvl*m#14vapnSrzihvI~LAc8vvQ|K;%FHC7wify8lc^Vw zB0D++?_P*($fKaxs>={yxjP{INCklMDfgmF27G9&BihjgS6=o7pe2%LKl&kipqK3? zzbqJKOyG!+9{$F0`j8)Dz&Y8KFSOz%t6CjrW>|K%#11?I2yGb3U2R$3KcAh;B1zk-Mj2x9pM{@QCD+48W z6dZO01eF~!RSk&t$Y6WD)D@FF>~sriPSS-a3HSo5!JAc+mX(H({!9#ThN45#VNBc_ z7YQ3Zu|tQ-R9qU#IyTi|{c);+izsbQbI@_Avw=e4tk{Rf4CvbiSOE>!@a1#i_R+@U(#zGh{s`_=I!KsndZ&R`n1<8~NqxL0*-u z?^YT%WIlE8R6xjEb>hbK)ud7H=hk}YP(#}N+v;w z1p={~JRzZ8@*is2dV5jYV}q@Gy9n@eC1u>!nSiNIwi7!-Wz>#Wl(`d9>=}>L%`S*^ z)p4yUm8lWd6)|+w)lR~mo&PS;%7Yd0O$`WX8P~jtPCk!$^WGVE^FL>s|0f?lW3~VO z@uQDEI^JTN|B_vR1|nr+uvJ;Fs&?7U6kGl?N!7lZ0o}0WtoDf+uemSF*a7*-pYnNc zi%$i#9dGl2mi5M=all`;QRfaPTQ&qBZ@H20>>)ptWT-LWkms9gR^&{al;V-E7NiV7 zwg}fBm<=JAVD;o8!AHb_|f+`!kHm zA|&Ifw=s$qs!eQb7lf_MRlTuz^U&>Pc#@x+JAK7638vEK87}?N<&a@xClU)qy+eZlD!MDBw zNBwrWI@$Mm)DPx=X^uQSR;XZNP-uR4m<_Bx{^BH-8(rO9Bhw?P?tXcRBFIsu8irO9a!eBy$yZf8sSTU^%Sr7{J^^yrM zWb45J(9R5=!&E#EW5aS_?ORAzYQlKm1xuS%JuvEWI*h5wq$A1r$}lkN1yB#+lv8_* zSeAHvKR@=`W67LVcuTQqL@@mCDL3p+@LRqGxa2S=;OP3`G4}wxW_E6%(M%P$YTJJe z$Gk$@h&bCYb%JB|R;(GVg>tmeptbcRH+U&e83Vkl@3z4d0UXmB#<#WkCOMC;vvW51 zY75Yc#=Xf$1NowFr#`ZR1K#}NAe}wD>hy86PHMIZL@OzR#yC(PI}PbMz&Bw?-jt25 zG#YrIEzS-!xA^2G?QS%5btEqIx)WBZ(~%XHohZoCnpftMhK2A^lrTxV8k;_3k#n5X z_59f$J6V_$A)ke7)69^l*f)gwEQ-vPPrYcMake5KPW)G`FkF6iR6 zvU33dl;4hX-nK`**ur_|ANA4V+s?6f6j1IB(bwlZGY>%b1vDrK z%#Cw5tdto@E*{++&$w&<6JFBuvmbnTeDKjb$5U?Vb6~#kdN&gbHzZmK8^^G_W&k#_ zb$K&-CK2i;u&6dWY*~T99PM1a=%8f;2@ZL)8sdP8w zd&mMiBYoir5Gkj}Oc=0ds)vEz(DssmZO@PK)(V_@ePDCbWZjE{EDsrAYp105qGjk| zV_%j7{ZRSxhE1tNK_VPLU`s)e7UJH|F&w5&foppLFL@MT{A|02fxUNjPX5Me~ z@|V^Keow@S7MWU?9A=VPyK9)cWBT7+CcP!Q!pU^$HN4Dgs1e?Ne2#%LeTP5k@7D{b z-SCKuJ{KdFcoU!@u5A8zNOWmX4v5-*I&_`yK&@dA6e2!?<&9PbSb`G34yF-J0a8}c z*jx=JTvi7kC(Jw2c|SWVg6jevDZqC?_1V}nI*_fSxQajb9*n|!yS@%IzJwQ5hz9K4 zAnUKZ8F-tv61Eb8Chm@As|FC&P5#wW*~Gwjae%WkRUincXR9BH!OsEH4!3gP6kMJ< z&hl&~psMkIV3{)$i1x-<#aI~60UWjPZNM;LC@PK6eOmA7N zkX|bo>a}fyIW$eHYev-1*im&n#9zH!blVLPBBoI!jorb|7TQF5;O(~_4*iL(eThVz z)rSmy^2wNukYSXllTaer?FrxLGQ86lVfNOWGAAgfWam7-*d2}Kq@5iwu=AyPCsxT> z!maRhg0D#vD^5!2DBLD|L`QmoT>Q{r&+h;DNADaT@}izg-r{rq#1%Si31VoUuN}PW zbRnvJU0rsU`C6eGD-L?bp6aV*D{NNIGca?8Xp^rL)iYudm(bQk22v9P;Odwt6H0Ie z%5J)_!HiY6GT7m9m!hlgW;@7tcENiG2vBc`>fQ%sP)6jN zb3u*u^cV7&*PGh838Xe5jrt2sn*)n&lwF&*lV_V;aCuC7@o`1nZw3%kl^9zh$=i+r zy8%UhOEtOLCS{M=ReH^WNeJ6lgU!YeU#gN(c_tplN&V!er{LjrLO*4)Kjgg-R6StF z!zV4$6$P3kgBx9_d5m-=`LbVvSFDw1YNy>L$oJrf!r&I8fb0P+|4EBux7*Ea=xQ5} z?E~Wx{`ObWu}amnS$F_BnQd&*%j{y!WJmY_{W|GX=KtsGzD`MzA4D|% zIoZh}vwDs!5*b^`=T5$rPCs;27eQ0blp}~|kVe4o;}X|R`Ufkxv0E5YrA*YZF^>WA zTL2K7FmV9H2v#~662PuVbCOu#TBT0SVrAy)1>q72fttO8<1euJv$;lMTfMal48XwP z<6o|VUGdnse4Cg;HtA?IkkRnT8BwQ%}7Xe1-}&!*}H+5dh(bIz7TDtgZ-HNq{}@UB`B0S za;!q_LoJ{IEmz)lJc`?8022l}dMJZIE?hxH+E2eRKqoXlMF(vLjz>Y}nSVQ|=)k{g ziojXl{f|F<>-gvg@9-GkML)jNr7T8`;sd&FgCkx8 zjiINrdd09i>DtCmK2|#F;AI=oKgOT!#3ys?`1>&*eWrb=_X!L;D}>~qv$hA0V)BEl zSx~zPh=Q-N<+B8U#gEC#MTZLo|G7Y?)Xw=t#HX<{XXt^ z41M)ce*6ke?g4nhHwKb86?Qgl+bwnVhxTh{j-c zI$Z~+tks8N;Kc@hiR%!=@V)@I*UaqTQ9ga9YVuw^T@wjAjM=eKj|o&e|B+3hy^f2F z;;I0~*6kkC5t>7C&>JwJmx;hh24s_GA_XZOsg5oBI%1%{M`QRczbcnx9_kw&yf z4pAEUr$*V9 zLO}8}eFHFd2UBR;hMwOn6Bp!su;{7F-4216#DM{xexLx~-L{;27XbQoEfiW%l%Xz4 zUxQT9+D%YCKhYZ4?Z25gC^zNqOsTW9IQHY2dev*3l}lHnKvNo>QTmJl@0?Ywt3!rS z`idvV?Gx^xXUo-<|8w5A|AY6Q9iP1KkMHm(9TS0bCI}8=Sagh>p6ZL8pxt1tJfs`v zU~A)dvVG?ExHrX_#%@jQK-Df@=U22={)&p73;}&K?*L$O<3!v09GGkPLq>m 
zLU=B_Mh=X8iN5YINE;p?W|Ibec|P{hK=rX4`-p?Q?hrh1hhR+z5b${P70s9h;6A}H|U@aM7YuT$^LX`vGqMy9G;W#<8q6kj7rp?sW zfYzK23hk&a-x=ffsV~Gm;+nsk2+K}@c`C#qqC)35#QLRGRqCrs+SCle|$(AWBawjV}|KR zGNAM<6i$Q63!V+ci>G{gOwV^y@KIFQ0cx+x0q*QdrGy3iD^QgsvjcM2edvcc0lo+P z77@V@jl~*R3Ujn_#TBx~KWY3DlL-XMfsF?B+E$&wnIgW|Ts>96(Ju=@kvgVsqlQV{ zJy?py9)eH(VDb$S$qe1`PrkQM8p}MjIf}RpclPOhA!vU1(!dC#AT)GhBmq(?g?jju z2GUJGlm$MJa%lxkP=EsXGH3&s1Tcto;J7M`Iy8l?mWn1BX5eAKtCqoJxot#}8auWj zO$<618WOXcFsr{ExFO0zUTCoKx6_o;RY4$8*3$eTkMuw&?drMJ14Y_EMGOsv%Gl2C z;GwL-k&K$1F;LhS{@|EC(bTXOcus(7qob{B>;gpB>&kRQ%B8%>kPgxukOdDdaV_KA z7Bhp2^2C?2Ryr2&ug?Y&EILE&Jn~P6HUdUcN#bF<7p6K+Y9(HAL}ecYFb1^YG(Nas zo3Sg+SsMa5-(n1nWP-MEcCrx$GA_|1nf>p|ma=q)`hsfUJjUi(^2EC!8chHI4te#h zG3ossP8QxgVyl$_$zS06fM41Bn7jKwV(@>;!0)|swHx{fV-u|LVPF|k7KM`5-7{~2XXvlIdV8)d1&u~1ORyH6Gf?9JM^@R z69L=iro(yb5e+AGBy4~9sMafNhZUqHArJ#$ek?4cBar9m1ImZ15Lpv>&`5caufx96 zi6OFgZ-5FX^i&IL-Pl?S7) zz7>k7EYanpF7m9n%IcD#IHQ6}>xJzc!675TWDmxAW!WAUdX9k!XcvW-D9ArL!vm0X zv7%$iM4Czc<4+@P6goT6q5R2I1~kV}hH{BaXRqH^paYp<#v&=C>cXnr7?>F<^-e4k z1_hf1D*$w-%sLn^*(xV1`x)Sj2bI+zX*6d*3NDJonQPG2Q=txgOV^Li3Ri`t&0+gF z*~MBr2l<3Kr#Mz72B@UdZ_$+3sm#?5Fk=(SwVV7ZG~h^PnM8xPJC$l78vkI7tPp`5$Eqh~ zw_{#e9DF>wfloNHfs<5=8;Ip{!6c;U*>z%+83X#;6LU>l|Afatz=HGb%k$$W@4a`t z_wKWPh3_d_QhJ^3! zS2m=f0r|{6H#YEo0-tKgjPS6&$%}(UR>xLe+QM(@WSJE7@#f>!z5Y|zy|ZCkt5C|z z#wPb}kTL0`$kl%XAOmn}qDSh(--${8R=8>g|KjrP?he2EhvYj?7onq32X$b@aUjoARf8HZ^=)x|Ha>ko4>$-7_v}O9lIPfrzv!r$4 zXbWTAovnF_$aab!(GNnwm>158nS-UIYPr_)FID5&%X%}A4Q-jX!yscaf~HdgapWl< zx%{4f0@>|=E!Uw6uMf~w-1;8}T>hHKnUE3xO_G=>mYuKg_aY$yU?QFR68w@(_7Ctt zjgCF0NV!Wz)Xze2<54dEp#7$N^`|ygE!cSCWb9uafRB%r!}=NERX$B3H!>ole@UELUgmZ2>1@|dP)S5V3kw=teIIyr!)!~Yga<0&) zOvR=VlR{q`fsMp+#Tk`DL_oO3V}hAd31$u98c3*jWgvFj&v|1ST9X#wmD$5aB_C(+ zyD`c!XJH?8a@FKkq-D+0ftT0(0|c&QT$Rk09jhYpQ9jWZ0en~0G$>`WN-d>t96cYT=vU%`gg;0^HDMToJ3ywt&KzK{V0FRG_h zz&CUR@Y9>UWGiVt`h9T8$xC6WADWX_kL;RnB1-)n#)qAj7%+tflQq)AJI=#QG(h43 z{n1UC)xz?jVfmggsfU%~7#kkSJ)LA&qZ{S7uK1K+eSnP@<<~w5!{9HS>dE=0zIVB? z?`I#se_Zm6`s2s^h&AZ~JOju&v`ypC+l2%S9C*Esf{OgBnUYa9J^6MQyd@G;!aGUsyrv+|(%O8BC zS4XAHI!wL69fP5(Hpo`+%67=)<8Z{iFN5)QzVdZPK{>rNf_2lOEnP^xai;@+sIUH? 
ziH|!F4%XnjW*1`1k1f)}vvyHG?IC^NgCKe3^N%tBhVT|gS^xkb07*naR3ES&V>@Nl zX2P=-PPqf6PhGt8{)fj;fBfF@vk%UX7yKIGl@p-bNA2cBfUjc9voQt#)1?LtVGP5| zOBeQW#=d{v&Sbg)8<~2X55ps-)zxXp23gMfLh)hIA)HW+^TL_q%XqDwO2xL93{+Al zFL$GQI?MJZ4l>z(v1&P>!l_jNFaByG*{XJ`Z(t_(=u4v(VVk$jN}GRhXL3)BBhJ`K zxi+h5x3!PtF+Xr6)J0%-Ef6CAf!ZPY-7g4bUb@L)Lb=5zZU2-m6azS_qp!>cbk&pZ z&<0fXg(aGROr1p@_80LiuiWMnfLaR|VdYpjm7VyJGZsSuAY$5djuWYs9~whzY_5kq zmM<3n(R28ZOaoUxSMkWW=?(tq617Dq(kI#P=nXvI`eT4j`pJ)eu&aQ@!tPlH@HeS`rkHOeMYV)xf zIw)TP$zbG*E*-hPw_0Ty0Bz;`HTZMB!&Mgi1|9VdMKnJ7l)APoAtr6FZ>q0w;=Q7i zRs;yAuaA&6@JJBYxk=Q)4|;TP#(~J<8W+Ztow0?piXDHbqZZ3n@jC|t?JB%HyvM;2 z%XbxY>9TtY(DnLd{&7Ok&J>pG{rEJLE^oNYkVeo!*IQmLkPDb7-1wq_25RZJTQat} z^)g5QJYtD#PL|r~upT@o(+=*^z2Rm*wJ~0Uqm56U@yi52UvgQ%7Ge}%usieYt#^+1 zf52}o{PcYuAN=F-`PW=F;IYo85cuE2m`-`VCw7$;V<68XCVTpKw#+psy017^j#QdgirBIRFEyxt8NYFFEY_7u$EoJ`b< zW?GvW+x87RR*VTJf5|TK>^giE(zvV-PzU*ucVaUWYK4I|@g8ct@PQd@JO>J7gQR@OHG@;5O<408AOx0m?&a(nd=aRo=x(O^l1F11F)w6%HN^=+ha>Nq7+& z6bd%vaSMyI4OCA>TR6&U7}6)$9gK9O4RW4~l%QY?tl%n#|29GUJw=ra-+cHMfAKGX zO+(O+jd1E5bWNuwUo;+w>T>_2(_kb$GmEE*1qXcogkB;x>3*AZ>d}6gi+%V{>zXNGllH*_Bm>5dhh#da5#8_zc zZyp(ZBVdClIPMHwb)B3?; zq-+G>NyABtGE*7u491dB9`$jTz%ymgwM>~2J)9=0>L6eE-1y@<(lpIYecl6c%`XSe zRzItIeBWgiF06A)rCZ&QYQ{=OCZMwvU}D%yo{OP#~${-wmtD}fzwZhPF}gBFmWVtCyWUo zY_UgmOgx+5KVAKjku;UZKgz%#n$Y%B;}aOIR2 zBAC1xFZ_o%8RPG2~>_HnVK zUVQ9-90ZchWFOAPj5+|%MB!*`CiKKSAB@y9>s zMzU{?OPt`!_SG$`Q9ExKccUOU4am`guYy2NA9TYd^2j&dd})G_8+vY~E0appVK^RA z?m!}nU+PD0KK{BTYgl00W*2s)-O_gw;K1dC;M#%TmGqL;9S(1xbD}ca?$AkQR_mON zWFi(moRv$C?g+@=ScHyat@k@<#M(b;Cm_l>1N_)!^Ay)uNVnv4GNOJC`tzD#b$2&G zobHcEX89uLx<4Z4CWgE$A3lymO)ebKwG{$3xc~-b>nARF%HZi+?;oG=I}9IQa)ag< zFZdNg-Y~dsdJL`LCdD=GT041v2{Avj6JS&uPp;sNC%BJZwhadYAesa__|bawC{ zq7Ha@)G~}g4l!tO;ab_k%R2e$i`8y4oyWusDwP(`2(tVx#+5<2;t3t)ly8vIjS)Ez zy9YsP_@tV6P8}$~NF8h)i;aDYhqybv^Hr2#@*~rA z42hM$kFQ;_yY!a*PLkfIfQR`3kWXu5b@VXT;0CWB2;CR16<@|B$Ow2>%)&`>I&D(c zD*&BRgBAhf%t|peTC3g-vxE#TUb2H;#+42@1V!~ySc+*RQD<8l7|>MHj>+<7zUm|V zaF#IfH+b@(yQoG8;Ri(c0EC|pr58^sI$IB$$~o_{DIVI| z2R&y}p?o4R$iYi{iz|&mC8KLRZNA_R51_3Okkd6=-ToL%uyj7>-T=oVes4YJ4*#d` z9{=hm@A4C{T=zeJ+FL3g+~17~JJ6t_E!~VV-JN>cu;-J!#?K4eexrrZUD@`%|u@);W17CY1*o zGXVk~2rmbuk9FG8QVOKo#6|u1xI>@}t%T7jzDyLFfC!=9>?rt>Av4PpB_K_ z;m5~MKe{}=`s448uXuytSFaer`Kh=vA8sx%234#!&z!BGHup1#(Npni$axM0FR zI;qr9(-2Bv)_4xLyt2zfzEdx&`U3gM7UI?kG5^+&tRV)khI7=PY8Sj%Thh=RQqiE% zwjh5>OX^F<@w~O`01IAOC|wqOwlIBvJDMwJ|jRyTOK0;e0^wpYS%>H05oZEBtZd_xQkEQ`Vc{?YBbbK zH=DGm;vgV0V0G#VvJ4Sb0vCXa!I_!x;36r)v+*=Rp2Bz=zg`=(<4s2@sG9Tw6*${_mZtWS1}R}Z z!mshJ9SwDXmv(gkn9dBn>UF@hQ*&!N`>E4V^f_orU)u>#@2oZ7$T8ka?&JkuJ@~W| z053X~DKiD6cU+7m6qaK6;@E*#0SeoBNG62>XdNS&Z4aHON2=n$SZE?eUU2dCue(Jt zmXZIi*s7B*MGRsWoSjHszWr`*pX1G#yyWNk@!|X2UCz(rK7aDz@$&U|$Ai~j_v1Zw zg8Hu?%OO3JDg23b}AFaIpnZCFl?iIh|OzYvF{1?__aDK=dXSTG*Jk44w|?CP!BClxg=S z7{DXIoPyX!V?$^Dul7;i=I4`X%;*brg5z!d`oP|+IHIe zz*=m%^z?{-Dj==;DrJn8mfhae<4ibhFTmNxw57C_rEB|;QBDaO|Mmx6V5I*Ph+JWCf#N>iv|Q$J&7gUVO? 
z3JV1sK7MLoJ>j9d{S(#kOaCP*WpeY|K0`{Q+ zcr(89AC;Vqpqi|atu8x+293{s)>=od@ERJJGky0kgiQzC(k&HEa^1pl#Z-AsQ?7t zbGj`D%W6-?HjFZ+H}L!+7ko6^`YeGvY1hi5JzblfkDch23xI4PWM#m*rP-tj!gU=7O-(r zrmWRbq<*3;I|wsMQby8$>($fPm4*2|c_c+{%IK{o%~>99+k4zH__?ibwRBo3FzF*n znfPq~+BUoy5IKYiUIR-X6Xt78+2Bte`Hji^0aTEQ!k1?RpGyR!DNxH62+~h1?85GV zn5TGk%T=Mm2hM5^Z8m)M7e3~)he!7g0N+i9PH@1RCk4E@?~L7@4~~yMe(U(BU;VP1 zJ+IH+hSpo${K1ebu2@sN(c9*AU3}2+?Dp2&67f`g~cNhp_v_Tf0B1%*vEd z2s6q_LfJN5&;0xGZz5t4kVe-QJ>uU6FSCIqpSY6YNlH) zhBj&GQ91I7XPk&9KBtu~9&L-!nfl29X}P{IusB%!Jo;4OmO161r8?EQB(Q_zFw-%1 z@%Z50vGGNfF!?2@v>R|OO-Sn{>S@8<%Ui1g!IFtw^a}6foH{E9KYEf}LP=WwGq5A^ zsMO3o^{hvhnH{SO#uUkID0tidk(v)d6KqDS0$8npjrfdwO28+(NeC6>&$)(Y!F^q$ zIj)P-;y1Pjc1TPos6lpqCF3K1^sQP`z`Gk7lPY#KT49n?_-rzPSx19T6VNi?Upmrk z;)8*psjr>!cDV)oZu$hK*dU19UNtQYmV%CGp2=g&3Am9V&IaD956Xb_kF!w9VdHTU zrM#6<2MGS*8<1@|ZCtL&;Lx$e_^|^MM-A*`?7+N6E{_!IO?sdd8@%NXyC2Q_9LL*P17F9`3qO z)W4b(muA&55bTN0#Ub_6fjZ#=XrXq0iaQu_RRefvAL`}pq^Rq=!b6i&31hbmjN}__ zkhrIjVZ^D?tSX)0yAf~4X}~{p)k{fn#o#a?8wL0_)tP+Smm6R zz15Ta!A)ldwRtbW$eG*R{3o*B;1@EY>N}(3O<`+OAUc=q6fkXhj$AmyooRg5Dv49esUq=(HX6(XQzM zTmzpI6y+ANEr0Y}^h30qV+Jz_LT3g{K; z-02Y>9cqK5LtX9De#`q_=8&h7tLH4sGhI)T-}s+mO>X`%-}t2pUV zHcF1LH5Pf{&{=WLxpH-qA0+KyydGsI5)B2Ap16F337Av36zq=GDud203JSrEi5CM- z#vwg|s`DpTve1`7_%*y?vL_z#$rS1$Me0`o+jzPxQQ$)ixH^w$N(Z{Y-VKBm4KwSs zlYZIh6eNTAwv*9!@dL^RJ9gjZXFN{RuNay{i)+J>8FjUl=ec6>w! z`u5eV-8wt*o@>s?$jB$1M?BB}jKTjsUVr-pnXl+fZup7ZH{ZR*Ti{;MxxC;}L#{c~ z^`Ft%UdqQ<1Kyh`eN56f0eZ8aJ0LbVAFi__>V;K`h^-PedpLNhTWxgoCKk@Yt@!|$ z*X;BBth46#Rl+*jAGT^>P+xSAw)#4#bTgk|i-&g6IN61yqItd9m9w_fhfa#@|(&=t=>K#nFo*cFk*=FE_jl5sLV*56q)Tm#>cTL#P>(y5;`U&6% z55GJvo_~Ejc%REY&p$c7`--1N{^m1Y8pOgMAK$*=CPjW2=OGh8o2x&L;{>dUo-*3t zln=vIuX^N*S>=F+MpeK1oJr*WaBMH?rmVnVe0b=6yo{~&={54JjQx|o(QcpsPdb^j z9|@5VLORM4j%R_igK20Ast&bL?6ohG%^>RiA58X;`<$0Z^;oPvW-=&`lbon64F_?% zMmN%wNFnm8$*eGCFe@MPC;?s7$QEG*^MGcTZM4gY2EdkMuYocypw>!ScuMV$#5Zk6 zW_){iBHhjj3TR&=c(XG(F>8uqV3cii9d27bFNeVl8pA6c25k^TS~R575fFS&q)aO_ zI^;7@0lY7Rc#a#Dg*P~sTQ;=D*9n`=R8+Zy6=t=Yl^>lT#-B8z1D-e@{L<*fr$l;; zg=+RH76zT`Ito3=ZTo^06Js1@deMmVl@2sx&4fX68qhSJFnkaJx`ws*71l7q3U?Y% z9Y=vHmd>H!*=a??&Wc%?GeBtRb7dJLk(WLUv~#jM8pv5VS@xyT1OQ&AXcQ4R{0;UA zNOJ^H!fKMCEq()a28Wjc3Jt|gw*@a7Ws0%%cOLQ$a@qwB;>1arl|{H!o?cTs@&ycEG0i0TmQv%1R2PUdV#|d@s6Mjx z)4`TUPCJVaynUj-DS&k0q@dL_mesP7+qhExwyeR`ZnCkw_}dwDX*FC$Y$B=udtsN3 z%?5CEKi;Q(yd0roHBfkX2Y!Q=^qKzoGyt;R_ClgtUgE^B3cloK&Tqdto-oioeEu%4 zJbKJe0l(r2i!Y9^FFrfY*d@9?d&!{eBa=Q+^f^EMOq^hbTW%b@WisH(-Dont;EGs$ zYr3VEXAe(K0&SPx-?RIVi4!%`e!!@6`V#{=0$VsKBBG@4zv|1xbg;$>u=RP?+ZU}q zCeGR_#MaGun=Z_aUQvfPiJK0)yrdcFk*;3C=--Z9+a_iA(QRJqqF># zL$oPIX6&b57H+`6jIQL8dGi~aY@GnI0uxEhI(SA~9`i4qxtVg?5E#ou0VMk6EgzxI z9-Ke=R0rtj8n)0nj#GkBXFIFYhIxZ}byF4t1%mzvW@>`Vpi|}=0CY8gMjP6lfSrIG z3Veew1JZ_F>}`x*XZ`Rh%#~trXiHEvQ2;L3;Gcz_~Z~p9Em_q>Ex4bpzT0n zbp(-r&-mq#Gt)@`VWh}s!FSS06c`Ye>$Cz{*qt8iXa|e@=|p+1kjElAG>3t_8sNkS zAr%l-*JWjJ=Gy#X;E8(e(7=_CfiIv`Q8_&pKZx5&G?-!=2{+J?#56$7XD$uFR652H zAE~LZa+p_QZP!d*ziCUMC@VH7G9Z;rywcUncyRUo9$NHCeScIOh_x$)+K$23PD;*A zDqP9JjGuNvA0GANMz6;_zI2HZjCWrVtPZ{*P+1-`BzR5snr+`FyixC*4&#E3@e#eG zKLPBnf|JjF9@up9(qVzCYBE#S@>sz5QZI^Bt#(wNjDFw`sO2-x!2sNI-JFAkkNQZ{ zNrJRUM(h8h>QA0_+qV3^>)B`T_ubX0ZY{}DO0sO(gir)YP-6^k;J@qs6*Qqi(4q)O z(4heVK|tUzQbU4}WZ6okl)9H8b$!u#~&5%LQ@w^olHf5~%86<0?W zax6wuT%x1$)5nt58uP5Er?a(dxZcOpZI%c-*cc3OuyoZKT!W!M4Az|eu2ehhyTEyM zz)|qNz)7-xyef1+N78l`I&E~T7hR>TUDN&3H>C+~&s+$nr|+0WsNlg90zTsY{=Tnd zJ`L`n!FMs%u$>N5hqDF4hiZtYBWuguA>%)lsGEmAzI!C6zw()`!;2(v!)4 z5I2CzkguS12?xlPE=gz{EID1OzhmhYpjUvcu=ej*!=7@Aoi=n&UpwiCYLRP4rXr?C zh}b%my-ZS{2(78_FP);=6-#_W)EbZFuF`t+FFvy7(JsPxfHINUQK#zV0ykw*k 
zOjcb|3Pe%pP3cJsyDIBgItON}gsOMn1Jh_4qr=5t<`ulbEvxcyt>d8QTK)uC>K>Zp ztP|WCZUt9(ve@)@fav9cF@)o3O$kpwr*kOb&QG}gL!(naltGeNn?mvocxhf{(=T~( zYSaQQ%qO;MXFrKap`x&sJyZ&6(C(Ah2@u?u4%gbDf|FBe?!Q##qV( z9xWUmI;_E+DEs6NYlX<$MCEFjlTTl*`EgxY?*1#36D#X+pSpeAw*UL<2>8LJSL(u1 z?Ym?m$bHtR=k3E6ef;Oy*LY6rU2 zEglKN?1`sg?8Kj*qcg;4vJqi3h?QBtsg3sq!(eZanV@H=<5fe^@fkF*tACd*c=M{a z{B>{I+Sdx#py7BFt?yd@H_LGvggO)qzD;g@;qX-mzWd!@Tt0jDv&$cS`rYO8zxw8K z_xmp|cmLkg%b$Gr-Q|lfKfiqYtN-TZ>0c=z_|reXeE06_J~LPV6b`}St*_`2bZb-k z7|$6B3fH!^fwnwGI0krw^#Ob(4Q4E=8aaUAX^{u1eoVLgZ_Qh(htf zSN=B9YEN|`2iKXP<4;@H7mjiR65%&-50HunuDqL*28VpX8ef*TRK(lC&_%T9G}Rkb z19kcu8LcpRDYJWN*&(ARK7eTVB`}C$)WLDi0ILM7hPa{>`7yQ{;r7y-<*JL;4)qyJ z3VuKm;DJr?8T&a~W&Ko-yFF=@BSiQnxMM6vPm1-XO*=*pA6koL@Hy6%6;Ww80#Wx$ z@eB^(R>#3uu49%@3AUEo_4rbE4ls`5lSt$%f2cqI$7f>T{PqmB9)(fAV015#xhc!F z$8{1>$7y*C#Tqy{syC&MKFRVoV-TW>pTw>gv{3L-1KAZv_tQJ&udtPw>;pd~>ZY;h zjJhTq?m)~C1=C~5{qC4y(z`_xaMTGnx+=xnDYvZtl{=XtJk&c5eccetcsVJD@8sIg z_7=oDQdW1mhIfh#hyUqH_l(PyA+9)#Jf;syRg62l@q3(Oj;^Jc*SjX>DETs;nS`k38DfC^Ii$t^&_&< zUdhFp`$puYX$a+Th9-Sue0CkW;rw3DB7>s_M?d12D;7wCXWY`+f*D%Rkl~+En4WXY z95xueF%f-bA3hjk`0*zZeBHaYf6xnXIHBe{F11bvZh2m2yDz{G$v$ zWYhCrxf|LYXy24;`J1*?CZy{V9H1oxkKO6t)mZS**$ZaoxRWn-KWf$iqi3bOWz31auLF2dIwhZ*9WgcbY2TNBnj}8w8dXhrzqYd<>9f1UUx~7%flV3?k zZqDS{U2QZ8Rd?^-P*srS0Pl);y;ylkU@FhBs?A71N51H=H>51V2R(#%a@U6tIfUp| ziB7HZV3F-my6b{WA5GqrrNjZQb5Ok92GASNMDL3n6p3SDA(+DssbcCc2pA!V*HDCT^1(&mrU=(j>h#g+dEal|7?mA?1$y`&CNQ7MOlPim zj?*AUbfAj^Av82?+L+McMMFP+$5&-_Odou?ef?_?24}^t*ritU*3jd*e?Wl+ZbiU% zOjczIeC7m8+ckXt>ep{GRO;Yshq}QMC}{7~P}<-)F#O2V87Hr6HAP8sxKjf1r^U)Z z<#etg>rZFO(rmO*(b2A6dB=DM|FBlMH6}YSSmTQhT$NyvKJ1itv>)T4+>Hh!i4jp9 zKOEUe>|T95opw9=FoQc;KzjJtdgMsRDOBD@KQ9P+U=&)J#_gl=uFZaroBB87A=v8# z!tlOh3^~)}=xczlGsI8dnNp^EG`$&vM#-kkZOjsAa?ApyyEYUUVb@|uMtE6UpTLAc zs`&8C;g_dFo+m0wpMyornU&CQ@Bzg7LpY?ZZFOFCmsb1qIbuDA$oJJcqXN2Iqddr| z-j3s_JQIB+OvYrtO?V+nWb5xA*&GocdF5?zcY^vD+R{Qe_xaz}@!$96K;OK- zZ%SFf=!HxVjr@C4-~H3)N9K|K=H-j{vT+Z7{^RMfMWMfY`tdYNf4Z~GXgfB}rkC}N?zR0vSQ``+z2i*ih5kl=^g^Bt;L6k0@11IT z@x3z%4?y15cm{E`dp~DHj1PLQFDR!S*emRER{kj6>HnN7x+~ndznz>*D}BBMSu%pM zplX-FoHDB#O!edNlu}oENe*%i-Rtz@m)j45?~=25#}1UaVpSsHp+;hX@TSLGp#cYJFJqEB2T+}*atPzI*5g`fi zgjb`jUdf{`c#a{i+&XDp1AaD#soY=3iM+xR-t98aV4`h$vNGj` zLp_Fy@_12l3}*0|U? 
zd2p08C7?`>1v@&{mBfJnm$S-I;C~d9b{etdYu!0&$fFH{!4DQgxzW=VCY)=RF6M+g zgTZW=^1B5hdwx1nN(F7_MUvza1|!__jCHb|97Bw*D?JRvqU@1V;`d?{@MT=eAC0K( zVnIsEvGz=+j2go+-oXkm*3PIUIhKZM5*ceZyq&eZ!?Mj>FZA@n2(M@N7htc<>4tYt zn)c5K8ijAvu-YdwHshs19@N?1f0RSHylghBywYCwH9br`O%^ST6`NJAHdq_gWSqZO z?%JsLxON~*Q{tP(jn^lAr=ISgpf_E})5dwSF@j{A4>ON_TDXzqAWt9ZQ%ya+px+sX z`lq0rb9wwxk1U3-3ccK>r7-1k&hYF>0EQ(z(}Pev%)y$uAWwb56!xQNv)kQckx#1k z*6#U~_oJSSc=*AlzedRm2nu{U^AQhw7M=m8F)3HC4DPLnEthf`?aA`DXCmzNt;Do-iZsn^OydkX~+KfWkzhD5zp^}M!W4M4T{SWw(%Zw3Q zC<7cYqL<+TBwd&sMnm%Mn^mI`-yuCuFoARGK<)VdAMuwcBRTK&(4z1B*JP>iXefX7 z%5B%-uZhN&BJ0PB5{q`hYy9sC6$N&b*|E0hJtJt^`$h)nmIDU-(OlAkvD#{K;CQZW ze%cGX%3fu02_oCv_cha@jZYfNy-Ry!2~> zlyQ}72CHk0a-3#mf^%dZ4rp$4bVkD9cF`pgmN;2uOr_o;PPm~wIIB0qk=&xo&#A0E za&Yq27FT|9rVzwIEq|2-P}-rYW>6|gD{7aNuj8QV9zW^~`87jn_c5b`o-A!zM_5Iw z3E$DB(WlOpXEGtf)g7{Q1FU{F^}2W8+rI)fNIAU>m@mU|#PgRS!n;*?x{hy7*!u<^ zwmk4CN9kjK_c(YMrCs(bTP-j0K98!;`hC?!H8KvOQ4M6H(&nt^4U+H zaTVEk%LDP>sQnU6_s>6x=5M1nnBJdXz|bx#h+5`oxBUCwub(d5w^{NoS)V{9*(X2F z`CW8Bynmdm&Wy@bAOrj^cn_MIN5)3QMor0I0Abpmj9-5HWhBaJy(iB~lHD5AkfYXUa&-d~9q9uw2A@XaOv!rn}d%9V=Ub?ob zy}GUb(RRzIVmWOmNLgFZ``YP-Pgkh`4Xz@ipUoD~s5rgXKlRyT2lpxgZ_qUFQv-<&X$rXB1%6}>`bW~C7*ToD{#2wIagl0q^k1) zY~kRkR`G$D`~ZrE`mW>nq7wkm<|V_u23lG8@Clv3DE^msttKQuMIvig>_D9_yVy=nUVxcXHIx$s$SLTNfzp_147yT zaqw41hS*TeX{bNh1gkXi+&XpXs4=bV5H!IR=X0QV0cJR^_$$4ROMi@Rbq-u}VBYeF zcRR>U)#1N?(M6Wg&=04AcbMTGnU&eaQX*9PIh7oAh00qH(V6aO=kM1sr$51~+>v3J zCZlSVc60)e0Xlin_=ZutcIYdTxXWqe0G@Fw3}z1cZS=;7e!3vwQ9bUy5&M%I@25Gh zyN5aLWcal8?kAt$2RG;YI^AvQpf!Tan?{h2n?`=Zk>*$gj8C3qFyrA>Bh3507-*#Z z^x4OkPd@)Fr+0UG)n+oU7QO|LZ<@M(D-po$laD_s2>4{L9X=a;+N{;9 zhu^&TI=HWTFTuC<$6i+3hl)P^{PW9`cCO=-<9Sma-7;dK1X+#kul}%14WN~G^h5P@lQ|mO?`{)^;%y0G|1^Ze&rea+BqkDpJRX1TI`En zv?Pdmk{mwy+24qV-n;N!U-0|YpI+X-h=+oPyMmV=|IvSV`TDp2b|d`9&-zm1r@g?Z z;N#0*{K?+caQF1%f`T8m6wq$}oa4I!f%o_Ihk}jwFAD&o=e~Mxzw6<@W+q<#=-bOr zfBd&DfAWw1i_5?N_x^*+KmLFH7nlG3fBYX`zWd_$m*4&BA74KCcmCex?(KJ%zx9)! zU4GmP6V|cHuz>WN)_pJaiThGqS zrd9TI)AS+uXQ@{j{Q#1EXCEuinbhWb{&0jZl7n?1&`Ige=y+fMny|8Q0^sSy2(8qr zKv4PgWgakG%PtQG_;EGCFaj%f(qP^aKl;|U;Z-_Z(t@izdlM%GKk235QlO(<(*1q# zY9zC3Lt33fJ7D_wI}I?ZYP&8+wj}lt9s+QLC((3V`Kipv*ZzWo1xPCp-dSg?*XIII z)0&bu`!c^sIPo%@=aPJa%a7e`gaRj+z~nVRySBsR1qQiW#+ZzPkG!q}bznz80r%a{ zO)U=RG7(wS)Ab*kTD=e|RNgHB7(gPaEMgs|yfogNa7ky^aB_`_IT6JwdxW6TBC_&? 
zIedtNYiY`rR~0xpjlsUrG6^eB%dhnab#>K2R}7~h$b)Iv4zt7KUOD`l7NbiXqQGH# zHH>HmHaPL&a!#j?&xsGm@CFT?Qw|9G!%wiug%du?8h{|I_wWkN@NFz5e%Ak4;&h<)DF5e@KSw z&~Qx0jW*d)q5a95F&VAZg9oiTS7~U4-H$_{v>B%%0$X}%P*zx7bsWmAy=sTOw52xF z8CR|fW=A>cC)YLqZFD00;B^|#yXro!?6VBQ^NiwMa(UJ=fOT>E1SadA1=|aMa`vA8 ze{9oP0PYJc1Q$R4$&W6dc#%wS9_L^~Fa!JP4@>{-^YG-LbCl@DALH{nCv=~)eB228 z(>^u){L@b^&pz$TgFpC5fxxE=0us`ZQNfoPfcMQjd|cTd{q?_o`Sgc>r8++@VCZ?Z z0Gk~!eSZJ!+2z>}e$@92et3D@_2Ytt@4o(`z~KJ!G`Jl9r$6~?bB>Q1*+1)3$9DxR zW*iv&M~z;8^UHsgfqQUyo&){#XMg96uD|&0g4U;vv_Ji{&m%w2>GtgWT{9b&A0GQ~ z(hs7kKe+D-cJ7|^Ma6=R`=_5=o_zMhob*rDAGE#v`vN4RTgRhjBRFmC%%MH4{L{~W z7Tu3)^ZIl~k?hPo2(BK7`*|_|3w-Z0c>Mea*}n|+%f8e&Bhd8rt8W(Mz+ooBQh;Fj zWp8$TpVy#I^@JuldWzsTzrXzSZ~X1c+ppeS{u}?D|6xyq6wE#S$>nKs_`G1@d2OYy zzx($0m#@D0bu$xmGDQj=opaRNqO+TRH@m_nNQ*I=d@N!(>7TUz^pTo6<#1nl1Brem z0eoy=6*|C>vwp#b?4IW`Gh2S`b}XX|xz+2$RvjBwlD-!nPY#$>89cb5xZ*vtsFf*r z6+ms!5G?hED3t5J!>ONkj~6_lymISf1dN6eS5Eg8jjccM&M8lh<;nj(*q3e)ll{<8 z$Bv=#)P-h@xJP3$nDY;=bhfGfEI6X?+7N#75MYVm9<;Qj94II^ z19BWMXA0`UdyrRN!5e*pRYHPTd&vSRS&InB=>$9^0%bU>J6MBr>K)xGfo+tJKlH*= zGMIx<{S!0*1O9*vc1(1ZR!O`8s(~g%=aMr5KaZ^{$1nf^92f|zw8{XukGUCj%v3gz zBLg7^Ef|4EloWN;?bt|fd4K3Q;00!MIj_TX!GUtZC*v@Nk<)3wJC)SI%;;Dv$RQ-K zYWS=3(gFPv(Ad{VXq!Nvjt21|@?>zDEX={n8n%YnjoZV!xHI zWI29-I5dv02$Oq5x%SW|fj5GA!;md{)*{CsKB=$`f1dR>;xVrnEe~@EoE&bJ-X2oA!UVhj5 zc7#2B{_*w(yy#^!-+uK~*ROk>@Yk2Gzx*m18reS22{t;v^Oy6setqA!>v3wvz({yA z06DamO;10{$$k9U4|4uLyFAMQe$>eMO{41XzWgGb?O+$Q#V6c%IZiv{U%&XKQS}$e zsdwL3=hL73^>92dSZJ1GK`VWEbNTkmU$;c@P4K>3kYHxu@w1%pXFtl(Kfiqb(?2S` zK%wA;qxg`Me*3B=fqH;8*It5&`=_5p=i>sF0vgH?|%Q=@~wfl5wOXKyPW*{HVZ!et3SKk)pmE$(awm;^66*K+icm4 zQ2O?>(trD#UtC@!t2gPvw>=VSl*~rD(mzby)8v^VO4~1$InF(ezV_PlpUIU~<&xoFZKXOFZW2O3^| z;!JU$fl%P9P}y(>VzS>3^pwDW<18@=VA$$IlY*)ZZ_YI&M-Bi~Pj_N#c3k`Lgd;y( zXdR<5Z18wy3Q6evOr|6hAaZo9jx$+1SN3KXX9_o>PCoU6GUE4?xl%g0;eTxeg!Z}8 zxPU~G4W*c)|KNA&9=Uv_8IkpbNJ1mhHtSsK>@+!pTMc@rACOxKevgkLj~h=BzViH^ zbfvC&pp4Zk`E@=_<^gkYe-p$R|e8#v17GslV$)8!BQi$bd1h{tspW34$ zMz;~dSspl@MJ18eP4ScVX~H#TWJV{LUI%-)4>#uUf+oCN8v(4Fpd)eR4;=@Fq?L%r zjd;fiUX>*hN>iB%lFb6H?qQx{T+dVTa38)^*sl{3r@V_KgE3j%5Xi3()w$IjnWtK* zr2|s$-pIRqc0f9K>(I#qo$y#29P5=?y#qd$*9BG&|CTcPzdE3ax;tIVY0E1ClrdTt z{gMr@V4ab}sNbmmiFNfnJY56Y&JcCNikPOd z)4BS9J#MBo?WNxob!j;A;u*BOY6(DqVX5V9%OXA^WW+6~@D@Qc0d}2#^W~RKwZG~a z|l!U;Q#i`Q7Ea zZwfNPK_>6pH}LA4FU$W$a9bM5ahh4szNV_b{e3i5|6TNc`^8^eURU;Q*Kgm#)1z=L zaojf|e{EX+>lg9$;_~&^zi1u&mzP&x{jTZwZ(B3}s*!Zp)icEo7U%u?o8Mnv{7$*w z_FCNnlY*iJ0RJ&(YuxgJ}w~gDd8uV7cak=J^bX89|ZTq=k5uF?hc*6n&ZY#dC;Jh-8wVG zDr!ULs?DxfhOI40hm!0{*7=oPwgsBUL{DGD^IOoYSP zv9)Xh?S9HA5#u|lk67qV-NEjjoaPiNxds|s02AcOu4KGUgX+AY*SImSv2A7%Sx4sZ zp$QzA;RC-F!;DlF$5XtPxZUN+Vf21qC!89mijzpFHRqJObY7$&3@sz-KtpiI#YlW_V@)y1C%iu#-v*&V3c%|{6<)~o#X9*wIrZb zJ8CaZn>?EZZ~sIiwv30E-})52oovcq`Ec6PaD;01Acb4|mD?|T6j~c7zfQ9B2#km6 zii{nc96V)|z4C)qUA%=DY%>5Vzs(uE%aK%id4g7};CC7Ky{HGRs}r7WXpEMCS{|U~ zrv5DDGN603ubYQW4ZpV@*Q;LNH0=&1r~e_Ru;@HKJP}}q@ANoe%D-~HS8_^72u;_dbeu0S|rt&|Z(11sGrc`cE!D z{Of=B@>K!cXP^Gn%kSG`@Z{s4)RaB<-yVzyA6{Rc^|7eWKL519^3CNx`A`4Hm*0N* zf4qG8r?s(10}C!bG_u>%A1qB)H*&*GU)g}Gvs^!z(k-uo(O@Fnu?{+^lN-HeAC$Qo zl=aW=9lrDqHqo*rC%jF}WB`S~@@J>&qjO+E6_6Rpds<;WJ3`qxrRT`P?dc45d-lV~ z9c=wBCD#?rsLcY@L3j6(oQ>W(9T6Nkd`}PY`gMTi@D?VOb^{d9xmX9F!WT~t( z`gM48x|e@~uD#L z7CagKF5=Zm!#&!Sx#H5MlP}~Q=tA+kM(A%m1_nn*5}!ePQq*$aBYzA2sw~&$*3)xx zAn|VW|ESJuD-igmQ6E>~NjDoga60>-4Bf2l1z9NqncpY-Oyhz?dkmhoeDLkdm)mb( z2Q=9|YfV|Z8}&1?cTL}auz9OABcR7kbw7LBl=3zmo?s!6Z5Yh4l7IBRZr=dB=+kM9 zoINdI10X%|2;g^Lwn4Db`Qx68fAwu|G%E;r+-A6=DEOj7Ibiz%TCOnaWfb1z{EUt) zU2slso6dfl^EQq7E`w~u{HUdg$DjPjZn`$;Sz|zA)akDCotZh2H|F|iCYxzy*KMLR5 
zHvSpuzba^`sm6~Df&z1ayeBnGZB9wT$pY*K<#7`rpHlrafkX_ z20&C!l70xF7SwMRpfcbdvX;KtgXohc7ooFNY(jit?P?xQ8OTZ;ui@UN!mGU+h_V-; zR7SAEp0d95bG(yIFgALnAY_JqGz=x!-AgQO9Fw1vGS&`t$8A&h6o7OwT(&Bbvk=C zNCq5@BNwEO-Giax74{MH#JzyvT4n-PrgjsA)UqI)20$miZ#q%Fe#}<&w8x{LBRvf$ z0TQ+RJHow1URA+P|Hs9Z=}M{-5oZS}VmYWpEj^eU2?Dv}FmepsF}f8x%uQelCE}qE z6B59o1P8##0$-+vIpm=3{Gd@IspqUN;|r#04s^*sQ?}QJs5E%WmmT6@P*#_fCD7p= zTu>A_RIO8ihhH>x4Avnt$malSFiHXWTDy8R7)7eUq4U-O|4{xo%TwpjQC{#)J$QFl z3aB^Pl_ojbM&lL7W zZ)}>QK1KxoQFP_45|dSO>a6|IcG?Y|Yd!tp1`}_UHEP{c3ga~cKDha+bjS1b+3e{t z`gCT4{~B@5pBb(jW3T(JyO>iM3E{1C2!!dzn?|eJ7VkDj7FI0qVC46m`Fhi<*8UmH zH#sbf87O$H(3=12W(PQaW`&51&TKsNva0)dH5z{1Nc+XN9B_^^7@iP#-HUKuHPYuC zHxp2M8MV9qt~~^AjJ#VOfa7J?Z+fBt-){;6K6rW~z8Up*Ej`$q@UCFuZ9&SL_7=SP zwtA-j%>)z(y!hR3_NX1&Jxgy?{65G2sI!ssgGa5kw-Iv7BP}(2_{v9g;Ayl?KD`Iv zU3(NfM)$#H#{!8r(e~<#FO${F%h!L=azjDN`~Kj3-n&Zp8(rTvbMR#f z(~L(>>Ai9V2nnU-f}C`_`MtUrTyJokmY7{=wqWl9VATX_?TO&<@o$=+@qOKLf+Y~6 zcx7yed|iNHIl%V}dJSM`n@M{2xUW9eM$K@1Xd@^+H&;}PZjXpbLG}O-_x5YFB=f8% zH{O2t)#cy*>UWp#db!a1-Wf3!wd*~7sz!eT>eFAzPyxY&SF6)8_EGY2uw7K1KI50g zLxtY7y;)rwk({TSP*~Z@={F2EogM+@OmCBe^K4*1(WZ{?S~$Q*nfwva^#XEqS9h~3 z1vTuz%^zUv#s~Xd_Mvf%5A6Ec4Yp)~n$tMa;kudKi?(@Q@cTo?XsP6;%}blT1s0zi zSZ#gm%awP+Lty9SDch|-ynuIXE!g_LtZ_X#E~p6aWH?!Xg;Mw`sR76nactBY1bxYH zG^LI8|M?P-%RhFx^3fX+$1V=efn7o{F^iZ(jXJxPRKDxw5zGx{N#3Wi%;=e|zk*u* z_^b5j@87Yz=#B=k&SRpL*l+*Vup=x|@9%!TjM7v2<$+~NmH~HAISp50*P3{V0X)P= zBNRAKW&~Ae_ob<^pWsob!*r>tm1`PRRl&m{pu7N3g0qf7(9y$D#1#B%e+@AjrGU@@ z@n1f=HR|0VZ`tTW(XCe33G_5FR*vB0E2p5&wykkp+tImO*{w$=(iV3oDPJyKhQNLO$L#D(!)C=lp$S; zh%uhm-eeU*(Z9VBn47%xd^wTFa(|tAfu7T3|1VnmIU zIUj~}pZaA;st?Zzjsryn(N74a?-9`)8l|u?(+KrlfkqL?eRQIT>(4UG!>; zIg<1?`G3feTk?3&#=}R=MA-XbCgyw&H|Xj9{YNcPRPRBr0QRl}E9CYhJZ`h#vyY!$ zzWMF1F2DFq0aZK4soQK(d>$Q%>jDC^YiuLqveCSTP8Vi>>UZcf>ea94+WIBFlQCIN zjVZ%4XkuJhxfB6Q3-@~a0xab^x)m!jVk#ij0S3Z8RbF;6TA+GI?%_kp$0C>@h zi|Ndf4YC*X$h+bMpIn#MzttUr`Y0U^_5vRT9JR+Nr88u4i0N!*AUKteiBgkCRkqB0 z2!ee4oh5WkAZWjbo@+V%0k*Zt23d-Nd&-t|4&?)|_6xVRbZoZ3g?u=)#~`l31{Y8A z$3l5(ZYEpC2Hg2D9R~wTq(=rB!iLC;$wc|A=xg6M`CuVU^_Q;`?j& zic#bwH)liPl!y@T>Ym~2cDrB`w7TCr8%lE}c+p_MZcYUWN68OUHvegu!A27e?e1*! z3ukcD8O;fT&WXY`zB-%-#W)>s#VU}XN>gu6Qgx*FbLde2GyKuyGJx0oQx;9P8KIkzTHR|$QU{*%KZi*UrC@P- zRU>WPKR&MGodv6?_u?L#y>*P^{Og`8XS8$Z+S3h#e|7rtuYEaf>-v@t)PCP+(JaAT zx{#+k#y{NT_^3{$9~2HuA3QEdKf&=-fp=#JMl2^7t#bAsRp)Wn{ic7>S{*pg7xy@F zhNLqxgJVQd^#u&Kfaac&uHSrAur_C;3&pp05Ipe~yz=fUvo{U0+VTFlsc|D!8x_$a z$aq*enPg*kZ2=KdR60D6s%8`Bg$&U*N;QX$48uZ-%yGH%3-QQRLVKXou zhqQT8AG`0xL@9fF6Mk6J<;Zf(mQePNik3h06`Ek=`}kR3hKsxp1s=ZR;m_lM0+rbX zqx!D3gJlnoWZK7I^v-w*@;t8iD4y+M;E1=ZP@wR*eFSj5ZHnG<$DaQ$;Ca|SiwZ_s zZCdakFnIE5k1qC}1_4;D(R%_4DheVV_MM11kOCY}XFUAKhRW_;!pqed&(oo#7yHea+A8@zT)3jg{KP?5f!S=)!Rv!g|l@qkrq? 
z@Wv1Lg)h}R%Rc%r`vC{LslOhYlQ}y{=4Ev?T8IS!snF_FT+Px-7)G#8;_AxN4}!J2 z$!T@*+QrK3x_h|Om!Vi1+((8IH+_y)fWkLPxbIpT$~98hM_^gwcXExl>8A4jG$shA z&~VN#xyO`0Ff|(1I)`i>e+^XTc);e6S3v=H;WHEATI)cC#B*_q-RcZaST#aknnxT$+HEKIpEn6Zm94T34#<3i=aR++-s^4B_kl4Bvp1hF0Z{+#pd-ea=-a zFE@JMM!lex4x;tsGs3<%G_TmL9laPx9a>Uij;gZ2blg?erJeX@0XT7w1A3`VrtJ^T zvQBE`evC~Hp%F2Nx|RL_p4kEpV$=F1JmercKeb-}abK(~Dlygms1NKsu>r99kAmqb zh5M&oCWK4k6d2^RTCXsjFO^dseIb09p?aL7HadPBoW0*5{2LvoD_&;8F4;8a{PE`> z_w2vN*t`X=cLanFjkNwQC-;DhiJ78pQFMz;c^c2|R$MgCg&$HJ5s?B!W zjOJ;JcwUdvy#zm6ZMtv_-U5hl7~Ol6?lxj|zU%QsOB|H(LBWE(3+UO}e{W*cSH184 zzIPz_c7U&ldRidzFHe8?lj`LBZSX9ppcI0$#|tj#M*-Y}f~fR`&ImLkyY)}1;!Ck&eFSU|kA8m;q2lUKv6oPI!e zHR=gyPoJ2cm%px2J;t02bw_o`!~N*kohzv-g{ME_-t@In@!{!#DfKm~@HpcJK^AP#HZ%<2U{#kh^un$!VK#$x9K37|2JZPo6I|OX z6<#vifR!N_&4(7Yab>utwH{?fgs(+Rjt`(E}qRGnbK$B+fqJaQsI4kKnJe>!-Job%xaDK(7zBn*6*G^!ui?pXWe* zB6aT)2=9Hc1Rpjaa@q{r<3^zztm%HEe~!w`h7tUIj$3e%$2|KT8j*6!PaEm)D{q`` zqf<275NM`g)8OgC-Z{{y_}Rx#Yr}TM=O~{%%eht8i*U5>UAy`11F%W*Sk3G(Z|IT;SH){5zh$jf@%$H}qut-qxQ9yA-tC^?i3RysDV4 zcy+Npu~#!k=cArlcT>5^rF$3ObXuPv)c%!y*FKF8Et!01gQh;fA(@HT3|mVEpZ@TNm#=>PuP^`lH|?>AhuG_>fJ$cM zJz5v1`}H=u_fJPPS=OY0;ruU zJ)5jndUovRYR;6izZsP29p|wbQUN!wPPVfVr%#{ykd~!h_>f2Dvqizzu&(F)5rv5g z%~rTy4G>P9X@fHO?Z@wddFY>SF(3)9pDjyPUX3|hbU37Sxusf3gV{NpE0Nq1yoBSJ zYlHcf&Vrn^jX+4f+HT7YU7r^`k+0rEhNv(|3Y6nY?15E%Npu`tBPiJT*HBCEyu9e@ zQW0%JAJiMbYgKk|a=ZCP>0}6Ab=H>JbQ~%x4-J5#LDA%rt=d~Bs}uJ~ESoIqM~%RL zuGmJ76akVlPWvJfA_z;|5kP5#bmKs~mo}m({_+Xx)MG#qK2pIMaVguZsQR zp$DoPaBy>&tAhuMmc$@?a@nKE^{UZAwn~ z9iYf^oYRqs4EJXlfh;-B0a(VNL5ZuZNJO3Bhjl-b=>U`Q=}hRqAyjt9RR&;=pJ5JI zo#l3a6p>rNMsy^b(Ex4+(rZB%{o2Ho5hA<2qZ*&1>BvS$+ms;2i6Fkdr`X7B^c_CX z<{s`Fze?Zk(K4PV`&%7=gOkuBVF!P=5qQ(YXE)#B0e9efmFvSs(W?_Z0;uMPf8&MR1XWK)uN$q8*;HqC$TIcLs# zFZl?+C9S)h=d(uAz6O`#cLsmU1dZs8l+k4*{;pu*p5qGEah@ko5g3>S zFoR$;udcQCC!aQ=wQIi-eUt_((dCHI_o$KU2ahv0%6;}}FWh-1}c zJ9&4e))xqzV9x7^rRA~XFTISO_3PShBUAes7~%fN2(Cw*e>^@aSb5UB{vY=OCZl|& z%G59Z??3Kw!(N?iNnjsZDmd^lpa?5y=+(@<2x(6(B!5pEeEnsQ`t``4Cnp}#{eJYj z=`HzfpF=_BotGeGm>)I+VfvVkWsGeoG;^t+dC}1ie)#d_PyXva4Q|UD;k1+d5J=Z4 zQ_`l89XI{$LgwIY(1qB_D5W>-sGjmbD1Y)!?$V=qd1Z`;6zA5@rnulov%06pvZwU0 zJBr=0`f9H+z|p^gqlPK}irw{ZkLL|fm7M1kuGa&Ff-dk1aEIP>X7N~l@+~J=>eA5# z3F=6$JX+QU>V6Ld@%bA1R-S(K`L*PHz%+RC1Hf5bEL&n3zv1Nzx3?CVuK`SVLR%*0dtKwmDkk>C8eha#~AMap? 
z19E}n3}W^CEC5q3aE|%5cvMK*&GyN240hR9YEMP_aBT}{dimG=&n4-N$6%{){wTe| zBFRIX`v`$;fFS7_`YsOP)i{aQdK|w(L9L?hc8Z^~``dK*Ab|rSp{k-42qV;|6CFWM zAQkZAdeQEU27nHfb5%N?_U}d;!7rEiu0oek!Pwog2y53mMSRp?2M(IB9A4?shmTYJ z061u*l)g%*0VkWQc3+c;{>W~0bnSjZKIx;UI$+{t3YdH*gK%G|Dkn)-_?+XT>)Z3< zlxL5Jco9pE^G$P>qRw|{sh!7%M`Oxc$1MF1kKarP!yoXubn#Q@ELiBFUpD{kqsfVW>+B7M@kYPUUUG*w!inDo+PBL2LF-IE^ zee_p#>_q?oAOJ~3K~zd&cxxwrcvCe&OFL0BJ-F)k(vqU-^&HvW@D`z*0hyc9_M?CL z%ik~0=oby14tSWue*3B!fi{ahY6M-e(P`xUB!_(-$1_E4&Hk*bdvd@uJqORhZTdOl zIsM12`FlGf^V(BsbC{;w?}KGyoSoaIu1)z{-`2MBaOF=B#{tiQ=fD!g=;6pT$U-lC zC;LZFl2PxyfA>)%;++0IR+RirYl3q&)G>D71+$reRN# z@%g{=H}m~{8z6k9=h17VY}P}7CV-*?51UcJ=XsBL`Hj9^!>@B}WZixQ?dI)+_if6v zG0$5FJt1Jvz{8xo7X{I%hmF)1VDw?5I2gYl+Jo@8X%71%-o1_2aM(X^->l8U9s%>F z!b?w5crnh02d~=u(BqUI*KD@H-h~IzKiv(!rvjeczrXymfB9#Z*F8e_w!dYjmzQ$#Q4#6mVYuE14qBm7&eBU%Dt^}vW>WP(>@Pc-T!6MXV6y1`Mr-{;-Pw0n@vDN<2_BMD zQvg3Nwd^ZVEKh|9kb%V9Eq_W&U^@u!I6nf}PkD^*7+9Ig?)N~!@S?ymFfmA zqBwBOnAf;nqJX^N@L$6mvcMSor;55AM}X+)1^{U;mHkVdX2K|+F`vWI*zq!ISMLh{ zh@dj3kw(XqGuZu7&f%%fcx22lG{V9A15NmlwGMGRSN|sG1qku607y+0An_lqWZfC7 zI=g&j*A3PVaG&xz?t%kzlLl7FZ6a=?)?JT6y=tm=J<1Arj`LlPI+7-dt9el9_2&Fn>L;)>2NB0AJU8arv-@d=uy6>&wDQb-J)mlqz`+qzm0nCH@mQ) zELwBEW*zQ7{wx~fp@8J+XFu*8|9vZz2y}Z6UVk=I z@=L+w2|*f-D<1j+fcpZvHN z4*lunU;cTIIkuUSyu2U?tq(Z=e)NU)>ltQptW0&kUwhVC$#|o7O#5@N2d=fjG0#ih z=z21UhYFbdh3|}NE1OOn>r+)(*B^KEYy#{w&}=H6yx1rh5A~;lL9xMuuB#pVrB!4$L^*)r0I&4x^_#J^Rt_Njbg)nbTd3iUct5Rn^5obh(bR9(9 z0J(Sge~f+C!8_^`4|Rq`HA4~1tCW;gbxuQ?oBqzpc@=_lu2D0iTsk={DEYn&QYa;W z{idAi!-ekAwR%Um3?JyDk(M4*ox1)@$B9bigNqK)a!ncmM+sdDD(LH#&VdNhjPs!b z?P0#+!|Ond0x&f?;HrWrsvnqmLaR2>>6Pm#=_&c9mwHCI4H|=s2DXC}d-Q~3%19>< zGXO@!5AHIM8SW3g>Fr%Rplg)mWYw!G$?1$peQ z2|oB!)9jqfeIs^D0gs9R1Qs81K!yhVvu_F*9or=IAZM~A0FEZ7eH#HktUlw$P~hKd zSNB<8qr~Wv7t=|Io<(PEG4)uRg^z=O(qg*~-W2y<`I!TMaKl-eDeeawu@|J7`ZoRC zX!>n!`vLxled9s)=;GXMYV(CW@W3IAhh4|~aB=+GQ;{JwZBHqTY8mYFnck?4U!VE) zvKU6jD|&PDGmc~q7NuUWQxMRz#N8*mTEcX^&3WFdFEAlzAN@I_c6~!@<^Tjujs{p)||`&+HacQsm`$S`sPPwEkG|Avs_>^ z;oAb57WZ2+IPV#l^2FzzrxfZv9v^(zw*h=L(MCb_?C5ydz5ugk$QKAdXr03gWb6sR zv$r4qeZ4=a=uA`FY)@RZi?$VK#t_XH%LXcvrj7Tzz7;0rAO-O#CH)c)9fr z_vJJA@+vD(!s@tHHrV4|f5Nl&sVLa46%_0Oz{NODo_U6E%^c!CtYl{_gcV&GD2Umc-XVR69_|52+rtVk(JGeJHjqJ_Rp|?Wc+fWr|*IbnD zAK2@Jr!08iAHh6)U4t8CC)*>ZwMjhtK8D$}Aq&1Y3LYWzkd@sm0~x}l7+nq#WL+-k zkbiI!~$M?2r($|kNbR(#ZRvAYQ!p1K0T_nt~U1>ae6b&iY zlypQ#&#`_)d5CrHh`lKw{^j9I-7~XM0i%wh?Bmqxu*%eFOtj_yayFIKS!XOv{U?_? 
zD{iF=W**vk->B|5dOofIApPdgj6b)-zxtap@7!{IR5aNef)7m#`&Rsu$ti;zEOOka zKU!eIB3yz7JNPI6%3#Ix{YFLA*N!&#X~6l+nw0@%R5#TPZ}*sF=l}>r;-Di1F`N%v zjBx$ShNSJ#?p`oqG`mj$SG=3RsGkg?A@YXfY_PTF9u4LB1g~!fyz3RajD#R(0eH^u zF5X}CF8}+UBG{5k2FPrHPw75Z{}r}<46W~*ZQ!hZP|4_9Gc$y5d-)K#?6t$wFUcguJZiJP}Yidp&G8!*IWhh96Hd2<{@FprTh57wq^-WzAU`0cyI@%)+}~ zR^&rSe>i2p{%lr22lu$SU3ogE#L z)F%DFfp%yAHnrdNbUqwxr*6Q0oB>8Sle?gB1J3muDuJeG80|+!>>1ldDrsGR$DZ!h z$kHn8>wY_{n@+BKRZ0I;UO#?gUkd9`cl9L8g95n61)#6G-s`~M#Bq4h=><<)o{Fvo zuJQbu{_Z%m>!w;*JX zpX{cB>S+P;a2t=|n;$Q4!4;bs-YOa()c`P06q@yltO6TpvPH7{+xZH50wu(%lE6{NPoe}dPC4^xTD=U_VRJ6;V6lYh#p*@k#UNg@_yWA zvLE)h=g@0g>$i^^oxN-d^F=|(Z+`RT<@dexTEAQv2yS*=m=(knirN&Y!1?HsbTjp;H!ik$!rfNDjT`+ePR z*Ye?s`p%vZ7!9rUK1dIc>QOGU4%@7!Vbp)56Ci&9wzJufAsHPzWM6cmxpZ{e9)-&q3EdQ_u?K#j~>5u zuC3m`?Zrtink6ZieE)6B1|QcJwU?>>_ku=_sSR}6Pg_mWr#7HW+$33vp}7_rvIW=UHJz(UM=&%#Vmgk~?Rto$=V;Gq zc1Ks$=VtGuvjf@(u*6TdXN%BwTB`d7l$&{}9CYzS^Ur9tdX;4h^qa9%%`#U`FJKo= z|IuFuc6HcriVwi;95+1Z96#7z~6Z|I~O2NzNb(!>01D?6S6=g zuyam45w^+zc2E*nZc0$$exU7u@P?o?jdJC0wY3i-94Gw{&X^nn41bx*z`G}mZzyj=wV54TaXk}3^vmu?|Hunztc zWKcppReyw^qVV~f(XOgvvj>&ihjz*&Q)Sd$r^H`0qfZ{52=jZH;klHs&(Y5z!ui1( zttsaZm(Tk;-H(6p+2ykzwBz~XorDUb#g8t(_*Xx^-2eWcUB0;c^Gid(cG#zg4|;U%>%Vq+`#i(fCaB53`X?BQ{~YK# zZ*)cCI^`}#Ydn>E-*4s-_-N0A2f5}3?KpVsIG0s#nB&fU=q*NWFD;~&SB(>SO*0;}0K_l(HsJCwqq&x3p$_I4n zF2RoHaG=8yz)N2@d}vc$AMVKkuV1y{&RV>84`fM(TRnl0Q7l^Z3%KyI002Q4tn|&W zc?;i}x zqW!2%e;+=OQ*e9jvE>DD%^-aAxBw>pL_QytZW%y33plo^3ylREUKX`rAe@hSFM|CN z-_)o6$v^%Fm;c58`u}|SRg)kuTdL6qHxje7mfqSEV0i(}T2RZ41#EOP8#|GmKEJdz z5?LyI>M5tb%IuQ<=m~_`n3BUZ2M%adl-K|7%Z~Ga9q&3@Oa%+5^${I(ZO+zYGhl8- zrT(cM!xXIHCjWHOv^G9w!XJ`KS z4Aj=jRf*Q@(argzfetv-N6b#@Qc&Ko_SG-n7IeaMEw(!9AKI0ToCiG>bLn-{<2C;g z|H>1JGjW~4UP<)@9y+tzP3660KfsWX9Z!5*Occ#Jd6k6sM!HGxRswnL=5`+ z(U?=Rh+ni?)|4d%){siAY`8G2d>!O=?cT3Z&1N8iL$SeBTwR;g-n5bFz9~POcW$HY z6UmOUQ8J>C+X2&Xfg7MJrAtHKS+5P30TEo`5;}^CD?b#MoOX@5uKXSvQmo3avv5L)tDW@IHkn_Zw^`*UpFO+$s28(%L)+t~ zVV^c-`n>1787z_N$G!RNxe=K)V+xSc`wH4!4)x1-zq$OrFVg+X?|ygr^8fp*%fI~b zyGxHtU!MONdU_+=hrimJ(tg`xajlnD_d{Q$dvA0QEgR*dQ#wUNHe-R_@rsA*uK3hrIPhu;kW<`!)m+o@{&juM%AA=B^=hs;yVD=`^Z_Q5L8U{>Sa;5Y;qx{#v9YLr zxhSsM(d;XL3@Ta}!Ncd(QA>*t0h&hoP;klF4lkpFS||U~rO_5m=U`7tPAB$QT5?+O zFkO=#6mm=_OwEHeyHWi?US+f&J;nY9p9MxM8xF%cZHVn3LDvRV7zNmYr-_Ar9Qk!|^7=nMqlN6gmra17QR=PDjWHYt!HGZp0K9%CpFyHe zrB&83Q7Hot8j-_*9f7S38tBTF2`05`GD=j7epSaVT+^@8%RJ&bxpb`G$>s#VrBCtV z9|B@XcIq7->o=2mFadI1TQ6WGFLC*} z$Gx8SK67uJds+~%kIyt^_1q&_1qpYLdV!3uvl+Q1k5BGCZAswk%cp(e>}O4P{_S^t z%l)%=mw%Y8`Za$3`QPnpsDE^M`Rg`IJ$ZZi>Q{Y;=FN+$W@y)&BFYg@W>Y5L4EY>a z$#K(vGU*%xe5FD+&7~2F>=ilzvI@|PXR;ueh#k!4U z$!78n9yuTQP~UJ>HY2H1pvw=~oW9u%dkWhBedwsRQD5KsTU$ON3u!fqY0gE+&Xotx zbed7LStf>fqwVBlTAkzGdad=xELCi-9fdF2kzK9nbIOcNhCIHG4$}#8^GCkHRX;k3 zn$h9-9diibTmMkLview|4iXRT0hv@d3kHwgH+nUaP~UVfS!q@;!Lc4oQX;btHWHc^ z_jE!9-nWyU%*Ys;(Mc9W6da<_KRih>`HvQ?aM1n5uwZ|Y*QRqShum@jWT$qwGup2A z^5buKT*K*1Hr1Pq@DObH||0PuG*r`r6f&k8DDW9y+u< z)vKYdWiPXYTUQ21PN&(o2%IK`96NGHzcX14N7v+CA3o9^nFBv_dBuTzvKj;;(RP43 z$a%122)s|PJ$*m8D=W#dZn?{9qj?S`#I#( zqY+c(DiMlhfI-EyCP!pPS0iwS$Mvval5nu=z}Wd43@N|`V#*R#3kXU8bueOh9Z_0; z8hbyOAIS2@z(+Z^Hs17c5)C|=E+2juht7k)yVBs@KLxs8v99H>VJ9v^(Q^f^>;4czWBSBzxL_RE`R;guP#4+{`KY0zWc@HyFMWEu653b zMoJy;as+ciXuP%)`Cf0!(+&*eA7mzXEnT@|EVMCb*Vz|5=6pB5kulKTOzlSnJ4jy9 zo3m=#nnSrJ24_H5M^nJBz5-}c&i!^FhXExZtkWGLk`H+4d>#8;wA+ZssjoYdA28ZT zXRG(3M;o(S&N?2Lz;TjqcVGGH?sLKM$3Cg=!!u~+xIF5}+0bhpT@YlW6i2)bQ(^9E zxQAoqf^X^`j?tO(y_R3y@_Z+LY06g){C#Z5*-mUscT*euX z)Zdz#`958rDwqNzvpgJcheP?Ma5*jgWcBsCYp^k6ePF+nmctX6C~F4d9S2Y#^gdi4 z;ES)VYsUv3%y-)xbzrB1?G@mV&`7tp+iqzVs;_{e8>?X@P#OfbTN(Zo@Ao$ 
z4l+zvnLvDOpFvLPWE;#_4ruj*LuU`7(z-tg!Hsr#*R=1m*pbPbjlpE?6QMvV<0%|p zL-nl&OONlaVDJPlw8DXhy%u`=BFL(l(=+Q5eR!QsB4_vy4(yU`lpPvb5q$`_5Q9rf z%XJ@I_Fyeo=E2)}@RU7&)RGJY(OaSi4?oerWhR3#um|I`k;;42SGi?^mA+&*lxybu z{^yY%fe;edS^tE4BaAAJg8l27^ngIsI;Di_N_MW*j-q1lq)yo&SE7?E$Q{a#(ZQ90 ziQrFAA|nmY&|M3p9A3u?=Ih>^HW<2p5?>t_S)DSjBA=*_P*8M!#&cy>uJSXc!Bpo= zDW(+R+{g}+$|wzvIvPyFNz?WdZs%%WVJ5(i!{?C%5YL`&w5$O(?<5sdRF>Le{Ct+=D+9OI@c1wc^zx9YrCZ_3wXYoqP(-Y zDOvhb+p8CMm)B4J+T}0W1MsgN{{74E|Li}y{OWiAv-S}@y!`FQ|M}(f_y2Xfo_|r> zG;{F&Cw=3-ub%a0ImXX3@g#-j!y`t=?D)O5)RMO5=Bh-vZ|xbgN7m^ZHz4m_<1JqR z03ZNKL_t)vGl-`h=+b6z$SZM%Ij05?3~KfNs<#Abn%eaIp$Z&C>2o*y<&2ITWarHc z1l4+?y%E+wIaX%{VBsW93C{sZFxs!d=e)b!`|RoG)d3s*PsTZ5p9*$=95JWfeLUh`t1iZP?hCz5x17KEe^}3`jLbTbd z@G7&wq;j`BRD0-p|EoW_eDzHMG`nO5VU$v4y2`$d^QA6muEgwLJOw405A(s)stFS< zte(n}(enqorL&Dg;aV9tlNp%Z_*JIA^;LM%@A(oS1AJz-!W19aQ~dzC*1g(J%s?@E zx;CqH^oNr?)_L_SV>rV$?vdAcq0dk3ckz?XkM3x~;YT_C&Gg0FXss;UW@c>qPl#jg zDqr+X9?^F@bJQBAa>p4@+Yc{b`GJpx*)0hBO5%>wbM>e^4UWo$f&++`&|}T zLj%lDN(e6H-x zQ6A!I-0+MEbtooDA>66I%x(EGRX$oN0+PF0M=&b$Q4#C3>M=~F=W4To(GgLN=^N-l z9h#GP;8m{rlLtPlw<%0Qj{%(wdE+fmdjw@vO!Je6ayQDR_DJ;Gai!sUZzM>G<#7nz ztjyXb9Mt+Wd^M+x8S5#9N7|b9@0sssjrOf^i%QjgSld1P(0kr{)0}tG`%e3LKl|N2 zq57oFfF5P>&iB16M#l>F<2F6Ld)Jb|i$BZA{DaH4PhMXB>W}}Ymw)xEcb6Z2{NnQY z4}Ns{@XH66pM~nPpZxUl$A9mSF8|B_?SFQ8*=tij@ zJhhKpWY1Vu$8~M1vB{-^-TFK7k*yXTVA}mCzUWGQ$H&?tyh96lI>?KI{4!p|gJ#6iJkNQNv=|GDT3()9d#@{SPiBVWyvTs5q zV!Eae{h3`rp=tflth`_bhg12k)FTX=57!5)zv|l3Pl4{5y9&9}(N#_el5kvjZAQR%MSf52u95vUB-I$6~f4=>3N< z9AroyXpo*hn9d%~@@CB4#_KWcWNA0P{+~q9%pq2HHZOZ(CdAtp*%KQ=w>(k+Rl&-c z_J%1q!QDu(z-lvb&05_xyYTtvKe_x*{`fz*{PI`-Q0Z`8b-ntU-uzUKH2bI1OlIK5H(iSr z{n5ymU42`+FG|9VGWrPTSPE~9%nsvA+V0^iVSRg@apb}@Jw?wt>=n1ReILD#Yp;FP zF+}z~!6$gOL<<7&suD~82hSCF+5+w)EIpuG<;aYFk$AkEjjRoV+TXGzi$?i{|H)o( z%Ep=c_t>hc$J$_F$hw#Vho_CCXxb6o5i#1vl0uY2uk%J-zVtlf40#7sc%P`5GgNnb;wBwp6CXI7h@eD&=& z(fDN>-oCl~;FA{x7S;VhySqRB>E&0ie|`DYkN)SEpZxgK%TMbZANQ>Ho7ca){JlT^ zo0os|fBmP~x9EfNkSdWgl274phWJ4!OL=q}o+-At!wt6}KXHPQ@@Q&UB zJeK{{o8KrFKfx^kIJoIh%SxrqfT0J1oTs3JG!}#f0(}m_2FPJw+Mce6&x8VaaLWt6 z>(O{M@ZEG9jU}H0JWyD+742UzWiAU2+SkosqD`O@|5J~0(Vaq?Ig!2qhIG>5y#aE~ z#m1bx9{nJw%eR$%+z$UOqu~dQ0u4Wt8@K|k;N~!V;`u{Q=FWX?ta|k9?cx{^56V-e|q^p+6(YT+ldG5U>ykJe2`tbLz)6M ze{F+Wb2tlnXNncGanZW}s#JH?EAI&tUBEut%-Wyw_^CAkdqhXf!@U8Cw#SD?fR+r# z+7o>xM*%Bz_`JJo*)RMXi6db!lS4E|!V$Eu%CcoXaJEt&_=*4E?tUzkzy78A^Ck3M z17&My=Cjm%U+}&wwAA5=wDo7|9LQaeXY}~yLC{7+_sb1N?v~_8X@iW4+y}C4_l+L> zHA{1OIU;Wx3_NOpq%*bz`Jne=%xpr+*6}u;qFInQduLnGHF?-z)ctHyT7UQb#BAoi zy@+QcVseZR#3L!Ipw4U;@ zNAM*O3Upm?oOVw$jnmL}1^UYD1Siz=VNmBgID;9iE8$@7v4vnw2o==mO}7$+@+Y{s z!YJ>anRU)rBS0fVWsn;g#L$&~6h}mA2QR$Cd8HHiryM?%9D${s`#JOAin3fY1VPY7 z-r-=3rc0MrE83asNQ7s=quc!z-f2qk9NrdbR%A3Bl;Jos8{K2OhKGd#66vNd3M+4~ zf^F&1=+Y}|omAzSW-|f~Sg|>4yF9bbjWjbHk1cW3oPepqA&d{scx%1U{ruA&4{TlX z#b*T-o&NJfhrj&%(_XUDiEn@S=-<2i_JOY~=jwBBGmN`t)pWJB(l+EZZG97OM&ui< z;w4%+pmnrnc3ALaa<#EQ3OXE0w9YRbayE3hPN)1mt9vl_Klci8!2$cAU%kuwdD zb{04!InWOs5+h_VR^D#$bP|<=Kkm?*QrF?rXB6I#^j&|5c9YEXuD^R6(jyCkh=52b zA$Yzj8|?xYxYSoE7CoObdYO_MkL(O20qJz@w2 zQRU5cltF87`kR9+T^l$~qxz$}a{WvniYKS_YL#2n!C1zDH~#5Jhn;)_;?SP8KQi9y zrSo(f=_cK(f;vyPeG4t6khy1KlaZr^_vmPw#qfuJdlk;p4OnVm&{4`}$aMa+>z~4- zL%>ZZ28Hv{WrIVm{RX0PKH+giwv=(~4hV)ll6q-mOIB#3x4w=1%~azHy6LLA=ZTpT z9E5|GK4c5QeV`PEP4Vvu7CEGnl%ASGhvvwlDQ874M-i^NqI1I(74~-D?XxTj*j`d(3 z3#gok5H*!MG*p0)eexOV3UuvXTUqzO6AhF{)Jo@jD)T6V$Cl1`4mE>6QO-!M7zDV4 zrN4wf+6TCL0AI;l{>pVFo`mgGnhr5tE0fE2_9!A|5}MeqXkqAP6w|819KzN=sB>C!U#{@?ZO;woVA&g=!WEK zsFXVcoUxM&JV%*Vk*xervMo`A)Ic?wn`jvwj3WAavg~yJ6(Db~ntkc{zdFlLI%*Z1 zS55DGtxI6;o<5}=$IVfj;|KcD1UoJgUaQS%~ 
zzmnAD_tN#&M&JIc%C$T8!^6wp|Knd>e*M4y&gD1Xesk&Nub0>D!rzYRb)u1*AT}b5 zrghxpL~!?v2iY;YPj~Q5jjkx$qc=KthPcqEn_Jb$stej1rz$|?x)TBA*Y6->E>FPFmd>U*)gtA6wqiGlppM!Ha9 z;hvKU8+&z+UU2BoO{udN`Dj;Pa7X`#cR)vA|!e^ zaH2zX(I4B{Zt03*%GYsdD#m9xFe80ILwNBzAqA9-I8Jip?dfhEVYJ^6LieEhnG?MK z9{%J{km1=3NYI1Lu?m_P&X!q4l1k=|kLyjd6di0o8qQ|@1;R%qNl?dPHF5iI3D4Co`3qWX1MRS2cirV=} zcF^zfnaP=zY5R!a(-{>NmejMy$+CkN~+M*xBu zZ8B)Yv}<2GC=1+lw0=GN+3c6RWH^bH|JbIddj)nsC};Ey$3Pz8&Kwm$IAXdw7`sKi zbOsUdX(sw4RYM-V>Eu6T++K1*5X^uPa60QONU}OcX!p3gT^c})jB_|2kV&G3aWD8; z*pf=FrpguDAq_xxkG^vPA6o4=&GBUzM3~914DyYX+^oT%zaM2_H-)V8AI;rEG}z&{ zPPnC%XgbabnDVRR$FK6p191jY8hAEnEw6MPli5+DLu=4pJ^Q@Q-y>u%d#js~^Q#^K zd`hv6HXilrQp&bg=6r-X_>9H+_f1pm&kGD*bn4#+FNw*p+d42^Jb(2EHH6l}n&Nxj zS>``{{Wm>Q_(`)2zvz^=N0-+MV5%W% z_rtB^`GvoqIgGxm!>~9#Eo*1>M^`8+6%6bk)v{%2n^DNjANQ=fHPH_RWzX9o??roi zOJuUw4y=PejfTmxilZ$%N3S0ZeE~=?XA6K|+a`RQ1tL50O5XDqrK5gD71xQNpO)9G z<8qxq@YDSqJ=+S#vA2@TI{&J7^#P?!2I#72Oa`whd9>em3VWn7`}DL2FXUjCJt5+F zVAFATjr??E*;$w)EkJL2J#sRz3Ga0F*^OuXJsDHEhm8`?ISo1J8rLl$7i3$K`{L72 zF8}dA_@m3;{CEHB<(>ALzeEc*&$H-grN0u^S(F~$Yl(33VRc&M6Dc8Z{@Y?6&~K<=xiCYJ(|5Cx2#Yvq5zuRG9$` z=z-OL@EUc(8>Q)?c3MgHzThRa9@3-=nSLpA|0O&+b-S_P$g4%jJu+IJ1+Z^kso;<5|oxh;! zX7V_CIWs2NF&zrlHbW6zgy?r9#zVHg&UeWvH~FCTerZJcjo_!(G>S=d1JZ@dJ$m28 zd`s~5zJdivmC1%n7`)Y@>q!`>Bf|_SI>D}d@?m55XCA1NI7eNX-O2RT_dZo{NugTh zX@rM?5j&Vw^eg(>B?#Ta6f}4#3M3ux)7GGMY9?M*vg>t_RSw<$W+raH?@zfo{6L=Q zMT+uQk2c3amVj%1pVXA45lwir2CXTvAAe3o-;w zsRsdEf&dBw|Naw(@p&5ds9!EeAkU8R2N`8O!iY3w#XX&q?H+8$zozMspQk+y;6*ks zTl3d&to85hcRH>*F;4+JbxwOQd_Qa1z=jsbp`z{m+YX z`OMyiR>%IXk>9gmcrW@hx~l>H=l}VCcKOR+|Cg7qzIu51!5h*_$LB01;T|2vB^eit z2>Lv`>Tw&gx^d3-WO(F6vYR5##>jkm;vlN6o;taQ+18E*tr4)rb+mL_5i-|_!|8qS z;B1=v3Sw*Tbk@%T0CdXh?`a)`ph_-hEZ7ndldG_Sjd@uRx!DIG{n3nvL#eFMZX|() z_ANowkaezyA2bfqPkb|(rLp~*xod#{`1FmqEm?GZocBE~(NSGTnw}Yh3Zeyayaf-v zqXj+59<0@Y>e%W$bYAIv zyibbERXMW}XsNER((WH!y9IU<)%hQhMe5)|qEVv2M>U*Mr+-`I3x6zA~+tO|?-!2$v|9&)Nb9L;G z?YrsaKsAR$2hlhjiWxpEupqB>Y(1d?S2SJ<1|Ad~Xd8FUp4q_rkN)8wUH-|R{@LZ- z54|Wzhqe0Qj}5`<*kQx*B!I)rRGW;>5=u0(6RQTOF59!T-_a>if|H>2OuOPa%Cy(aCnkF^WL8IsDl_UL;dZj))=MD|bL6VX!M4oy$TL2Q(Wf?LOVb z2&jTGi_oU6(o?297U4=KX>`rVR>#EvMtnbT4oEN;c$9bO0ZaZB|7tUYQ+{PjQ+9$Z zUD^930@r5*aZM|KofRg0`XUGNVo>0c36U*shF77!5x-IXwJO}FF&z4o@xyS)rmMka z)T^74`2ta-hxqH;_`OZ1d@dWR5 z1ksZ5Zo05_uXUaU7;xRbe;plX=Ho%@Ts8_`ngw|GOo*d{^u0MYLmyOKptT!#5 zd|R{s*n87oyej|E+sl9dPk%diq%rLID~E9V{s3iqk0cUK*_N+a`4 z{paD~4v=3Zkhcz*9S43ke5PBgqwSV&*YZY|UE?hu6|PekVe9D2Barb2yZkE8p5X$2 zx}vjDJ9_cFa%5Y5wxzuj#(f7YeY=z+5S)EZE3e@vxH-veE7@}d(^tWtY5MK*PA?A~ z6&y_SP7gkI`X8eN1I_6|5Ws%8YH~j`(DRY+WDbAT$dIh)122!-Ot&{ACNMm@mcCJR z_0<&{J~SH>@r`i~pE+c(1a$W=fSTN*VM{Z0*oSxJqD25fMy}bcKrop5ZuUysrF%yJ z*9gKv&(TL8*`6Kl!(Tfy?cCc9Ivl1aAquu1wPex`fCew7q~T9q;VIUx_WNLwg8&Bq zc%jr}j;45WdZ49--~9ToE`Rw=8_EhSC^kwf6II(lNUo))FY@DGl|i@8k0!dA&d#8r z%9WYT_I=2CC5j&yR_#<{U=7q`3Q8$+j3C6QC z|3I#+w1hW*JsJX4=A@U74?GO0pm02!Ug=wBPA$X^w!myGnW-qk->&=Y<~(yBo&Sz6 z)1&cU`@rjWLOM95!%ZgjI`OlT<9}trikFcrZ-4!(&cNxkqwRTvxfl6;FQ>9m{854W z{AzV4Hz>f;_QJFPN**6TS>qY}<4?M3_HT_rZl5UIv?B^9jYKDF#&~6hF$U~-az}*d{piRn{C~*u%PsxKB zY=xCNLZ3_~5K}`N1zoj#|7g_so)UtuUOZ2L8aA4OH{YtUq7QsB4L8{yVN_GutE`bx z_MxRh$M~)?9FE5KkX3nf1sQM3p{|E0$O3j{u~^S@<)PdnQ8d87E}?M~Jx zI3usEecFkDob1zP1i&$xoRgg3q9J!T9puP7^SxKw*64ru+rPPd{mtJt_5X{O0lvKa z?O*=+_5Hy^1u3v%U^%@@$y4~Y`!FT z{fpnz`Hk`MLkQD`HciI;%%LS?vSqMm`Z#BuUN+?z-z)(XMTP*Bt`~Sh6`xIb9=*pu zr)l4`N8!kW>^TB3P#55}0Tn#sYZ@OGW$byg8AhYsVK8+*8;w_XqlT2aCKWL9)Sw&} z0LScu-VhLuxCot&r|%T3)2}Gvwz1E>pTT7p>y`?xu0Pw~fNrwrCk6N42rdbu-A{;f zBa@v@3_a|{eiC{osEtH(UT&)vJ*#|A*&Vt2G=h*77xPKio-U1&UEyj0uaA|8rLnk~0K@hzxUaZPdTU#PgV@?Mbu$d)WJbgk@Ljxn 
zLW1{4lBK2^Q?mayd%@OmoJNZe3Zg7g=@{%N+q(q9HDse93t|K1J+Az#&g=M-KmNnZ z|L>pw?WL792$K&!x?oI|t@P|0S~r5tEHoPx1RzL&neOlXkn$Rz;-Mx_cCn7JG(MS( zq*eQ;_F&Xa7TOb3d#o%wvPQl^gSJwJ!NQ)APiKQ)+3hCpo1_<{&|y4GPhHx(Vk%$0 ziG(IY;v*W^gU+p4L_#U?Q?|kZG2$TY2TqGD#jQ!rBQ_dKnfn9TyrB&}ZKLolCYWYU?r>VR`D8v4Ej+_Z|91F2i+XImNGX zG*;~#%X;8%RF<$r;HD0r^~m4zM))uL_SoO#lkwvDQHuJO1Z*Av03ZNKL_t*K^DoM` z=3j)IGum6s8pV*i&f!_X0vGV8=aNsI7m22G0CJ#&Y;Kf3(z z)xW&_(LegXT|T~fd-?IlziQ3?b@FSJQ^fhAeFX1szHcVt({?uh!^{8szxva;^||8Y z2R;MeD~H3m_08yDj)Y~@H6tan(ulzH`{bHV*%b~K{H@VXOVcH}y13U{M1`=1m#mg1 zdwL98@=<*2XXxsOj`-bC*!800)hH$1vHOoH+8Xdi(v1}N46aH!Tph5!3t9*_Mc*Bs z(|WuHCwVI){{&vHhjIAV9!3{iPJeWy@?dL&0yH*K8eXG`oX+$*P-RRGIXFma(C{1> zLse?L!&&-d9zpJpQ{@!vm~!-{lXY+o7kby4xApEvk3U@A1lqFzkDs&OM!L6T+@`i3 z`#3(LdeM8P6=@a?>2=@HqHV^Lu4p^l(L(Yi9-7LO#(t}VPjoJ?x|4Cv^d~z}d=4#H zY(r5v<%SE(1UwmLT3uu@`HUO#dGk$A zpj3)vj&I6K1=HyJ*j0XXK>C+VGD-wvr(fMUl(_H-N|!T>{_z9nhIDB;xlu?Rj*cFi zDKBt`_f)riFanJ2HAxq2U0}3edw345>YM|5&;fsc!*_7(u|;zCD?8W#hd_A0f*5TJ zjXuyjt#k>ck-=tKF^~>bXS&e#)ph@)UAqu)X}2WImrN(M&8c??z{>Abo=r(l9fy_y zLjc*rpNg77zY};g1GXifgns&POB0p|vWI2{Sq`fpdwyA;u(mkCPr`$XyDbl(9B;|y zjL@Tj46Z+vpd&V$FoA3etozAM#Y)y5M{?I>u~R$+)M)5OkVBR{W)b`#!>Q6X6LzWM z_R(~?6FlPwVH1_BF0x{o8LH_j6;eDA&or}}ZEh)l>$xzLMnR0sNRDyMxsZ&=k zBt6uOsQqIV{T>Cb1bUHl1cWllDT3X*`{r zsG*Q@L}(0)d@TkldZ*c2oLJcQbM+|5@)bw|4fl zKeCC)oVsO!OgDq$WIfjMq_fJOa@vL(&4~W+)mNPi_`~II|M~xM`Rr#uyL|WSzwBv( zpEc|MeL8Mkw2gyLo_|rJXfMKNUtVr|T<{YnWBE}m5#t&I@YP$<8|bpq2``7^w}k{g}-cK@Wgm}xGkkh(c8`Hr3d z!kK(G74Dm&jFe++JmO)$r9~&~OYi>%5akiEO>saX#052yJ5ZFjOArXc$ z0o;bH^4K8x@ICjVM#a|No6e_^+jjXg0-eFbCpplk-PdCjGFLCXCc6(?<0qre0dP+2 z^p$rEw52gLaL6+_-~1@fV&XXW;1-lSQ0jThcVB$|+2xOa|L2$A_Smf@c}6RCttukdGq&@`tlS<7%ROHb+iGX`y|7K zgAqi_CmqSrT-rL)O5iE=w)9dd=^;FW(R+I~|TKY@GG z7NyWLxi-RXW4IGMPtjms+t~6ezs?^$P_egu@Xo}2u3y1&Rj#%%+rUHjh*Q3QTpeCM z+GKbQTmUGkI%A?bm1Gwck-=@Nef|eKAGO&NGFFI_3O1?4>Gi(AvA*We9^QgMpT{2( zg6UA5f!P606X%Ntbxs7>&y<3IW9@D!9&XG?+8?Y+dn z7}_o})DcN2SA5s1-7^UAk;q_Ol~lP22n_fAY4~(IyC0p$g{%`=Lt#MuQ9@PWA88o1;q}AP?hw!Sm(Inw@bJwFK)D*T;He^p5 z!Efg=7{LSM-MhPH0CL&Yf6|EmNx=Y^f|obFi(MJV(s~r|u4y<=0(cbUX^$^?=lr85 zjr;{VHHDiFy|}r1Tc`CRTWMLLBTKgf{_;t*w*~6UzuU8#Kl$UIU;g61{VJF&Q!H6Tnc&EEjc$%B?g)e2`kwJpV?rj< z$+*1fLE}?c+!}C>;h06~T^k+DD%s=ijS~X_S4h?zYvk-B zcQnL@hweEeJT> z!>^Nl9IekkzDaO8*=$Bf|Lo9bBh<&_`LvTGAJqYV)FD>lin7e=fH|EjjQ(hjzja`c zN4IuJA7sDj;lANsJvq^EPv`8O6R#4-xAcsLzu}8YIq=~OWkM*W;>w3>yZFOpx3r#a z=2HISjh-r*7(GhGW{uz<+Td4v=*XFCM-cx#e$y7$%k$>1@60m!Ycn5Qdqjc~IMdo0 zik2RZYt*;zq>w(~f{-n>r=SM>AODAc@A6On?B8788}%^(vT!jjy0_CkTR1Zq72%t@ zr%!Y%wh=nn#GDS@haL4$<_yK%iXG{X(9K~g}sq*mwgtS5{d8@-v~ zC?n*pdw02_@r7Jy)!a-FS|CgZW`J^nRzBqRLkP>9F8E9}Pf%lKQV?ZCoA{%5OAg9Z zXH)uz_~2wO*)_X2m?6_cd*uXj%Jq(t8AbTnigsW&60MUBy)B5&SIs_7`e?t;tKEhm znEBECr889ovdtCbH~3r)D4PrJ#2g(76oU~mMyHo5m9QQlJsdj`=&Y~6TQ;^p??|rV z@fO(?jwoydv$Uv>9#R84pBR1M@D^-CvRRoi$-l#eoQU(|b9^TqS6JR6303li@R&?M zt_B(PR5o}cvcl_Nxld6gQnKuI(xr(WT@x?^I10uGrq>M0gSAiJ9Nqo>%qD#6y14`EfG`_dBwm*1#Wk zP|2enQPKz>bryh;p#1~*aiZCWru|=i`m1IczH4uR(ZHpB2*2pQ^WQgnkp0pB@J}{J?`NuwgU$(YVEvFcdFr&8GELz^y`bd13>;%{m01U9*1tn61dFhMNP%uRwp-m5JMHxvrxDPaC~* zc3lT{pTWFNDQ{!f@RU^nPX(mP{tg|U_uW!bRz{v+5|E`e=R=U^Mrq+FvoUYZsNvmS zw_BTjSeeF9(b!)zBe;3^luV6g^8-J1-ZXgHluiYW+Xs~GZEbw~;V?4MKv$~pGcJ8c z)25n{lEB99(v@=uD!-E~kX{nqY^vA#yZp-Y8*EGv>>2&rj&(l4V!T%I&|GDB)7K-j z9_@9w>v2}3IS+$f+~x4^IuY@)7r<256!C4FVISI8LvD`>q)nw4T(9$vr-zLo%Q<|a zrJp?+5N~5Uw4N^UlAAyJy%LJ$|!^cNNe~fKzgIrC$x+WHJs9 z3`KVvy_7qesz&%*Qaj1%4&H0s=OJfaH`LLW;&9g5}HG4|tU&uo*+CD+f-+kvG*Ho6${P;2lq1c72C=^@esGts(cg3?{oUo8ud7qTaPpri zoL8T;UaR4Filp*hh{O$k$cQPEPrqr+(P&MhqDREM_~eTPLJkMA1N;5k?=ElO{3bps 
z6E8Oz`m;}8?2$ij{rmbiUtK=?`R`s{_r}1eS@gSxptEg8;90W@U-q7WFF${B`Qc5E zK+#i9WjnU#aE(Uj{1~dnNC$q{fm8g(c08~vu(+w7 zTT)a64;(tN$&=j4R5LuAlV;NWQ%-)D>(q@vIHIh?uHSyl-NS_qryg>U;IM^#54OO@ zhl4tH$u)t?PBN$2h+7@kf*-||mDeTOhog%<&RFULN73R%W`Y`z@~ z$bOp#yC+Y*2`9XfL*>{#+86VXY5e(jWPiQ|7oK}|qU4r{Ou3Ja>-6f{Y6vC+c(v_R zWRxx#rEe`QIno7w&5YosjQ!EH>wCYW1E0M1uk_tl=86P71;%IJg0u$(uJ<4(#B6}9 zA2)I{*K;=evdUv$Z0>*$RwHRao3+x1?ecejg7;`AQ)?v3FSr);Ib19LYAfiT<%A4> z^N9c_mq*FlTL;Hy@{@dIz#cl-tHAy94(0j}{?5-Y|MJJLYJbV3wsoGyh$ns~-i&OI zhk$kFKbZU;S#Gd3nM*!Ao{psQ6H2ty#D$PkNg|QlY^>aAijbzR)2GOz12j{L(!TprGx%FK z&xThfUnuY5M`HG_wL{_cS<+2+e4NJ7L4!y*~zZu=YPZ$}yzDS8lA<;5Z6( zx?h3vhOB~ck0(MK#_s#(5am^ExV0qr6B1Y}-b_Io&CWEIT0W-(b?(2;{5N`{PaF8t;8=mp>SjAD+|2jF!I0jBr#)PRZ;@CLLV!G%AgQ zIGMANILB|DmE$=h=wln(7)f6h-z-2Cq@DG8X*EN*gJU4^xC3K0%^D3`J1lSYlAkhg zfyJqCv~DZ1yy;gte=*1AW(Uet0z7on6RwyHXE;aQ_pxQbbwKI_r$TTH4~g98fuT+r zv5?NP5#`8*Zm;-DsFIOT<5>lLHXp))~4+LmiTy(R=6x9PU@vcj*fp zDiV@9e>}n=y?mV+`2Ln2t-;(MdT4fZO2a2!u5={;4qjS~vu|+OgKrk$nv4&B%B)Cb8mfls|7l))C#c@`_r`DpJd;HQub+68`}dzWjx*Q+u7jpJ~f&CTlmpY`abJ` zk1Zel8|1~$mCwN}!w+5}=@7&g0bKhmui$S%oN^U<{?x|jMqAxq4SDDmC`>XSUileJ zG6~*fQ(m}6V|2`(M?4)UCqFXubH0a#z2nih{8RSOxWcP5xX@%*gFgZ9YEVX1JS{*N zePJB#L&w@q>1_^w{*O;&lQuFAAACSC2Q$K@MbL6qWI|j1%10zd$GC(8=O`SKT2vq9 zfjGiD#Hv@q{utLBq5D3Bv<9qkERtjx<&ANhNdo|%Ao*` zn1FZN66=8p$CeoS_ROq?qui8I(X;%(MdPV--x^kM{mDnKyaOEGN>^|r^2*D2-$p)q zJIaXO>ORYvJa1HDz3`JZ_I=XSynLhC$uA&SeabOZo-?{X`OjKEIXuk~F$+%KwJFcf zzWlw*=U@JQqY#e^c4}bx*79##+yC(1M!z2}FF*ff4W>;}jiS6g@X7O6^&dL@GcC9c zWI42(ULE_OGH(hh{`TANE`RpVzdAEd?1$3FcEQY5Cv>_sJc3R(GwC|5>6gruBdF+= zx5rf`)FUU>MAjPars^sq4eu-~pKxH|nVd=+#9$o91#W*Ok|EIBtKh181aH=-0v@rV zHV4Dxil_4mYy*^ftWAvt9itl(c)*s<4mj~{N?#kPA(~~80QO{{gH$1T&|_aCWy=Wb z5S0b1^6I(hk4=&1S=K6RAQ(RT0IsrNU16gE93OPe=G~NkI%hb_SLO(h3R zyQLlS{hvLQj-TmOmFqp?_#k;pMMxh(xt6ru?a^c}j1xuIn&Pzr*E-tKztssi?0LGn z%3h~@Z)3ABINIFCrq&Oj{p$DGqqM=~7sH4h|8)EtUH6Sv?H4`)nYX?2z2c(H9GGFK z9un5ZqIWu0HNNySZ0jdqT|oi%=mB&cEdU0C=d%|#{gz{a^pq6sd3Fs*6`n`H9Ua3<)?VfA*CvjJ<6VE=Q4GwpG)2XQ( zvlv@@k;hO7^}Z5%P+rJreR@k3yVsEszLD=y>S4n*ej!L+8Q(g5C|Cc;VfoTD@V-wC z@H1@FN;Qa|8gu!b^|04rK5rw|%U6#spSJezP#{zPuR4V1vyK&wi*r0{opjV)-o5S& z>m2sY+cx+0tmttXSyRl}BVhXfd7b&w7q2d#{OorxFM3Vs(?<1A8j+YqaN60UoS}DP zzijRQ$+OSrd;}C8VcJHt8n*RH)7)dE^6zsV-+cG_^1uFXe|gE5HEOBBW-n)yTe0+a zRoC=QLz9BeKlu+%4UN;R+T@l9qWj*C*g;ZDMwJR)$|BqTR-T>qH$KT1vg+bLt3|-T zDVug?2Ttjtr_-sZT)n`a=2a(Q-rsaKjaQa=;5%wJAe+s)IiP1h$7bg?hI0ChztLMc zG@izaul}yK5RyIt!*AC=Ij@`{($6wg>DkF?XPiM}JNFBq>W;&+nK(2BYooa;FJP`5 zC#)*&`OavqyWGc6tNsH!Z&Nv4l9a$kP3&vCDf z!54mbM$7mFQzt_5GC2spS=1^20U=mPg$eOBVF9Q9jxSG`_}Twu_f5Z(9lo2i$rW@)-OpZO6tB zis3K+r;e1339Pd%{(+2UU^V^?63Sli5!0J_4F|AUVfo4EYNH2dc}VpUC@9NJkaWyt zS#TUW0xqv|>MtN5W+^c+8KnOOa)YB}Kc%G!(2sm7i*$bjMJf0o@991GmDT2t+%Z*| zij3v*@T@ujXlrbk_*0ZA&5f|);C{M!kGYf{`{6lCfsc*&XrKtZr-wbhI>i%}gs-H8QWIu(vo1z_^5so)>EqY< zGL{Lv3C&&sHmp5bo}IC(01#PFM!)COHZlorGOz}_y%w9&&3>X|o8s884i2vXuzUM8 z9y~`y_>ygbzsitnW_^C58YdltqUoddS^mv_9|3|OO7GEE-U>@S_J&FJj2gO18Z z-&rDSRnu7|F!}zSKluI2i~r@XFK=p-@9Cxb2^AM%IQ*XH&3QoC z$CjPb$EMjhe({Z>quZ`am#XcvM}B>6Dv`JVd&>jyzYUwoYWNVzKlOuV8%9g0j&vN; zzk9gpu=}MSy$uEwNbj3I7ihQ%t^n9H8rpQ~;~^Q6i*k6_+eIs{486VY@zXt3fI`Tk z!l?X#zGUV2t%FOg4OZhDtK9|zuU%F*lMh$*;|pib@{r4xeXSTRA&x%*Aa`SkP#SQ@-PoBYSx+TB@8v>!ITOgMFtIH z?qllILr{0}!6S#|lTCt$=TI@cMlAv$F9l&N3}V(({nN+-M;M8L#U0_D?`>8I96Sek z*DGorD>+>0nA1YT7$7IWGE&uMSt+@DyxgPLBtw`(X&36-UefcThqGGK*V(*iN#L_r zHYL@J9ahsb`bI85ZHIUIB6Ay#&U!Dc2Qf$U`rF@L9>4C1?v@rFKkg+z8RKm`{U3Bl zkm>#h4?ZpLS9JtE`qu#gcH#Q7k%esbKJG@y4;R6oDYuu+D!h6gZ7~?dk)=^$?%0WI zoUJo&uL;MA4P8;QIF+_~*Wc4&Qyi~!QV6%aNxzRMGE 
[GIT binary patch data omitted: base85-encoded contents of the added binary assets (the nod-icon.png and nod-logo.png logo images); the payload is not human-readable and its original line breaks were lost in extraction.]
zGQ((n%WMQ^H+0)ZdOYaIjt7U{XJmT!DPqHbb!2Ei-QtJkS#7BLq6dFv0;J`s!|PO% zfwyK4g3CeiJlw!^1Yv)DT{=~9Z9}@Bv_I#=d0VRN+fA4GBUV16S&0Z*Kw_-X;C_)S zK{Q(P7KyFDLmSwFcd`~k2^)ZhxX$vL$<2w?&6_w-G~qW=>R60zxh{~!a^x2nbzL3yhRG8@67-{0(snLf6M>X?^-57!we$4 z34P8XhO$R9QwS~9U&(ANX z1TtMvIUhPJ0dsr~c4g5K#*(o%vB9rX{}n7=t6M>3I)r1x@B^A&V#6|cV<7GDc)-d~ z&$^4TbbQ(kaCrE4kIsAVJ-fZ^*ZEB5-0!pdDT3FrOxr*u%}2Ed>mlObEb&z9?gab4eFC;gwKgWm~w^1Kea^XKordwcdl&-M4CM2{N)Jo@0nUH^a5{`H%FjOe{@ zZa@3=k8l6OfAinnUi|8l+p{O#{r}Nla{pQ3P9j^HW*Zhsrud(IJW3cFh$qW@8SE7X z;n26vc?bN?GVF1mp;a-O$A_m)&Y3|~HXEH-J26_F8uq@Z?OunDF~QfDdYspJg2%SP z<=p!q035t&0w(@W9@2R?`Yhz!ElkyYJYx3^VfzHs_g%E$S3PP?JQd`YM*1w z!;C0{W1l%Rg*adi#=a%uuEk11yZ#QLe<^8i;G-A?Do(;3-#aRl?~Yohxq5c13C{U3 z73>|mtbm3k{~4$<$^UUOc_EFB^!I-1tueM@@XqbSl7Krszx?dW+v|tP8_Id8Ago>p z<1S;Hn}1vu(b(R!Ml5mN_1E}Mo4Onn{L;jn=OjI=!h56VH4M6+(dv1O?#%vAXOeDg z6TCs!te=t##_4UUa2kvOh^GM9IIK*1*(eanP%=;CCFbMv^dV1rEh)40D#$n{@#f2@ zrPMkdD8U(Ykc%ACJq_4iK<+Zl&ZN7_1nYd&Nt>`(d~^(Jmzkmmf7`@3^0QM%$7T_m zJmRxsPj9%^kDd;S$frRCw@>=c4ar;mrd92-^l5CE5iR@Bfp{D9sO5-d8YogXLvwdTqOq$!0ql{xSqbE_<$0{T+VF z+)Iz$S2VKmoGg=KKe<(su}1Iu@HgEJ^{9Q9{92Mo&X+CvY5Dy! zpquK(?no=$NXKOwOAFc{G(_mY3m=l}p907*naR7+htXAMM4p|rDg$-4?3~{Ck&#^@fAK2pvfEo6 zxRl9J-f$huOqC9QG(`WqZ7h@0cAcUu~Op;o~mj z>RQ98P`Ar&#YuH>V98+dtVN zt0VT}uczI6^5Vtk#OUQYS+Mfg$^6>S_%^CGP}vK(Y8&UCp5)A*I`OP@d-qwgM)w^r z=c&!mQwO*~D2SMTRU2S$v9OtO`aaGURwP`N+sa1rS{ZoMEcS8F_djgu|8YO)^Wf?C z>-@jpa=`a*j~eWJ+AIA&>K6)s|Brrl```b^|M~X)AO3E$ex3fkA$GW}_E}JfuS9K? z@U%mZ6lzmN$7J#ArDQ(8{3wQ#l0Ke>kW12Kd8)i4_{32_{qa`y8RN8z_-ingM=b*O z4h27s+MW2w5*j-ruFZ59KX{-@fg; zU?)ci;}h<94D?XhAbPYQ<8|>~bzkYUj>w=Xh;Q=Yc>xtmXj^QRePcT=@;3!8bI9Yi z4RB2I=)F32WJTxE35Ag7(p(3#7v3FSwBOnZDA;Vjy7>coaT%RmV#W5ZOM9^Res>#4 z43?`gYTE!dL}$wHecv7S4s-9EhlNHujN$CzoPd{phuY}Dr^o#3gik2mMucz658NuQ zuc+1Q6_mT#rov;i9KhhV2ndR=-2tU>EMavx`V;joO8bPU(Yu>o(&VV=P~b|s>xgA( zW<|b0oS|ySlE{uZppT@w_o^?z9Z!_8-QzG3-W|0Q5U~JwkPOL5vb|6zqm7k1K`e>1 zUt>wGhyCR5>wW?7kvCpJB2y2hx_kJYe*Tkrh_IjHY(?Xn;3%^j{YpAx8Ja=G0yDV; z=4L-7Bm1pd%yDdD0Fez`zmG0E3;B)8CbCr#@c%Z`?+u38oE}N5g0A0uZZDT(1vK5PP z#e>b={4UW19)5I}#M?`(fqWB@uO)g?RrV%-?A4rB$g&R|kFKaGsS}l)f~CU-6503x zymZ|lv`&`40GB{$zu5LH+1JSy?*_W}(Qm+HVT z<6r#M?N5LFC%6CgzyD|XF~#ff?%O?K{-;T`fmC)(lll44H_uWx8;j}u@BIL&lE?C8 z0hXw3WbZ$D#r*PUa2Yt9s?xa{fYm$iID=TxL2Fwuf@f{2QgK@lga!6s^Rv9YoKrd8 zd$&ML*jMa9Tg4GdxyuJE;CVLL(5uDNsfiP@CjG2X>;NM z9_VTWq^?j85l)l`drHH{vl8EGd0rB;txmuN%$9iSkn7df(b69=ELhbu#&6QK?z2^# z9{URRl>uXP%>c*vEp2w|KV2TOBe~;oUP^;=$HAWeQ+GQ5(_vxyBwVvpHWk106_ajS z`??Mi{F8NPT_ca#6F*5?{CLDs#|%e1egD1pTS9-@l6_0v;eYYzC&50uz5ikFv=8pZ zin=;xKy@sT^)bbayVm(llAzprjT{N2C3w)ts%UbYJGUc6s?`gwZ1zJ2=Z zU**SEFItNK<?($2(7U`Ic=0nz>-2K^K4uQ5q9w)xIhs zCxM4|;#^BuyDQ$kBtVlwRng~g1AlymH&2Xp_3bBfI!0*tDs9;SN89LkzJ2hUb)Uu+ z+Ofa5J2wQgoyqb9Lnyp&`JdxxuevXOM1!&AFCtcSc+#~k1MEdxciEJ+Z4dy5Z4DrY z+%cW3HZk1g)9H4w`aXofe_OuFw=~Y#`HuTLKSJmvT=GT$W6E8hyMgZ|0mp|n!ISXX z9MTyHH~5GzupLfTvTth5X^euz$n*uVZ5@Wj51B)B?04k@InQ%X0SQMdB!4 zVm2E#Fpx~qurqVtOtg^}-j=!OH8mnab{*axHN+PxIqsbaB#nS$gFPe9&e7=H^1P=b z?vMG*cA@N&NO&Da)1ZQ`!Hz~J@meR+`2=eZ_XKL416lez(&ARIU$!6Bj@Mt;knp-E zkqH?hbTLn6301Njt;*SMBi@1=;BoD-#o=^1&%-D9=$!^gKKPxtzt=0o4==V`$J@y} zmO>Q3`E_ef)h<;Ka9sKGfuHn?mHlP~h2nctpN$<|v^uAGf_%`jBpZn%YS+MB3M6$WAN~*4%vVJpI_Hm|HV)K>h|-W{^IuK$G_=O#ya9&eDm=ye{uV? 
zLBx4+PAg!^|E5mz(@#FR{pMFKktg@-27}Mv|KRqMAOEfiw;Krjpn3ayw_ktu;`RrB z@>jQi^8fsc+n@j8zq!5q;>GQG9sA2Gk;Z0zWMgsn-Tjov#y;A487|xSiQ|;(Jd{AH zOUAXcMVC0L*cbmtbFM~U@Y>DQHZPvag~lRPqi|fzY;|c&ik*U2j*toEMhCxW z9@6%NQU>nW5bOCb9jy?O4j-QbOV?{#m9q;58C91b9m+X}HaP-V>>a9uy<|{V=4fZq zbXMg??F7(;cPOs>ZktKNPL*C|cU?i-PG5hd#^Fb#JU|I2dP&mke-2wf)iH<$*1i(q z;0e26IYEavR8EtWw~SNb+DsJwf(rnC;V?`{Gdx_pu(;^1jw8TTHZ#?Ugp5L_bUdi` z=&gA5o1s>wL%`O}D0k;=Wq{^Q@iii}x0n>}xqA)jqgFCB7~G7ujk?$2kesQ) zA&pi-8=`4+!!WvoSZwyS$OA8Ch+% z$_nHL1D6eKL%@xd)K?Z<+=B~nydXr$$h(ei_6*Qd2eVlt3a`Jk#4RyHBMGeejBi%P z4yPR7Q-4G_gOYeAoQ{zlc7se%?09VTt=&dY9XY_K*V`mU!FbxyeiMac5sL-`SM~P0 zX)k;OiOEQ(@XkF1+3jc>G8?ulW&?@vfs;bcy(XtV2%J(XYdUN*sIZ*BB(u=f+0QxY znuNhQC*KleIAc)^?Rj=aUP5XR&{Db8sONQHWCOUeM*Ei2{T%lOJ?ZqUS@`Fle^KZD zWo3`n*;-LBC~=A4Wxv|_MIErmxgOQ2zTbTU?u1{A>Du^<1`ogb>%YEz{Ogaa`?|7^Z!bT85zSy)U3lGq<;%}s z-ah)}FE{vbS;Bzt-48z4FK>SN#TU2V`CET$9rvRyQ5dWkh?v3iGnK9GSt)qkew|+S zns2kS`vSJyUx#nt@T3_(d~f!1qu-Nm@_Y8)huz@U5Bq%hTert9X_)yx`Cgs>v)f0X zyt@70fB1*DfAmlP`R!Lf`RVOVmoh#m2Z+a4DeFg|x~LMze*2{3`Lz8z-&E#ZwFJx_ z^JTsfL4&KbCfHep!`p6cQhP&ZEdzZ4b{fRuV}s~oWZ@)#RlQ(D^L|`;>Yu9YWR{HA zE;;ESP%n2VJ+W5I%NfZlK6gn?J1riqfR2(j)X~xDE&wIy7!H{U&_5jyaP(B;evl9B z8V}IQ*Evnw>Hs;u(?aJuj*bD$2*vZ*`s{Z(5N@zrB`8P#|8%`svn|PWm{)f=cW!i} z8%>Y|1(LETl0sMtDI`7lK@kpz{j>Oc{NNYA_`zW-9F{4`qGX8#U{WLo0^Mjddb+po zJnH+tmAUunCROL`U0IoHUMt7SsvQgEke+8h;3t~G@Yw?|)5$Kh`QZ)Dq?OA$$?0@> zG5Mzk6af1u|7t_z9lUZ*y#?aLqcTY>x|{DwUU4a(fgC`Zd;4z*v*nJ+lG|0mo3sh8 z=AFwEB>6VK&e~!f^7T?OXFFIpcH3}9&>zjkGM25R_pa?MG^UP{3jZtY&pd7PxmjK( zs!fEogON!AF->%hZFCIOGzUy!=cBO2UX+*Aq@!b*{DqERsVaheDK(mi2VuZ92r~q7 z)WMJ~je0a1Wh;fSvPKlTwmODuZ&-Te#V=;M39B12+!F98h#Bo)x$3*&DJ#&W37r!u zY&Ftwwlnp9r~`%_Fm&BiK-ntao;6F$rQ|w8^{|oyq|>&;wQC!(lnWlC>o{0@x2%S3 zdWV5#P$-U}F_)@r#cq&AxuQGkoxzC>TPS=H@bI%j*`c)Zq13>3mlH?%a+^PXKP=6b4=_fW{ zMuOZ_%t~WtXAYVS5ID-gV=vM7)<)Ju1ad*kQCN%I7fb#JtX&HFp&)N9}9ht13KVj+JMjCkEHt93*;yix*q}jZkwkujL zzhC8^zpDgx4!4;JKYa9vM&e~<&z~_HfARGA^5G+3p0d>bgy-Mg4G$mafAQJpEVDDv z09P7!7#z%CjMv}(@?&lTTsuDd)u+IHcKqVQ4?%fJXM5v#aQ}X{A|7&fCGG8Z-lD_* zlof(o9ATfE9h=qm+ zCDgEl4hcgcyeRsLDSVA>=G%cwVZpKGtrg|MU{=%q6?B-JWvx#TFTjuADj&T^f*NlW zNO0OiWuF=5+?R?L!Kk^O{+g4LNx6DSF3nh9ZP~fs7TmC1+x&fUU8I z2s*vyhfErEYfzkKLup5pX>G-ztBGX0JHzg?EqiFQ+3oI7L!Ls*land%_dr4uRe@6w zij)bf0UCJNfK)aegJt5&uNkoTpk)H-0&DPA89c*(=!2Q;qldH6d0db%$A9e*$ML=B z2`us5sp~!mEj>Ohg&p?F3cQq+!^&dCY%d3SQ%PqKBil@%EP(5yo?uW{{>jBQcFr@q zo`I3~qIpjPGvOHwxH_O%+7o`tY*O9fYsJV%@UksPLYeZhgH=*vS3UQhjXhIhDmeEh zfiOOVtHJVI9y!o)Fxo0jc211(je-@h$~28QWZ>vWe=(y&C^zdtpn=XyX4ejQUbIAS z2jp`2MN8^derLolo-qqWFE^~(C}=b5(qosg@7`uf{m$`#_trn6!+ptX-28j;=y?OR zJ8bl`!*2 zz&GEym)-T}7j&dBYI*W=Rt2m-_wL<0zVXd_tN?t;lLpAlQwnb}NOZgMDMmQ?Llq_0^MgvPFD)N@2{_J@R;B2d;Vw-dmh-(cN4wc|8V;o z@3OQ1ZD#$v+3z+pe`fzLkDJG%=U0xu`01~X|Knf&>*FUs`tkAP0s9uP;~U<7&J(`y zuBH$lx>`Sb?Qmcp#L5+)397a?A+lr!_12HB{30Hf-|0A8hh40q{o6W?FTg34GF0&So5SV zP2yTLT;UOJ=voseZd;j^h%nSWsf^vou>UpCr6Z9|U%47UP^>;S01+2nb?nH7xAixg z;)7F*q9_Z0^iUd(>>%{enf8BVjYl+Bc`)KtgGa{Y3yH;3m?d+ns|$V)16nFQ9WKdj zXObg~36crsdHBw?`p(h71RERQwdVjV$n>dg1~ea1BEB8{^v^?MVw8OUuz{lKx4{u6 zi>&KOn|m&w^uC4#T?Yyk5Q(GnhCxQ9;jb@)nSQX+#uOME@M?jc)8IK<(YPdleO5k2 zCmsfm^f)|ZAkE<93EE2 zFSMibX#r&p}lj3GQBn;oN#13X4;jX_=w8Jd!; z`ojN$7pS;W0aKQRx%FXiF3q7#J#*<&x^{SE09WUOtnI2NFWX8~-u&f1b)ih$@D>d_ zB4>E|tB?3NtG6rWG?~av`Mm{TnR(KY2Q(q-%0%~H&C*V2`P&tQ=7W zPru~vV#Cf<+S-T&CwaS9MXD%6oMUfg^t|jSzK_H)hmU1OwNKAe@B%&`BF}UJP{ZEE z4Y72Aoox`M=#soI>5+%K=3O0e0P9Pqs0iRpJl|%wyFPkF2XDi1gX>$Y9yp7?P3P_g z(`P*5CykcKvGZe=ZG)#g%a2icCjigQ`v@YP zt}8u{=(HXDy>*WPgmf5`_?sG*Im`Tl{RVgGh`;@v-{7_cwNt(8g$PkbUPfjg_0#H0c2hr=yNbpQz9*kR&!sJ9|00KkGz 
z=SWJOlnXm_oDmgcbS5UO&vg)HDsg1TPCIMxho;3bGN!Df{M-K5K`au{x2|rvtsnf& zsxAr$=izzbWDqX%g=NAgjA<=|Jx68IL#xf$meA(XVinHg~_l4*-MD5?u z&p+gEIrf>XOUKALGE8jHq5U0K0=!aE_sf zZ6%737@&$kkt5T}4LpN@#&zkUJuam!m#4x_x>k!X;r&sVxmSxJDV~vLC^h&tUZ+~A zOghYSOd+Z>B4jSGRAzI_j)$XBXm?nvOgmz;nVyw^?w^Yr;xHH*eWT`7I!R0pEKz7g zGcduig4qJ2Pj)#$f}vCL!N{(YoL$)YHK;kypR$flc?>VOFJwMI%UTdAjAG zMl>+m*G28Zi^|$_u5?NbUiLD%zNb_5#)VIWXfsTfX38`78olfZ;!22f z<$SrRU!5Gl#GBYQ#^3OQCu@MalW{sg<9~*4#bl(dl(A8k85CXNIrr)g1^P~Zd&aw@ z?dT~$&-`Q_e-1pQWCUu<&TjUj!*oekY&$*opa|=8`tBj|Eq4y0ydUmBP-D5;AqzVd zS4yPntokOMs&stx&f8DV>2U6FyTIE4w|(4h!S>@hG??Clx5M&Krswdy%LYF?`sYud z99MYu|B*9jVBdfL+s8d-_da^~oSDAz%kwTv{nzNQFK*mCK4(9`OWr$h_bq;Im;C}? zeEvCj?;hWH=j}EDe!_eAUp#x^Jyr1JOWq@3*Zi723f2!ig5PiOw!(|+JlD?# zz?-a6T<|l#KHhhO!OAr^GF-jq6IA?i-|OQ?AN=z8)9?S__^Utv%Qiv2K?Wc5^94(< zc~sWM)PVrQ0LbHe@Qm)~#;H!`nD|_===I7o{(4~zHn%=HAoN`XmU~-jZq*?@qEAOl zNgV@@Dt@VZ__viPV+@=b>?i@S@VArErx>->pLs-#eEcq-+CA&Iu)vtxnct=`AZoj9 zaE!3k_G-h?Y`68uygDFpJ)(ntt%`A-EhNb?&9O?sA-u%vOZA%kp-X}QFWaH$c^&f6 z#zo7{l!W#J!M3p;cI)9a*{V`|2V}N}2CySPWho!SGf_#@XhZ_)XRfQ`TRh3n07zin zhX2@8KS@Bp7(W9iZaU~6{OF~2uFo{-k)F%G;*n*&vJR@I8SbreQ=vzaX_w?E>+}`B z(-pA5Z28TuTzb|M+qoE6B3JV}{jFb%&I|xVP?=#s5czf*W+D;7=;Htt1t=q!rda2U zw?}{)Gx8`t$pZX1fUHddT${b=L=;?jOrgC^Lbu>Gy5c`DR+imJiCg137~n({nL^Ge z$*Lg8xZ)PB3QnFf)}yWK;7Ds^imZ6jw{zgC&T5S8tW4+2A0iGUY(HAOJ~3K~%#qsg?|q?MN1lt|5V-pjmo4ySzLN zk>X_AV1Ps3=-Fg-Uy}}!jb$&GF}gIRhpbS}J+z;5CGUN!LqmWim$R5=K@h9k0!Mk( z$A)zd&NY;p#QZgj*O!aVGduP@7Ed53nOOQd? zfz_4{EczR?aI0{_Sm~jC&ELHckF+m-anRR5M;+y7yzwV@tLYJ>H|OyJY;1@?xE>~^}c^ALNs{NiYATC)Dt%B(WwjTV{rYhJ)MHyx`CZt zy^*N1Yh&VNMkOBT_`4qr%L`TsG^3zChH8iAa&j-van`S>3=kdk`sm;@I(KL0K4Y)u zPoBJBMZs05Yjo_qgopt_yOfn5c^#BoXG5N=9uL1@&j1~}w*(wS+~OJiJ1pV5AK(dl z2HZ=aua)6~#~59mc=`NMcJ}bHq@$dW8aOd2 zD|W31fBPXX|Dph$>X0#0J%SeWh8MIpkNANlzh!F@r=vP64eK3WiV3Ccm_FhQU%q7~ zrLPpEGrF~$G;tfL21`pv>#c#UYzs%l#hW(Ve28ld^MfUweUwLBse7n{xY{nPDFR)) zNU3Atc3HiI&X8X`Xmhi*e8teuB*61;cpx8xhHSib9FXC$zD1X_M>IH(ZsVWiT=EKY zxr{VFgVRXI0+r&d1vy`(($5ksm+qL)hA7=~SCob!&QCkVFmSCMDI&Y&m=uVtMU-2H znkVO&0c?%x!1ZmQ3(B!)%@ZU#&j3Iu$1o5a10x?wl!GJdux+F#Rz^QqMYC4iG5n%6 zWGEY1AdIXat^iOq?`vD34U!D2!7c^N$UzrDAljK~nwK;VQ|T6t^%A}0vLrPFxh*Hj zAxRI%h*h^L*hn@3!gu-si~i0ECq^#W8{0NZpaS)>i3~J+?JOEh*|5XZnOe=8`Oxyk zD!!a4ArZ8}5X-T~h&xiZ&US+Pg+ZeWI@vHlr@;(1P@<8XvkjqT(0R!cuyz^~u0}<3 z292FFkUGF$QNZz&E9F66n^d>aG#GhB1#k*KuA2cve8_zXVx4o!=?o5Rb+zBcrSZcL zHh~wu4G?0y4yQqng3`^xg7pLG+ZaO2`}tFOr8}mHcX9)Re|$Y|K1LvCccME8NJ=iW ziTTF#O_uJRAv*YI>&)`>k)~ zTW9pXZ4;;W4uY3#O!Qp@7py`Edj(wgHt17rW4+b}zCXm56t(Y~^32~B&C-f$;RqwYh9@pUqs>?1x!eQ_QE z}%{oGp)lUf^qP`kg$;;Ih~76!r*4uNip)QLq*XVX`QAFOC&pjIHEk=9uU%C6pn|Q2;^{6yzFUG~bD6118;!D~2+Ns|qgNf~PN&8)MWF z0->DR&-;sx8(v9(AUf%Uyk_a-S2P+`Mn&BRy4C5(4;~RS7{TdUp^`-&6std-Ui_s4 z+&2EM^pducXiGayXLKr3V~{<>TMiYNLmCnjFI(U@?VS%n*F2_s`yZfE6RzcO(y>73 z3mS3teC4~_>0G2RYB@7e7n%XiYm9|!vuHB3A4Ffr2VU^B<55k^$TBDxnx zJ2+U;j`DSuZpYmK0@@DjF4$GyOSPzLHn0wMUNVFB0v=WB#}x{- zK-cH-eGT>tRvmmaQNO&yj9-19F>rXw(*YL_0+9WMPcy*Bw+uSK;iCN^z7$K{T%o$j zegEs%?=k>j$)9`t7rfl(D);&Q$>OKT^|N2yKmP28|NrrW|MY|7DSH(j^LXU!idSjc zikxbvuSvBmw)4@}?7V*T=7PZtw|#sHC~);)<6#3I?B7eU>}b$IeD#%94uzaakLo0h z^rr!1cIvrUo8xdttk2@tBfYfo&GKg9)3-Liq85YPUK0B(G+arX1V zLExUaUc!@#a%In$fn%MHegzes=ocV>D^l!{$u>d$$lq!uaw`9cx8##uZq`4_D6f7c zfcWYrzd6RsW>_`@BVoLb@1}P`0z(;2*Nf2CPM7^ItSx>=PLRIRX?P8Q(OkaOSqW60 zn)$yifYLh73I)M20?P2`x0oK&l$#iaql*S3E1r~D39=kX^R0BjOG7+@Si^;ExyBfJ z6rMO`Y_Ei~66iGK8&EXSL6rCQJu;KiF$0Z}k30oV5V#R)0uom-TOFtHy?H`^cW^qp z&3EQGK$5C@S!oGG>QD`AoVhl?j+=AjEFiyo@g_ltM(XjU!wq0i{n7@Nb2S!-05eT2 zm>uy7S=uwJI>;fdNm{`Wn~PVx{Ka<3$IARsBm=aHq8_rN0W?VLRDEnPS_)jJqcGS> zqI?{|hvZcr 
zbA9T2{7GNujeIGiz{UlBW6K_}@~1%}XkPJrJVOY0@M0hb4qzBF)}h<%Gk8Nj{Wu-} za*vp9I(gX;ubI8@zxV|;JrmX2CI&@xwqf!pXZ)kef06Zs|+ZvUA)D{zdO7@ zi2MB9zJkQ@tIr-BKm7AQKmP0oe|dcI@h8V)Hx9a~u3%Qb(W71R_)1TlUhd^GKYFn% z0vV`F=lJj!@MDe3cAoVSx%%AIu9_n{ z>C^rdG4$7}Zy3O+B1CD6>dBx>xfE?@ab)~JK$gj;sCuIWKBDVUy+PJ#D@TcVtUL#+ z8~bt_3UhG0O=O)j&YJA@EXz>tvKRFd8`ea2rtS5An%m12yuh-&PhOv!93~hd8!n z-H+Ef5buc~vsH#e$dQHXVH5A91E+~bu%rR#)$iJZzsRen!g>My`BhEi;d*!wua@Pz z%@g|}tFl$m0S=~OX(@>Iw$+i>K1@jr?tut%E*+>PPf)IVT3G@z5~Yg~xbNjQnKEQS zz5(adp^ivtz=<@Chz*1=`Gb>(G|Q8A4j5s^k)gv&It$)}jixhZ(*}_=RA}*YlYD^bh8%0WCn&oO3Z8W4^2eJnh z(ISrm?X8rs0?vT~_{xfGT-DL5zcX?~Yx{fY6qjDZ zRGGyWyia_w0oQZu1sv*_VbkMC%IIJQKJKl2GFEn17p%)Zh47S3gyP*{Y5zI554=C`CPkm=Hv-)Q;K0ZI z16O$c@Kql5yUs?!8@KKJ-$F;8c3_bA>4V3|U;g+f#}B{%7sn?be#Bb~dF+wf3B|m| z(^UFV`3n+#ieKO9sdef*U+wOZ_T6+$u=< zvegqE&~w1&Z69rHEd>m^(xl9Kt6cK&1bpSfNRVqu)lM0|`3E{?Q{1XS2>MMS{12gP z(W7B3F<9HWJAjmz12|WeeBqJH_#M(CSzGIDPvx$&--38}apm!G-QSv04pxDbakqcs zPW!D8aFPy%J8#IMUv`IPhq0~=z*T(NWxJ83+n`PgMzv^#FPI<^U9d4U!+do!R=HlE zeQP`2?6*uTF*bh~;4AAsxSko2X9vaXG11XtY%_dE24y#%$dhGt$UbBp$)<0u;d8b` z@EI^5$dV;DAphQP6hUQO?9?YM`O+>Pl8<-k@$Hsz&cf{c1uKzgo)WOq=MU3Y;!BV!@s1G-(XjhlJR?8KLs%V5dIHI=%C2wHZ3$wZ#?g|YHkZl^l( z42*ONYGHFeA@3&=bds{V;m@BGzG(*zVLN$$Fvr!A8_?%50&D<7D!0EWaLpg_A^-Zdd4=!w z8|3kpz?SF<+#tA`+mYpiGJ|sKR$l`SAX4o{+GPN;0>{*-Cn+iHmcZv z88aQEiwiq{Btpb|&|t^wWf`J}0yg^XMi>J6q}4!ZFnX+=Yi6r2q-PyL7elc20S?n9 z4%O-&Jgg5@68$eM8tDV;y>c`Fwey%l?wQ|kAXEB;I~FPoD7YN%|RMkWSOkoWc=f~Mwa z;ye#4rvSK<0+B-;-ka%A&=}pUT^{KQfNWjb`0AZ=XOKdHo=T% z^0IgSgZl}6%7&2s5^-#EmPPcfBao&&QC~3jSH#h81U-d=1Hh0}X7JR>w##x=cqHRERm%}x*;`6UG2%2$ubo~}3iWf*`i9rc0AH8|Gp0r)C0E35{EA=b2F z(NiEaQq$?YGFgx{coK4WO~FjjMMdos#OnA#d;; zY)$N6#u_50jaGimM9n5tG4L7*A?-IWQfUA67U%LhF^&Ug~ zl9w5M`0>ZbkN)b%$B%#XBVI-PklP46%D0|u(5}j(N-Aa?kAzFu@=0Tm-DsMH#rjr! zm!JK`#Fyw>3>s6|T^r_87<8+MJasfMjHU(&=;@WwY@T|W)qP~)`6L{t9R_9@*B(`s z(qpge2V+5Ek4|-(0+U|33tvR?3VxTk(UiKAn>Emv^lmtV~4?2>VXq=+BfTnFyIl?b$rBl zWK^ExWCK$A4uo_iqJr~M8)0mUNcj(eugJR425v4E`N0*x1Dg}`@wd%ZHvO>917i%s zgZNguQ`yOhn+7_am|UU(E&q%YneWw6X$zi?rd4bmJ8xfm9=;|ZVvGS^tU`2m?!xUn z0XF@OJ2=2EDvRh=4CIIr?OH^`FnV-j$SesC#~4WAEWu(bd-J7ZfZ`NomHav=qHMl4 z`W7mIhs8iIMwWYcIh~`4$t(b^SvW%z`*#K=g&4C;lhUhboYRH;>=de*8NJ$osO>7=w;{&9KGKSvhje0x(XP0u2O6qb zaMlVBrvbvYm+%Xid?;Ao&^ClAkPDS#&O96><4+Kcov2)$+3M-HCf(^!40uKDxQv@t3DuVPBL5i3UC zCXiZ0u1**hc8vPOX0TZ}4bYyQJ9}|O$XhUuVb$CCY0$&HRu=-Yn<5~S(SFq(^c)<@%{ZC zvk6cc_>eV&0?UxeMv{EvsZL+<@O#&s-6&|9Yzvx2w!u|gZK_-pj%|jH`x%U#E68%4 zIJn9$UN&IiCvp_d1p>##)+fIKA0FtSY|7JW3HDRxUR8|$HL$A@*2(~rvJ_MpWvh~zI<|g%nug*h~@sj{_)RvhXB7^$i4xl z`jEH&*$X28x+REml7uCf#+P!C_6r>=$Sz*15jLUY*Jp!hkXr}1bon0r8w{9h$0Ru! 
zXhiDl)H%{pK=-M4T*^^6k3?0dq#)l|5XU@uz+d?}OxLDNDR4`c+FoE$Y$hqvP<{cW zVG3V_r`}#WFvFK-BCkKRB+ojWD!}FkEM{MCKR)~^ge#9hV(w?1zu*UzUhpXA9w-cL zV4^lO*;iL5;z%C!#%k+nEW2J`dHNoNbn z>?@tXKdlt)VhYly>y;VeAP>*6i`snUpiq(2JNjdQ1)}X&T$jHw)mt3xa`o14L5<)#Z@Ba;=zS(zQUbnHF$3PAxyqe(1r z4E_Y1fklo32Lsd~v6B@F*txZVp7XyN01^0a00JOo@l$ux1n9WYMnD~vbJ^0sbedKp zrM9Op`5qY<^Dys|q+S9V$q`(N{@rWv57C zbk%#(oEM*TaaDmsuxvx;(Ma8f*o=A^&hoT^>&q(eN<&0$NDz^%30@7HJT0Ix&n9Dl zfk$RvU}fxD4}2A^?0P2v!r1uw3gjBb9jg%29t}FMZMQ7+A8>Y_+HJ>IYHO`l6OaMg z4lImQrBN=^6C^e;6Hm?j$!<1IUfMy|0gAD=8+A<~wxO;Z4iKa}vwmvs8Vnk1mVg?3 za%?yV%)mm-^%Q_m{M*?#U~yl8T6?W6jVHDSl}CN&DMPXt5WpolocWXI>xzx>!ov7K zmyw;p5Vs7Z;I9QEk3Kp6 z`sY78KKSW}{QkffJSxa6R6acGuODZi$+e=U@$N3o85`Kh7enZ2Tx>`X>P+X+RSKOs zQz^S;1Fh;Hy1RjK<4ovNrlR0`)V3MdT|?ih?w5v9)(XJMM;2K(Cw{16tGp~OfQLkb z3P?r^JR>75iwkh+$Su~ctKs1+4I;?Fz4Xh$Po&1<%1V0izV^l^A6^`Hfcq|aGJf|8 zzn^g2KmG%q-S_VN`tjl$H;#|5JUhO`hYvpbWfb?5X;%}Dv*8HYLiYlWDA!r z^>ON?U-TjLaoYF|KGLAQ@g%;aCk@UiZ)p)<;Wj)Nt4MnYzuOa`JiOk=ZtFKvI2Foh zcq1T58~Vu`7$RT~f)frMY~x12>Z1;BrwzGmf_G@-YSj`^0xzGI)CXVhLKO zV`a$@i3y9LH86H}T(8PxeEAH&v&TlM27+ar!6uJu?IV4%l48+z1CE(N;+raW@llmd z_}T<%i6|NVoFPgtJY}=YcqJ&cjro0bTgFOcqYONO>ZG+N4^}L=iOY>p&19f;)>}a! zY~uuNy74N-6=t;FK1ADOk&iRm8XTg(4!_0*BxOpm8zXFJ$bZO>-gO|fkP`WAw+`#(ufw0Jf zT>YdmoAqw;ZvPwJ(`O^!6*~E=oNwOpB|gmd$>-g@bVOGVUITD^$(ss4<}HDL{lSOF z&wu*C@hQ9dA3o+sdwBEU8{Q?*${ki}HX~oOg`rlN{~G7HwrqF4DUW@3i53;LjQQds zL!b*#-qbec+>ly59AMj}Ht^#AxlI=(sd12J{igZMuU6{XQdftd$z{+DXa(2iW=;Vl zmpqn1%NWz>ra?Bn2c19wE7gs=kfBvaA>Vv zy+)&9;3v;gNydrU)V#DK64E50z-Ol7jT6Kh8VVwaUsImYwm_j8JPWoBeq5PPaR=!P z;srDYmNa~Zvq8jV9wI;XNY#?f7}ZX)M#Byfy!acx`AY^~jKejBJmfUG2`8Zcd>Mos z>6S2wWy#3qIe|v65s1=)bSRtGEGANm9uBQOB{wu|$vwc!RO)43tqo$(K+R~I%AMT` z-vT2^3@95hgW14V!u$*!8wVOtb&$3ayk_*^V_wQ0T^oBgR&}s+;mpX6!(FoZMsn(e zJsQwC{XEf^hq3}>QV!XDW+py5r#N=DHLjGuwld~Zq3uvaS7~H}P8ujTDX^n`(o8&+ zP#1anZ#>XZHM_^Q^W+EDz|c=2H@XK4WeLWM1PwJz_&}4&SqF|Vz8J@4_>)Z7$<2SS zp(E-@WxP3vp)N_=R*bW*m8PysPH+`ZJ?;D*{|KTj+*z;heMds>gShQg8w3IBN1L#< zHWA-@(+w~nh?me!JZ{)=?S1X~N$}llIU6sz&~F^lwPv6i34BcqT;n!?cn80tca@a^ z?NKNqn;dE%v=_oTxa!Fh>|*0}lb_-x!ApEt*7vo&YzA~k|21CQd%@fLdX$c3{^vB{ zPuL*%;LAtHM<0K7{QQHTA3yu~FL-DF=f_hw4$|>s2(*zA!f8j1*h(d$T3<7&g0DCR zE|V)e|Jr(Db9^x?cZxUhssnU&2YDXe&hb{~c8L7vb*<`7TAM?^&5`oZ;ye#Os|Vtn zSEP~{o0ut`hJ^zVC*#wBnqE{3g9wRCfoiTgB73@yzf{Z)-Mo=lo5+(Bj;>>io{>RE z=4jp-KWq37=I(LHmha;ZeBZyxW0=R|;~#zR?c?K*KRDj`?v3M}e{%cyAOGn;IbJ;c z;P|!o?i~Lc_Wu6=a~$8eb>;Z{>F_b`d>@3*`E;Alvws?Y9b-=Ts}rAWj_e+_jhux~ z?&K{a9kQE%<%0{5Hs_LG6yjP3XP%pzAFN2bfu~WtrTQXvBRx&twoe%`pUN#tLi$yT#33d@k??!MNDPHhM zffy_f5s7-KlcE@MjH9q1CXsmrHsJHZ46}#M&^^-`+17VB(9xL-UVSqtd0WQh-p+uO zfwhn10(T5IWaQ?|!;0I0UORG(EYi0H$d%=fq+|^ge6i)OthVr$NJBJE`Xw6jwFM?$ zJI<2?+If7Tt@nO{>1g!@$iUkvxUB7LxCTc0_8QPre=qYt=N;VEB?ZVnFx*crD3g20 z{TW(+Uz{F<9A}O`^UU+!%s3^OJq%VmD)Y1fpRqG}lxZ+sp^a@UJ+v(7|AM(31CF=)8Ja68b=URgf9?Ohn!H{rh>03%P}(4Q(eby~G~y+8E0! 
z*-)RwYIJRgk}i(8HJumQz*|G=;+37PX*Gd^P}8_W-|j%0`;G-mXG0&1_;)$oM)ZOm z^;duuPMAz+>JnW$0A)a$zbj{qxW@f^9}n}M{&n68*o%pXxnAf)`BgTK zU1yp9Dl>fFF6h1fH{1enC;u~EHs#L~fA+=0kyU!nl~wZz(;-B~2NzYd!JzEo|3{O@gJf z#s{{}-;pA^VliE`MWJ!9*bbhSi#l?GsJ3CC!2l3AUol#PrfusX0I=f7mZT$Q z8@iE`ASH9`0_mv;UL=%7QM?njx_8#SECw?d$V5NT%C3=|Rcy?#*w8+{uyc)J&-D@a?3 z8el+78I=E}_GN%%6KTiIs7k5%c*p~smhkCt;Zesl1yEvF~Pr*gY@_@nP-raA;^A?>qw^`R35GYuuJK%>Fk!W%uwkEUP7#bU7dO_?@<9_E9BgtF1ZkXMj^ zm-VEV2dM{ScD&^4Aq1p4Xje7#5LvW{$0p|EdhP`x$U1XH=|Yw|S20L$Ez zv^`G7pUb{2I>S#v;0bePI!3VsMX`z`Kya*`&VpNukWP=5f7JZ4;oYUF6!XaKO z2I4EL=2b5$v$uy|x}gw0?zynr9ecqKP`Ck{dLtX(2A-Xe-3LM=BY4=RxI!io93)A* zOH`7K9et%-JMePTVzLUSU64sRq}MWkhB&LGehnzVn+=b^N*gkEmfbRt2KxDQgB_}$ z>pu1c?-#Ew*u2N%c$8gX!`@AH0s6x{Hz~h%!E!#MkgG23H@L!X5OxsroX7JXK7PjX zzMcM)g3Wvnd3OJCv;7+NHrb&N$5lE#!$~!@WwDjK8bGmyb2Y{~ zNNfRPbmZd((a7&W2}5^Zmr{9HKWZmbT-y~G`T=rWH*nxW>UG+j1%(YAruoDy{wiB} zih1L~7!Hdp<*<$?D0Wvy%-9Q#xJiKjT(KBg2PTHbxd)v;t|*q82stRD%#D-&I1&VP zdSYpeQyoR_>`*-tAH9I(L_X(aHJ{&MfW-gsyH}1s{0DCzPe1g4_+Pr^q>76Ke7Cu zkI#R?BeKxH4_zM?@CBUHrXZ>oLmA)u;NB8-z6QX|gaIDm^c-@DH+pCE+>x+9x#`r5X{Ggr~w zG@w~NvKlneLcVFRM266kDR--bx-b2dD`uWoo($@%hR66)Lh_$HV{Xe63+z1oVSyym zWDQIDC7_@IGw{lS7cEVaos^T&jHFjZfmp#L?}%;YWNeF2w1t!O2tAzDN5oxzGT0PY zI2DoWT}BS+%QU^dEa#%V0t9eTP-XMcVFOQN%NN4dC zcVws`n`S{3I%Wng5Hd*5zx+n6Q6X#35-7?Lx8WW=eH7KDc|d__37z}=Rxz7h1KhSD zeA-Ss-)<-2clvHf3rF7S+%<;s+u_zwvq8@L*7WrD0F)u&;;qIu)QZ+}uv+Vb#j|;M zh9Dc5e6|{SxvJ?5V4NW)93rotv1jDvGw#{HWJdV>c=Gru9lV|VqvIhz{rmXIqvO$oN0q(cO?}Vo z%ydG=@k>j3niz0}WUskZ$mXi30NU5RBKpD*au}DU*m)9U7%v2~ z@f=YK_H8m7{^EeLRxPlL=98N=_quip2?hF9;W84*OgUchEj6DF97$1+k(r0?$FyP-dzUh?I7wr5{Iwp2a)Hb(317Cyi1P- zR*G^c6RvK&r*v%hPJHasWTn9TyQAH` z4@e`mvK^?@_?u;Jb#od@H%a<}8yk`QBVpxTK5}O7{Kelf==TsP?!X?sFvW@(EV(0i zoV8N6vQbA~8|yX-0_MQM2F<==eo#okkU1NT4eb2e$00KLs@OaiP`l>Uj?lp~Qoq4W z8{gWXzp`OOpKCO}cH&H+Sw+CMc1D!-n4tEx<3VniK;E>M_{gPlX7T#S0mf@LOCGma z;srqZ=zYB(e|gG402uOLv0?1_i!WIYesMfz>;5C&!sq+^pK?$C3Cs8|83eeb|AKON z@4ukYZ#TUISqEHp0{YJwfWNWB)fmD&uWIDx){#q~&GdwHS=>PYas|h?@jL!jH?E2Q zp{Er&xNvaRe4G(ds|s8zO2OgTPqLYZ4}flefJMYeb);BWz<@N1O3pzshP7)me}mR~ z*gfN1w~m)z@`Fya9sZK?!%ykWAJd_~yiFWEIiCHB{BOQF zKF8z#`QQHO@teQ@TgR1mfAe^s4*wrt|GT`3`HzoR{D_l-z<0iL_xSz?50Bflkx!VI zXhNmfo06@e9m$B?+IQa~t#T{BGLnxyN(-ZHw3GJquHegGze*E$|qsJ8=8{l!ivx`N7IB|FY(#fi@<7l%PAm9gN0v5j0d zUxo+a>xLSl5*mf&qq>{lW^D1mjKLT9M(=E1qdq7&de%^zpSaT86;UoZ6wJ+g&Jj7` zNqBI!bI8b!V-TetSLo^ek9_jO%w@Y8bi>uyrNjVszU|P$<^09o$yGW~RA;@{ z-L;FIwvBMVa;L7yp1hOuJ{5WG9(+U+*hTv!>-9kX?I0rI~tEx zRjgpx=OiOA#a0!5iHn}7*-C(4wqw_j(_iMa5%DdD%6#FAd(fx1_Yo7mna`N>9=&lz z4r<~9j3iEZQDyd^f!ELXF8E(oTyVi!_=;V^*#o{^ds~d8j2|X=^lx( zGJbIIB3x|(U_Ke}niZUh)i|ZEHrJ|t!vw$;00j=by~f{`hsC9-_94_UiD&Fl%C)zK z@~yI^nDGLLK8IEV4TBpEwD(@|iZ(t|W(PCcBVCkF9W@B}UBa*S(9xs*7o4N^&iLWc zQ}>{NC1coYV}ZB^IJ3169-nc&=y?DY~9{~l=$6P=A_&C1#-S>~5 z{_t1FH+VP01IB5OpFd*L;N9cHPkwUz_~Y*%|NOuE?~j}CefILxAl|U=6q=s5~#u3R`1zOo>lxQW(rf=*@tZQi^7in6t zF=Qj0Ky2W~as`}7>qxF4UCuVd2sK7e14?%%3@frmXpt_{JmU)m3~=hOkAWRQ%{$`` zJq^&|c_9j8zMq^xLb)4#?VvF)0}g>r_p9}7de;C+5m}UJ*+lye zoWWyyRena;OvpPyt0Ez+ZiEK z>MElZnv0pGInY`$SvP=Cs))Q$2bPcpxAzCo>DjrCp3q20mtmPjx=AscfZS#o(72w} z2w+inY^OLZb^H7DU|eHn>)0v@%`HG zeB*fW;K9M_|8e7O%FxkRea%YG&3E}}WvbO(23i;2{KoOtyEl)wuiQKS!!LevTxHzw z(?9)xkGJ3YHTHFY|LoE6-o1l;9W!A57ytZw$N&1j{~146#jSw$<{%0|(&LlJJGa+X zeJ&0x(=P~sUB)!(N}dKZhCI0{wFXQDHnyw&S|%vA!MG;KfLw=m&?jc_+4-ll3FyXV zXBq>8H+4mPeK~bf_{fO~uenWOoI^gpnGgngm22@L;f44x>D}Q8N5VX-NP%f!zEAoSAV& z%PK3qPG6U2mu^FP9rvx)y12|knCzikT?|sRX~-HB;hfDYSy9G7v9gC!DUHXq2j1Wbm}g93&wIe`2E72G*LqO4Mbiucc)&wd_i75 z<54|)Gw&0S7_w>t@HCAYX$0$1OSn*Sl_9fHq>oL{20W3#fB+IY3Gy3~`pq5a^3+yS 
zrj50%KTt$zjb;yGWB*X0qs8o-idW-Bl7p4b+C$sBCqjMP?OZ2}y~1@6WJ>=cly*>H ze=@h#dv|=bj6#{jhk=Tb%nD=u7c#D~KEl1cP}0@M z2LR{LM|`_CLL0J5Wb8TU$`L@w1*nm`K;SALQ^>i2ujQ~wJY`9KJt95@8bIF(vnbkG z&aEuUP6s&Jd<;ac8p@r)8z#Ab|LJj?dh*_H+&Uiqf;|Age&e|R=`(H#Tsc1Y|2{f? z`|tgo<0rrP@VL1579u=2o?m4tLqvP2_2A=mBdcJ(hqoTYAfco>2 zTR?Yjy*YmOTQ`m$e8Q5IT=j$Xzzg`fGNLxwLftL!lX>wCiff0Oz+wrh)fK!3JJQN_ z<4=*ZjpZ1p&APytcPhhPR`6|#>1c%?i&&tqJ#Twe-x30Ol+6*+YAeMPQ+^#JhZTmV&lhL%J^PtMBYJ!tUL}M% z)7$t$&uHfBI9_kML0#hGwK6EbvR}qM^QkZzBkzVi^FGVtENa7B?$u?gFjEPct@E4$ zw8H8s8lXCp7!8G?BrGrmszD`R=!oN0yjQhJ7vv@Xtk*2@4u9Yk%&#GppxGi^X-jkP zqisa?&zGovNrPkCBDKr}QNA^f0G2MehHiN3yDF(QVcZZf6xG9n%0@+Z5RiIY*8o}w zZuZz|0zUiMF$}Vds|eB_eJvnQohc(VvXS<_x_o5R>^m9siF~9jFuya$GwuHMv%b*T ztSE(^oyDZ=(5GUR0qHYjFn+;qbe*!^K&cp%x!j+G@}PK$OdB;E%+w{C&=mtQP6IHM z6+WEBHpz@1|I`y*q~xlEyYan}wzn;!Nx3f=Y9o?7TsjioffM&{`N7_B7!8fQbaHb) z+L3^_4`i_#8WCH0frkFnYMk_+b;c!czNaHZ`H==teG2qxNfk6e6}_``=&mUzbHPQu zPJCzzC{7H4j~}+`r7+qUV%xv9AvWmb2fxZh9~vaYEhZI2IOr#&WnSrbG8(K+jA~F{ zi=3f=NJe>M!HGEs9ZMII&*h_Zkf!pBN13ekfZ_^R(8vRQ_#0m23l|KDNLjZ+ zroe$6nd{hg;zjSruitrjymJ>y$bOBttnxy}Bo<&b8y&hwKk{!rp*~&yO$fUpYQy z<-prvP_$yPf-Coj&xjT2V<`|8YIhvtFtzXw6>Z|tuX1@ z1&g-b#94Za(vcy7_n(hn+fLO*8F_RP;Di~id}+lYKibBCU>+Nnr1$DYomdIj+yQUwQDoyQ=ir^o zH@!2A{DL|Go$*GO%dnUAtMvSJ(p-2OyXY*L&km9eHgJhw+(x0JV@ucCc1r@T_p$V? zw6s}Ewl)gVwWe2Qp$&zkNq0~tenE;0YXbrXMVAiT?z9Dvrbo1gVf4MWsQd;lm;6uz z1-2b($b6ZM{Lq_=r&fv@LCMu{a`sTh_86>2 z&^*fBgAktABS@W&SCSYqcB9Uet#sfggPoaBI z-yh8JC)6xc_wp#LV+bn1VZ3I@-M<}GVFz`m12CVN2ublFhItdon9?SVE4r_Pj9vhz zu9~5Nq(YII4AO_j=}CboO4CjOc+ZyxD)Dq|p(r{WyVw}|yYN??)r#cHpA21Qp?1ga zbGX7AAzu>5Bt}NS#<$W?e)xo(dBBkgD>2%lMt^EYw{)%jqWp$<nV z|DwgxpMwa~$q`Lxi$|Vz^%%Yb=Ss^LEaQLlOXA;q{=bB;uV%je=J@1i&yH{Z!8^y} z7ti$M<+S(CbIH4}W_3B^wFdJL0cp@>J-- zQ=P|MUKI8IyF5J-@8PKo%F^l!=ZSN5L^ge84Dxl_uOu!uYv!%7RHJi%;b%|_viq_n zet>GcjhXa@JV4Rdc1V`s;97hJVNT={%s~k>@(}`S-+dt2$Zm%GJvA^jer7dhuq`%z z=NuqkwQuGM#HaF8;n2%`+N^{e7qIx{dmf}sZ;2vAe4~sJ!(4U}L;Sg11jYg7w*oK# zuvGYfB{9aAz#Jo}Fk{%2yKuakG7Yx{61Eb*zDB>SzwxzYSct29c#j3Q5<~CI>r86P zccOJJA~o`#%cMQ>Ud9u}pz<(($~J7d1i^Clf-%=fX=J;-HJN-%3py*MH=|3AHlJgq zOR>A|y-zK@sla0y z;Y!JW(UwrVSVdfNL{C^}Lsl&E7kAFstE?4ofuJ`q&b%X0F9@6?Wo&M*s2t6-QhMns)@UOTB?yKK5UmLc7@erfE7Bmd~G9cOB^S3j! 
zLw9e3w?k^&#StMn;j4UHQX0=~2P7crEdp<&03(oJt^^oMb|!RWbu18W+~6g}>J0=m z`2!{U$df#0v6Vq9k_*SunSg?`?4}F@q;$)t(RF|%LGrYjg8=9`K)dcTx4MCp}Yk zCBJ-S4o#Kjzqm7IM3jel3U9~l#XdG{Dtc?g3}^r71dO)!FPT7%oRr{oojnrfr-Crr ziodK&W}G}DTEha-B<~mUPJ64JX5}EB+L$u!u#aL~S@cQqH)S&rH6Po_EEoq_+SWnb zo8xVE@PCsPfY-dh#}_+2_=1=HyhS}=aI4Flh*&2+`Q)!yL3+p|eykEadBE>Ed_g(4 z13v#^cs_k|@MP!lO9m95JiKz;Wq@$^^@HQP-~El_U;W?{1{QZP$$3Lyva04;4K5N9 zd!V0w@H__9oTXKxNG4Gca9C(A6qw9nD`KRfVRCGw&4)r2Mdl)VHokGh#Fq31cH1zz zz&25dF*bCTPrAXKaSI7DBsg50M3|?nBhNFwif$Ma`{HbXN>}#aZMiny`TRF|o@Ep2%y*}^ZeOLj>CE}-WmeS96FIfB zy4Q$LCElev{G<_%oj~wB1kUno1I@HZ^QcJ6HeT6C zMRP=2GLVn1K0_SkU?2m5i63%TgC7l9;14?);oCD&XR3|JTefnxmM?1gn%sY7=~7xA z*@3hCTu%2*Va|LQ3T7H;Vg068LBg^C?=J>8Q2HH?< z*4O}w-XeC;6;|>lrf~;PcIHn3d-P{n%9uAO$+>W4H^=Zx zKL%jC^HHK(*!$PtyLr5P!b%oCfr-@(VuC*aL9o=5d$D2)}(3oZDBApM3U+A9Z37 z`ITdAuH4F_o>+y!(VD?=7ZM~*vMR&G-IV5hxgeobSiEdv+?SHf(_R`29c}5 zQ~u<%K~$H0h%W0IK9WvzyB#2JU?CYEX+fs~1KmZXpX4#d>B%%9`~duoggh*k#F z8#HxrP^H`+(D60X+WIQ59Jc;7$j~RoHt4e{lxr`HZ)EEA!(5(@_>ciTEvcRk9hwb8 zKU21vWeE`C>M!Hkd;3n2jZa{Ek(9G&%SLF*peb$T>PZt!)Gy*;W58TuuHA-)`pE^P z_;edXd|4(x2wXUH(?fTY5U!Q|1%>spwJ#QCjLXac5ZGfBfe7%a3@R z^Md>KaQoQ@yu^pj{FlE#cJ}W5{4YN}-T?EGd;51T-e%JvzY_TB#_{4)>cK4p6vQ0+vYIMgkz$gu{>8vti3*|PNJkKAg zjA<6Yp0uYQ((;;Pm5IOdI&1gvEH|hRIa(p8Pe37S( ztCahatX*7e0_CymdTAI0Bm1qxB^Smo4t)$Nx;Dl<*!3DbB_}3x9UfC^IJ9cmqE& zMCkN2bR|Cqn1U_WK5y_%@9X)Ne^sup(mBg3ApbUg1p4qDSq-AUL3isr41&KbgNFB* zhLA+dz304je+?zYJSM&lC+N3_p(}_k!zT287b}9 zw(-I;m%q(R^XQOL$mk>2lyE(%;3$iZm5l~UXe&_dlYz&-l_c3NHB{pINp7)frO7Ee zr!4vwfgi^@!A;M63bhBz=v~lT}pL%908G2S>+{7s5%hfBbo!wo-(2SIrHw( z*nF+feBkFJi#U*LU>u;9;>!5UfY3Ir|2Y+R+RT+4GFr~fCdlZm3FtHcTCr)no3_3q z!ABstzV1@*%9q~!V3{;2&ju7)Zg0rBf!X3WF>BtyD3VVT#1~o}(m?hGhyZ9dvXbB0 zrrorqgFEr`sfE#69joTF{!Qj0%k^VeeMIN}0iFIAbmTYg@hV^jn$My4${H~u|;eT&rq-t}SAhP+Es30eNwCg*RhlKg0FYDrdfpoUaSPEjn1TO}(j5+Nb zKBwh0*v=8`AZR^h+3z9I-rCeb-Uhd$H6HV)gKa+fJ^?015mZDzoIOdSJkp5*z2b6I z&d(b*IPsK4H0!A6tP!2klxw>NQ33?h_sF_fW&BjT?l@wxllI0+9Wp zOzMmrUcxDWhsayNE!@thREQIU9G;Rwcv!YlWAT-zaLb|QEFPXcHqNBauv?~pzV>y- zT@0ghPH*td>qHBXpsbK2CZBWxvapm-s3o)Tef%Y}IqFYw;ggf8U zgMy?^w%H6IPJ?cv5D&8}(nqHY$_IZHaxK42lVm;@er$iTg*o!9Q!!}f7+&habzhu8 zZ`ENG9`07eY(V8nB>39GL(2U0mdUHjO8L8T>SwUG}W|7=KhWs`;# zG5cc!Fa|IGajjiKr3AgRozgL=(~K6*y>L3{#gkkhq{N^~WQPOVPGTaRI!*!Yw1+Fi zMkipngDn}pDIXHO5q990bG=5ox5*MSopwWyb>WyhfNB=5!EQn&9)ixiU3~L|4_!cHoSIF7b{l^kNPH zeSp3FE0dGI>mUIKaoNv2?P)%U;W8r7fANVSMbwLe$ZKg5<_==&n%3K@^YWQ_{qcLB zuh|Iu*+W(dcoKlOJ;U?W@$NTo9p8SBUx&PR_xO_^f5F=!&%B4f@+})`lMEq@S|Yc4 z*;$((s`gr!*eZR>mriKeueWVdfv0`Jh7@^dt!TxEj7$6^tSrIl9$C(1eEiLfWKqs> z#lSBb4Q463>f1Mk#oyZA0dur$@1oAvB0RUjxCH*Bn0z$_f4dEdfY;(JBH?ZUC7H|? z4vYg$W5bT#cyZv9zThay3^-^Hvg5?lA`P4|GyEJ<|Irpe;$tL12S6um!E}%5ofp(L zwxX@FGd(gD7+Q1o`V4nTzpv%X!>q5$FaaLuSH4E(p=)}7{%i1GLH^oBxXElmLL|B! 
zat(T_pi=^#O3fcRUuY@#o02!uR|TT)nM zu;9#RuP8O-7;T>?}t~GUc%HV(YSRWOfx|PuZl)K|D%6` zuC$10HVtWM%eO5iEe$L;Y8UtfxHB&s%f(XTMM=MzpA6u0b^}?aQ3guCew<+~2guo~ z4Yjq<&JP-P%pUUV+6wA+ec>HlSYW4+9Gp2uGhr+rnQF)bC!ce-FVYDxI}q~v@fmGt zV+q|VzQCk<`yVNf3twf-OvzUS7d+xCSD>byLE-~D5Av_k>0XB({Q-yXZzq$+WVFC; z{5vz=ja+`0mW`b>>KK7mEFU4{Pd`u!44?e34$x310pVpsTjR%b^F3||PjM}jyn1h* z$uDjsrz31eCalI0&$GI629rM|$WGEUk7CPDID^J_`O}Cl8u-h`SNFN!!VR#7bw8>k zg1m}hbwLL05bsG_9G~GT2a&?d4s&%Wn=)nAFAYY7g_aHc(n$)@Sn^jlrKw3*d@17$ z1_DmZNr^ohUBC6#y<-27FMeW0f^*jk{?z)$pqfq<(@t9>z}SRii3e_WrqkU)O_JhWv$ z&SfV`{>+d|Rwm5ZZ;O|(708$)S&UJxt6%1GjX!r=>E+@WieJg7EaGtPh~%OhjCe+r zv=@vgWSDlai@_B#vl&08y#Q@a19Kuh39I-YSuz5Q;l!G2FgpNVh+9z41tQK-uDRZH z&ga2b$;sPzi}2#JptgdmaKm0cZ#>WD^Uc4(rp$a7Fe95V`xw}bGr2o&!$eABFB;`1 zI4PaXzc_N0@5bNfvC-%n$o^8qg|%smb53u1BmWs~^H|1D$KgI0i3IVHZR1s@oXXxY z=~J9u$`hmjtn%4oi8K60#;F)l+T_xP*jSy|@XUWe)=VgHo?+fE<1_Kzn!bG{nZ7wfn%1`zQvV5li~oOE}-WxA!=ve zm(U|r%AhjRNkC}tMjTplN>_@#r{AV2bA=bFK@73s)nsB~8+flFNuq3Rlw9B{YpzCC ze3HJE0c=|=jwV{23OOU2ndmwVC0Pk3XSs3a+xTiFT$;f2XsyQV%Yr zcRIBDZes8Wm z<2M3$s(~NK`kag>k7gj@I|bgpedYMh-+Ax2%?7|Hyi4N$`oZV%8h*-vuKYxFkCY~Z z1Dw@YV0+PIQ@vVF)tZrp;GDe<>0L9KHtL_gxIO-welTHJ_ay`Y1uc0TR zpY7K!()g#?!DHX_m(<(;(;k^YsMn8BW-JY{tcd+?v!rBf7Lc8y6}|l=y6#d;ithk$ycj=xv@#m zaV#PY-xVCu-4duPa!_%aeGv`Kkj%@x%p}?T^om!_zGg$yKDLZrN1LD&eq=?C#Pw_C z3ugrYKz@5UxJonv{1?vNt=X33%F^1Id8ACKBvqBFXn4T5JhbsQ`B@l`fLUV(uRK5- zg|)Rc2DDVw)m=ij%sh;5j5)$*#9lik!_V2_?(;I{#Xa0(#fr67@=Ke1T9Xqm#bh^! zA{=C_YD{v)X2E7pftwA%@0aUj?Z*}4Q?RorSOK$EORm&RFs66xZ9`vI%cJq%>HTni zT<1HU($)iN{vjK^@9R0u6+S-by9p?~GWCO5eSaHlZ5DYoi?1_wepOBWq^&d7djU9{ zq%%f#^2&^I>N_04Q^)rHh(0#R4HrP#WW30!dARicKoOPb!Q_t1R2jLix+^Ve(e7@@W z|I&B-e4{7#Kl8AojD4jq^!e#u|LpB&-~PvM|KT6>@z}5aleb_0_TT7xF}{2Ii%)-{ z@%!Z8g!qk~{^JLTey&&iKl|zDxBvV<`YV0?uks|EfAg>Z9wV}m`2wzU%zbigO5TVx zE4nriqZ4K4G7ISYj-qMIcZ86`cgBMpGqXg>#M0{zVR8|~1+Lbe{0<50gY?ObF!%?C z@2L3V(-1z`vaxlM-#m%qxgdMJ({yRrM~xQlaI(q|`{{WgCVr^^g;nTP_%0L&ZyW*u*J9V?Ha6o-@W7yL=}_ezZ)sv?c{3csS*YPC0+sRztI36A+5$ zT{J&y0Sv_+_<)Qj+9Ac*=$1$KD?KsklZ>(B#D9V<447NDzThLJywuQ`9HAfC#86rE z%7d16oM;~!?e@{3l-~EKLpA5}1({0@XC?+{_p)P4yE4^}{ramDj#m-dWccMf22H!0 zCZ8I{&bWn9oh&LIXyq*U$x{~ZnV{0o9+Lch`Y}Hd%2Z=S=b0rDfX4S-AgW5RJdp&-}MEziIZ{Xefqtjc@gpjhFaQ; zSxCN@53$eUFL{;bo<)&R8DY&N{q6zS(DaqZlKonnzkbJ0ec99P-!FIo4h8w^kNRmY z@)fE0#&Zb%0UQAsmy3bG30=0i!r(0hm6KmO{1-l7yu%mU(1y$_7R_&OrV$enA3%$Y z?O}cs&&zJKEA6qWu0euXk!-br80Q?oEeNipCZx3td zHiq&HlHV4Rf4phpzS=fyBW+^@->9X-l>#|s0;o8>ZqYf7%w@(D9ns~B$EAxs zqFlH*#rTzCJ2oXBeQr#gzEJ+|O^c@tx(6{{k z@$cTg)&qiH>M8%<=)IxeeEPjNSB2o8h5qKpx?s>lgg^V+pT7M;zWnT;{g-e5^MCs< z;+y6RzIr^E=k^t6)OO5rnKpT}-`akZ*Rbp~lT&>8t}e3PheY${Pmu6te4I+Yrc$;n zM~viEGMo>5D`p;yg_iTy<1BFJ)P`*Z;K9KU465Q8nEI_gcJY_eIfyyjVvPO!0Se50 z+aON$!*He=LMl~HZumN$F+;v|W5aWksY@7j}Zw;adj|G7fd~Pus58te3Asw*CI3y0JyI^xd z@S^Wq>yl@|@aKXt&I!hQjn2{dg_da9rX<3v6jmn7Yn#=BR!)ifez1o{Km*D@@_zX^ zzD~Gx*DhL`i$4}Wz*tyWRP^V#SzSc)hHtxL-`cWKTd}A$OEkgCkL3FR03ZNKL_t(! 
z9I|M#@qWwVfwXezPl4!S!oRpE4KHL;kM%0v(#)JpXlN};xIHIcl{GEPO?E9a*{kylndHdBLe)9Gk zZFc|aFaF@4d;Y91{NZhYH{II*{9}Dtmgew}zWb4W1K>OTV&E5V|Ji@>H*bHXy9QtU z=Lm%d?BEvKjRXTj>?xp@#pMyYgdr!Ko(s)Ox!kgmUHPF9l6l6xnVo1 z_yC8#eWt-C#yASsKmY4tW_6i|8pZ2-quBRaN<(ta1$bIaf}02RzLQQ~#cT6trP7~2 zLMTjraEgyS^42gHIM`ti_C=O(xqW_3HFWV!ixjViv#8;O{#y+Yg2<2i_>Kz#b76Tc@Lb?99P*v9#{s`Os1|6U&%{LQz2p;+||f4YG%&CBCF5d8mJhREEPw7z9DH*^R_R+gMH?410-haSB`pOTko!uUhLL~ z*5$P>KK=Mfj+CtJ*a$y;sNBXbEwUsy6C?2_|A9_UDu31Ehp!gLhxn3Q#Fge!_0@cy z#KeH~&OO5Q!A$p$9MaI4(8w|w5fPSNA^raMf7B0s@SF#?e9gV($+tK~2hZ=*`Q(=0 zoU~qCgASqrY^Uwi*xR{pUf=*)K&8LpS0`=tnf2B&PkwTO#_8aAQy>@ml`{Pi4kt7V zB_*=z^NLCw$R4|?ZPQvqH#R8x^8%#hUp9FV(BFgzg!<@lyvi(GKB>hp&oDesF>#?M z8sz*!m^MFd=hN>|j{2=K)((g))F{Xo`^}D{g~N*$I{53wT${9>rl+tkL605Ovrr*t zJI+()F8MZ^7%Zzuv#J8iFCONnj+rmq?spOeGvmO5DNDd`wPAD=Od!O z)$>CC-GBeTy!}dIJT~fY>|}g=!g05XF(DrSaeg`hYGYGJxsrSgeQs^zUM2+(-sbrM zeD&0FiZxG|@Azn0a;ApH$EFO94cT>05Rw-nIWN+?mC2#!Hsh#&_T6fVhYM83%7~nc zqLDZ8`HE~lLG%4Q1LQno$g%1u`rBOv$F#U@kn! z6LZG0LU1@U)?ktc^f_l0k7H4ctdsE%nXxQ+!gTS10Dz1+QAWlQoWKe^^&_6PQu{ig zB{RuFPLP9ne6EeyMJBjH12FzFR_FpoTLpV5@e|+j2NfD)SbM3fgH4}!2Ed-=d9Qai zB->H%?wh8GYzCt~8bI-85xdKWcR4H@$z#S3-zY~X0mV0SbZGplnbc4mr(1G|hnUz% zWnX{rviS$gw|mjygmL~b8pFh4+Ytp&h0ctDbHq8Wk4xzc_gBx#2YC{`LQ(5v@(Jbn zJb-f!Wl=!~WMOM=eA2T8UQ{qi?x;C3_9TXf<9PKu9K7IyCB46p2I^Gkt#m5#RM)Kv~T)<~tNSq5mf)$p&I@yVrpJV~N z_yll@sE);B0FLpPXKG6YDzy94ny#0^=J5Ll)@?QBB3poVF7X#GT6+u2^KB$ZPkW`yS2ftzI z;*A*D=)uf|v>4O2BJneitgrEqF3-s@i@iZ3?)<4C`<}br%xB(+mN8LY$)oe5rT|&f zm=C5g5rvCqd4WMqJem)ji%bEWz;|RN>1a8qT)67PZT@}k+YWxjZ;-q;YaHV{m|$RJ zV!#*1*=XUzqo89C$j#WdU*LLQvJ{dcCq91)z)oYtCAH(R@d$^9lB9(n$7&^9js?z% zP&#sU&Hf}Q8Hg)EOSf9ZYM=Te`O4O=li_5==Rbu}`7KAgxSL)Z<)@xSmHarliTL1Z z07=8}(V2038B=WF`;5or&-qs>pMGT%h-`I|tK_{j>{4Po;}W^jGl}3(VuwW*oRq|* zc(w3UF6>Myq?!W@r4cNE+v{dMVxbr_C(aN!S8V5End>dQN zzcv)_Pu)1UMa@6VtMK7CZZNCneh$&4%~vMF>;yja211E-BfJ8(A$o5x$JHZ>af<(d)#K|e5jyH92 zx~Yq*LQMFPwn5Q$UzeHE%PUbfVaX6DwF)902iuTMYW+C|j52QW4LN-GMLMxbkO#Mr zgqFDRLv3R67)Vd!;h`lj#BYP(NQecW3k1>qo-~RGAfrRi@0>rU4_Ds;;7e)!Jb*q9 z$fWb*n|dhl=Wjpx#V36&@ISr%_#5GVqdNt9Sok0HKt)4%-tH$5d_c`EcM z-oVtnom1*FFG7>AN*p=3))6NX0&y{GbwNSCG>%;Ge$vMY|EOOq%pFIJ=uh!79)vAm zlj!6Nv4~$Z&RMZh@b^?OUigW=4(8E@M2$Vt$lMk|vqR#+&YA;TJcdcOfD4plC6@`o zi;^)z!vd=GygRgPMp(62Ra2k zp9yL=5Af}n;OTwm?X!rtese(WMOLdnCo6KM ziJTK(C$j#T7q*dXL$-p0Nj4E~UF(&N{#j?u8vzG{hPcE=-Nh6Ad=+uz&V~_e0*rrB z^M-@=6tLhs30PK__t@3)Vyk)V+ne5$TEqPjLCtwjdvEL$r~K_* z3}1i|-sIBbz2T8n-*>6pAV)+=NXb8hpM3Mp+wb&=6z3g0{qy;L2?K(X{=g&*aL7He znmzuhO$kl4Fn)xbYM;Ss^O4YBe*VeZH^2P)?XUjXw{JiDr9L6@>8EeM{d@h0&>#Nc z?eG52-@W~cF9-q%d|xbMv~z+xDZC9pJmiR1%CW&ba2zv!@H4+pXAmESZQtuH*uNIgzKcR7F-5G&Xjfeb=Q5J=c2{8^qSl0x$0Xn zksI5Sh+{ev9|aGY_=guX@)5O6fKV9&{5yJ?A1u^4DX<&~x*O$@g_U5@|EqN`h*Z#3E8*{GY6PI5ZN`@`Q57Z#z!u5he(zLF?gvQtK@`XQMc>Iw$`J!v1 zPg;1t)kTlD1_;=uRyu4Se1g=t@zS`jlE_P9&IV^6?2HwpL>}cLen=H5*3(aVnTtI? 
zOo%OSB9a!*zDmyD+0O;w*h4D$(%7=_N{@fv_kDi?oZ<~aZM^WNYe_ni5m88%^q6b> z`~^y~9R%>@n;nNZRANwZ&$bhnHv)pwK!D4uQQvitEq@P#e9b0c^>`u>Jn(s-^UnE{ z{BM8D6Zw&=-E*#JAiHUfuig|8PdL$STV%g-)*Nx1s<{o!rSR`&-hHLF1-{jn1pVd@ z`Uv;G`}*ytzxesv|Djj?U+R5>|NB?JfBUVzOTYssEq?#OKhh)&OyoT0)_w2|$vNP2 zy!n%)*A+R;{LvGq$nmwzIzx1=sV}tApeJ+U^)u~UfRQO40yMr1CjB_1@lq5xF>4l} zTrjXmn5;34lO$8NJa)qLxQ&cCU|7Ss^ZdefHg~Q7NmdO@OzOh$EH=K;M;%ED<0S_@ zw_(+Pf915klft0b01G+K9pX`23e6`1tRoa`a(5sX%$Z-Nqbr>H;c*6+nC!dNeXxuL z0CZ9rEL_Byv|ZEJzfP`KI+eHbLm%2J+)bzb#DLaP-mAJYt)I3^r$)b}a_v)ZKK;sU zIw{IvA(3`k=UBZ`>i6!qO^v)KiblNnrOU#I_QXmZdd90d!8zlI;zzbW^3kuX2e2}Y zzx_C*S;6G8@5koT-Z<}S^G>`ZRBSI4TN>ZfXCw04$DDkc$fd)rPsJ`Spk9Ctp%YlT z{`ignyh)Ajj;$D|c+*g9`~%C+1CSTt_+kJ$u5*H02A+x+8}c!my5-<0nEb!Sc>9j3 z`wGrs15d-M(YIYTVo5mnX^jw~C_xNHeW&LIC#n))QQGr~{CuU2UmJpcCXTm3_(&l7 z*fcV@jvshA<_iGP*c{bvPDN2k?%k4X)EWc2g3_YR*GRLZUGdUG%oG|duE9zw$bDM1 zQGT_!bC32gV$a@crRmRMLpi3COxC9R~j8pQ5EbDdM z!VuOvzp1|x0Mh{K3XHoo;BirI+xUVH zcFGO)l?XN-{*yz}%z-;Ro-^qtE}W2@HaL9o1wWfQo1C5p5GyG%26MuV7*#Ll5pxxD z@WtYy!WV9Qnn^hL#(!_JMnuRo;PXik8sNYuhm48N1=+h|lS;2M>=ZnSCsY|g+wo?6 zec>%yzI;l4TSUAJw{+vz^_A*?aiPmR-Nm^zx(nM*{Y^LTq=olz9u zesF7(jb?3hHYTySBEE02vZ%4R!O&u-I}~5(6`8*J#tSB!+4uTvSl3H0Alkerw!?+a zyz+7%x;F;uE#R*`%Wt)`D#(90Zpm@l$X%J828mEjSInROHSObG^`M*XBWPC+M*iD}=$DF?xh= z?B3)=?>U9`XMIuh7y9i0zVM%qj(yg{to#hJcfHBUz*FIx{wIcNxvtxLcZ_?X6WZ?helV z(QP=)FYMYu2^eFTN=R_V2ELMO*C&UTIgAL%$0dvih6CWv>VHJ99JL)ZAy{~d&GL2| zTKg7Q2gfNjYA{+p^1uArf6DECWvMGqyS_YW^fzy}(WAVlorx;$lYVpF6J`!lJ^k4acR;!Fnu8EXuJf$HtD6F*ZO^fh1ka`V}EW6`>OHQ$? ze^;>&q4^@j_DH5=J&Z!(ywl)ju0m9P2I87K&L`)k(B+eDV`S`K_SAq!0m5>|Dj=L_ zCl{BLsQ9Vk#El#TF3uTz==z@vMB;8-$c~zz$xVQiZ}KNS&v~`Ib}L7=HFzV87V8rg zI+M&tT^@C1X}5qG`WR(Ej1l8cojs}AE*7#+T6w$Pn6Z@@kU4#aci|Pgy4%hx+P2qr zG3ehGT9R;;*ri+f)|Uvwq1otL#4G>hZ-1)Wy13La%eXO#lYi)^)EDd}H}L9v+9&5v zyT&m@e06^v>eJTSn59E&zN;fIjCA};pEx3epY$S`aXoXYjehqrKwpxj(1 z_0ar~9+QGikXyrEL~zLe$AvRj_XAuPtdkxYj`)DS2SS^C?p97-t2DLc5mm!EF`Z3T z{`(kQur5A}tN#4??kZ+FONNGu=p7^bfmz=^ zjBC5!F01}&4)dAa#TWY-Jn;!W{ zpmRE6C#N`-5*>XWP|pYCVnQF)`+^G|zJ@Dr6V=W&uG(NtQ*uDU4R6R?j93T{oH*5# z=0bXECr^D-L;bw}C#T8DU;@g;BQdFYoqxJzAA8`^(jPV@*SCVY_M*p42xOcewc>C5 zbB+hi=abKY@Or1p;V>sLfp3x_cH`J|@@@k7o-3-yEh4|30rDLj@FCia&q)v=Zia$kzpUE--Lzq&m_P1Oeg7=hzR@t_x zcd34LE8iN~wp(xg-G=`x8^8S0w(PMF_2lxsEK`G6<5$|DJ^kDEkZYf|5RB3G(anN+ z{COW!bnqdYYWuCLd$89^wCvvf$@tZe@{0Ao4|?;Ie3&A4Juj0W#eDib$>Z1aNIrbp znQ`gKU!T0D9{$!wuYU8&-`|QSfxGW)0xV9^tbB3U=%k^4M8zR}2&SnYJ1`TIbu1jv zgdDj@-N(HCz^i?mfxu?NtE*W|n{Ttm>Cnj2EMm5eL>lPxs^@wLQEtJ>_aX4=T6r=h zjK0+n`0~J`AfrF;knxSh-DT`r4@`n!Vblg{-{?c1i>fGPzK1nB^<{&~Zv4S_;pjF# zdpJ9)7ihV}*x(UzV=6x2Goq!FCS22x74fA)c|TX0b~Q_PZj+!TLH)m;^wt+i@!%p4 zxAEVfB=GzHv57`p0gv;L9vg_j!I|CP)=PXGIv^i%t%zPwn!U) zn@*d*;vu{FM>lzm+!;4XqamH1kazvaGlu+8z+qCpk0Tcap~naOLbhXC9&rS(wUM3w z$k1H*pSRZBg&P#B%J?um80RcFNJEk6dxP`H$eL*qqW0?tPw- zO4#NVPlU_l@PZLP<%OsfI=%-Vtk~mN|4O4z`@++r$)aYyiNwgbr2yr=%y}WD@9~cj zkGmG7>5UeZHQi5a2Dtpe6!a{zi73L%MHV>~e4DoWI@m=PR5xV93o&59^979;PHO-H z(k!Nom%Hx{$xG&}y_wCTi4_M*hi9o)7TF02`GS zW-eaXV7|~VjW#+oz8YlEq(m z$}9iCzOOIb`gX3dhXLg8g#njOof3W!5eGOi#`kQ^$($&4Fa355;#(tb?2|Qv;Wn001BWNkl~dOtt49J}4J#&}Nv9eQeKM)Hopojk_rsbCEE@T@C-*7i};EhE<*gfg*Wd zO%f1yA1L_cyjUY`?gGFvU2WORUuYU7#R;bT*F~Z~Wnd^_N)dka#W!6?E)v-M2~gbp z-a*>dn4BfoR*HS{C1UVIpCVdjTm}d;f-wjVBYuhjhDMuGnf8w~JOT}+a^+Wk)Bpd_ z&;cWL(WxD(?bpV}uinx-C)hT`B*Uv+nlgTFy6Yh=$o)Dd1>P*Pk2rtKu4ZQ;aR&&V-bo#Y}A9*#H z$$oq{5{H+?dg5%HwCn3pyZhETbmA=5(Mao#UGCDC>)uESSpVZY{G)Fg%*BUWo5#0% zZcDEZwGKG{G7gj5-o}<6e&wT6Jqr#C21WALI{l7C?a-&)kB+6bYU1$%rXW-#sRO~& z!DhzRTn^Yb6Y23)et-I;FPY&BZ=|FD!9_&HYH7Mnv$v6xV-`fPM#$!IKvLS2`830i 
zKYgQbxXZWJF$Rf;SAw(2dJ6<+FsSTD7I$B?crllMN|iBmJ38Nyzy=uOenrO5?DA?? z@m$}TphY|2EbO!sRUde}-Qf^8ZWwWrVZR{CW-p+|Mcu)Ez-z% zli{v`%GW>9554Ob0f`dHU9LI%)1T`+{HM3i|HOFi1QB-Pu59#qSsr|Ez7AD=%Su20 zRm-Y2*6d@78GqIBD`6ot3r-gsuP>m8KQW1t!i5CI;bSMw-lcKOB95z$fv>sdqnO+O zfJnD~vMJ+|8C)P}Tp2emW^g^XDk6{0B^(WIPw=ru=7diX;d%5FC-(i+{}}ZhDcTN- zxy*Tz`Kg}%;Yjjt|BxFq9TK~3Jmkp9iJ7*?4j3*PR}o#;T1R`a((K^2hMG350j8-^ zoGlv;$9N5og6vbSyz(umL*>fH-M|p&Og6P@AHC?%Isq5w@DC3fr82b@>U(VHuVR3{ zG2)Y^Gs$<%9h?c`mWSl17}ONv)scg-scV}yC3ez&e=KftA78+#B$Mvsv9TAkeQ99H zN}IQRdDX6Ltx=yz3TCdv!H(?Y3ps>ca$=-?&&3Gba&UZxd-uEJQ@v+h{fCZ>yU8E?t@kj8!Hk1+_8g04@$}D` z)?yh?bWdBf0E)G2Vw;PN`~sr*yvpYl zm@%-=EoA7*hu@#(iFYn$WXBs5Ss*uaZdo6_&hr79V4M&1bZ5aAOS13Js;euLQ zkgMh^^7OF}KY#TlH5RW4HRf-aft#3~_KFpu2o#q7n>e8++v?=uQM^d9>vfG?kYIp%QMh4pg#X+XFd}|maZ3c)w z3p$tH#1y}ulJWWY7JmGU&xsl%HMH9z1MI~F$!rkECl+27du^0DBHLHq_D@8|i>h&` z@ZG}du))^H@RH*bAChcPeBRt~$U^s*oGh9*At;_#!~q|_5fX;; z1$`+Is}o5>f5eRfIzI38lSO`8#eyPg-M;`vu5jdNE`}H!kCys;r@+^m2Y>mOf9JQqvzGIp!sbR zr&}=-tHzDDMLbvWfj9>yxyU$}Zcyo{-f`MDcpwggp5`rr7`hkel=3Q9`sP{i&o~sL zqV2bS(&^XU_Ji+-=IYpA5m_bGm06^#SsSg(c=n-}wrl9YciP4;a>s!09EOiP1s$T|HMMPc$-&R`2%}ZJB|{obJdz?a=L8QiK~8m6b}mZWhqZHc?!SS z2RFJ|I4iU5)Q63`V~OwBkHmXka#+mRlTS1zzlX<(8=SD~FTL=JUpe=s)6+BQJ2ty$ zHjbUAlWOcFC$=PTa1&d6IC^!E#bNxLyA0@3p1M!$%2;>~wN*RC-u-9$vSSPuN)|*` zNH*N~NuF>s_;cQ{(CVM{wHW6WZ0A7b<5>JSxvvj!6MpiP{%J=$$0x=!AMtPJ4S%10 zh&CV93LduPv(_YvH|4qPa9pClb78A5n&Q(u zx*+=7zxkWDfBXOZT2jott!FgHlH82X#OHNza!pS4k4W&*@l(fPTaRDNQHZsnxX{I4 z#)%w&Va4V!yv%>Fk&vt%rO(B9betpd0NXZJ&TVAI$M_QXp!oVFPDJ8uu*r`=}2cyz<)ztL^$&neX&I*53Tfug>;kx2sRx_A8y>k=m4vpP|vwdB`G$ zRPh+EUGBQkD<7Tm>(il;=Ui1X4#h{evb51ZI`8EwBaUQipZJU8X@Jc!+D|ORu3TO@2CHKeT=E$m(OC%UD^1$%TMnIz;Cw-WmR#UN^AV1@>lSl- ztgq6XO=8Dh8EE|rPVLm^W1~KpA{U(sZz`gwBh9e9XcI&FCR!wZ`&HTun0*>Cd>Znl zTR*{Qo4S9W8&12 zS8o~vHb2i5`^@<_w#B<{`}1mjz3TS1b2)*r*~IXHG1rFRIA z56I;0H|CG>71apN(Qwku8kD(Eu*Axo3;3$%GR3x65EtlsOI26r-}?6X4zFja!VTZe_}+hDN#L-|5k_ln&c=+Qn^+WO1#fQG3;|&8Obe z?~PZT($fE7KekTw?tbl<;GD93*?u00wC=o|bea2%)7`(jF0$d*7VD5@<+CG^k<5U#fQC8ujzO-%DL+Ll<`Omv2zy8+RZD-LysRP zPxKR$DmT-w{$&*QHOFa(6Cc9sI8_db zNp(mv$PqX4r;2#sevb`p?ceL${gTTci-}!u^VxssU&+Q-Uw!@d2cD-8TyTAip71Ei zQ+%SWT}uba@t^#e$fm}qhVFT-HZ=03ql-n+;Xg5g#ipp;-W&5Z9$<@Yg|x9l*K}PR zkbHU=i!;k~90^hAXN)n7K`~SZt5+zQ z3uN6oLIxdK`zkx?jSs>Q1Y?moFg6$;OyHwqP-m5xg~tV(&?MJfBZiJ$2B09-r~gCc zDPLJYPf5Ep>b8-2Ww-Q$kyibwPWh!D-1vR^J3)!_sjF1jm(r_STj`&;YV*h^nX+V3 zjoymRT6t& z6KliL3E4B`gS}%F=asmhsX*J(v1WW!#~1sXzK)4yYz327_``0VaJI9Dgv1g_LH6%c@;Zj zBpLU80YlvA2!#2=T@B?e-;WT7p&66-)dbHC9jUQHF*yS2h?YS0JOKXY9766cUq$=~ z4t&0Z=-T{Ig69nxnamlRt!<2X4pQ|k7W|~&c+|EH9K&xa#?2KZb?tO~H)D8`@r8+0 zoI7gMsC8+iw|3-~+j$NIaEgeRzp|xXIdE?!iN)Z1ktdIS<=Spt&{sO;Ims_D;Ainq zB8s`OcDs13pS;~~8`;`_w%ez!eB*>hfxh)aJ;yD-WlMk3ww<<%R-N{#EAtT#`nG{i zWjFmUmy@tGbXvCVrqvH<)r&0CjDjCr;pXHkPwBhK*Qe<8hJElcdGYeEUrr1m#med4Udi{CT-@LX#LY9Sr z#b(nlo@^xKd)(F=-ihHp7miGIGbl%1{dAmh?BfR|aU(yAKRg&ZZ2dDFwb2}(K5CT4 zSHD8d@kygEjmuV~;=kLtu0}Q-#yYulBbsnAs`Xz?-=;>V^O}0;8I+AT@zV-T7mFqX<4~$oo#mLl^(pEXPN}@)|&2Q>#Cpn;hWBoFmL%V@mg$$Kp*aj`3Pg z;si1B6TfoAF?t;3v*(?#8dGcBCUf?mjxRnKtBxa_8G|^ke)3Wbp$Y?(l)g2QIKuJW z1};42JOI<*Qy^a*2N!B;S(jMp5xei+sFq8fZ(civ!-awUiB_$nL&@9;M&*!9j*%HT zTAo-K6X1IQPJV8O)I>nrue^kAWn-1`N~g5SJn@QCxmWzHdjd>C6KLC!d}2NO#cREN zc-m+BG&P%7ov}E%%_6XIX}{9j`rE(Ii;zKVkJ=NRiFA!;xb^YKGS+v0%ByYLl?hMq z0jRIrr>#eO%d`!>xU^eBqu0rK_vz?l43Ay#g2J5NZLaqB30YhAMS@0}{M>l$sIJ{( zFXzV+x#A4%>0jevjLtai@!42yf5djt&EqA~k!obp>2+)=lY_!B?S*F$)E zAKsfDuf);8eiPCSUlR5i5WZkXY2e!@dHBtu=;4ny%NZZZ!XtNC&xlt9m2vIXKfE!H zY@1?QM>dh8ZMHXC-qOlj2DXRnbs-Xf$PyYC464E4tqL0TM{W1T=Z=Co&P6V)$dR98 
z;L{+;T2lw*uy0vRz~R^#@=YPvRGpb-n(nyFuP^X0(MJ{C%^`zerU@VQt3p7P*)-bM zfSM7bmD#nEGV;ZH*2tIk)WuGDJ-I4V-^wqYKBYd=eJ>lC*SsR7zT+L*_HC#cD0RwH zWc0$GV|{`5w2w&n+wS{zPK?r{HwMPuop<*E$tRzF{eJBO6Ox7Hnq*hIc*CMQkQ*QPB>pt6#bpUEG*XQ^KEfSzeuPe2zQXR~QY(L*HSb4UK03EDC_YecRi=ZOmA)F`o1K%pGh5yT|0YK7+4~ zkT3MP(#;x?O^A7Q;t9|5M$Z-yPh#b4 z5s!Jf@*N42a>r!yJ>a(BoK(CbB(6EJy7x;PnhVyo%{=$6_xJmD{xim;KyI&f;1B#`Fu! zSui|U_xVzPQ7TSzw_||+O13`2r|f>^gPXiWietv;%C)D{>v>R};#qsimw)UW?8YL; zW9fvt$V@q~f$VnqwQnG(|J^&ZK1$6M8mQm0{ zfBAug#2lBUYZ(JdUb4c%>i3N&(=^#}8y~=w6n1%q?j=?^(PrrUXj67`OuG_`Tig63 zAi2f{7}tZ75R}(Zxg;Euc+tqXj;iDhF<7Qf{%{o8pe0xLJmuL88wETe*-9>8zSP@= z`F)I?bL?fULX$Q=e8sJM5x)DDZ@7f6Kh?ga&jlp0>tF67)Hj5{Mo?V+edgj|-om)% znAmQUXRuW~-_p<#oD~0FgfEDg8zehChKXFz8;B@0G3lM-liq%7BtL40*C3Ph9~y{_ z1tw+7n=rK6tXySFr@!0upXC+*Nuy7xAG9m8>&W_lgG(v>^i95ei~Z0fJ&ouBQGFPd zEj|5bF3>%>^+Pc;*}-^^SDSEaW9iFSbza*$I)k9eG3i(7Lfkm#Mf)K>A+ zzhm;C6JPcNOnfRnTKD(e?>O;ES|V@%YX8vDt1qpM<=k_=L_dr3eT_q|TiY3zg{`JB z*POM^9`}y%49xa{_K}}W$eR&9Oq=B|JhzJ53--P2=vcBMvzJ5_j z2LAe#wsYIRs7>`HO@}x<2vU4K6$+2lhe0KcpEmBiUc~ z{R$m=WNIQ}S1<7`U2mepPK@}&9~(BGfKW@maM6Wdei0>k`u0ohvj*W>e6yci4XC)n zreIC+!-ervplOV(?V`h#ljjHsXPye_o~w>p(9 z{*j5j_K~kl^-E*=v{XjoF7203ee^=5Us~JLEp1n(bn5yiRFfGVzA)pIU>D=iKh6O% z;Kq0N)ZUtOk{?bWUv-bWPdkKB-TGQ=5aHkAy4i`%jY-=lt##jzNo_>)U_a@H|4|+e z9o_m^U$QX1_D`BCf0G#dTdPMexj5t4T-DA`hY>1jhUtjLjyT^!zXYOpM4Z&;z3%x z`4+`Uiok7u->TQIgX&WW-TV#ganI&&U4i=*J#VQ&Q1O^SpP0~t=7;+D#D{oaYm(Z2)^_|M zhYSW%pJbb`c-4*E1U^F5uP^nZHDX)87-QBbPriwxSA1xd%Wdi7$Cja8+m%Om%h2DY zI$OT{C+&{Y6TS5rhpktgp8Vyt-u6qY{atIsCk{sFXT;!|C z`I&-T+o@}(`jOq^9RI3?JSFvsMP8V`@!Q_Ap&k6%r7kw@^lizw9MC0SxZnkP;&Kir zuHa2aK7ol?@q-=6$imIv%{O=OqnjaPguH`9P?vpK@_Yf2TAl}=^@#^d^U*}|qQB||LGnn8oFDp8kN+6w z-A?GVXU8Sa{wZY3!zOl}`@Yywdp2<}a!yFF_%!@2N;V)}<$WG478SFw|}om=AmraSD2fR*7heT!K<&(K5nBQts{39pmOR* zWlWdir1XYgo%&h-T9^M({gm6btFy76wDwz9|EbeSK>Q~^%mX$cPm60z!KzH_k7%V+ z7boo;U&UA7w%l&tb>bWRoEY~Fv+=^;zV7}jI{fOAo95-Bi{`XIc$E5IKeu0pmUArq z;0LBTFX)c%&p$7VrKsI(EOg$-%PPd8UwqCP<~@rW8u6iVLD{7-u=9EG+3SK6ccg=d z576RU9&={gn$)Z-mKeGEUmx!MbB?34;{fyY;f9|Uer#K`^GpFTJAQ7rYL))X7vh+< zf38K+)8ydTTN4Mt?f>GAWj zDdvp46b5vZd<)+4p~r@D!N#tf?!3kb`DEu)LB<=}i3xov>fg*o=N#vmQ$Wsn{grI^ z3wXrheRXfZHVHBiQXvtjtnGHv!)$QWI(VO7aaT+fL^ILZ$&qV zB-57-3HUKJzl001BWNkls{;^L4Dq9lR4$^cSzD zW<`k}^6|0uE(CD3z-JlIt7k3v-+!l9Od=S{oef-rsuI`-pRHoX~P9CT>a zDK7N*CJ`?w89&e8G60N}aja{D$+KOL8@?E@eQOX)Hpn$JBo2`okF$<>)16n=xXW7| z-&-~tw6|O?ti<#MmY#R0Uq;jcJxb!^!T?!z^fJ`+MS${ylKC{E49^z|_Iv1RkG7=d zA^;;Q&?gmYckT~o;*9e6%^d_JQRFZ(SZaYxAaQ>IrWKs`xJb6G`4mPYWpAg*gRdYP^42@ytQxi*l zyW@BvmEhJjfDW~Ha6&6zy~|$W#lE=qBeKCjOjsUuL`&%M-?xqwu7JChEQbyv_;a^v6dSn}^EyZh$V{tfesyf{qfj z4`~|H8@41c<4-1hr#S5#Y#Lct-5cKk#LvVCM{y&nGF3V{@>T~vvw;&eR}|8+p%5?2%tetfv#yYN(kl;7|8FIjE5_k&oXFhN+^L=7w`OFbpOddA2(*u}GT7TneGOJd z``8Fpc?TQ0lmGHJF74vLr);^>pl>8g(&B zlPfM@7Nhu;+4WN&9<=_oU!BTR7q{*5up9d)&dL_2eHOT!({a4?+AXHMN~{x9Y$BC0 zJ95YW+Om}NkjY>C3;pCUnBd(plhcml)(s}U3HUO{qSV|o-+O_=f?6A2i|1}ugB=Ice!LAYTLT@YoCKGgB?@F=Z3oEqIRsLutkJ!ndHM~h_^K(3T6{<+p2Uzj)&&^QgT)1ht03a zp<14q)RG%c?Fl`YK(_u$b=JKl%WUxt<0uNE1~v!INLIXfbd0+6#*}xMjIa3F!ZfgD zbWeUrh4>yO3voEfURAWLy*mTMo5Bg!oYjF&2@`ddjr0-?t5;j!%{fpcclOiPN<^~K z`C4STFoO1NvG#t3b}izbUqeSlu&8{ISUmyk_vT8LuP+oa`>svo$a2T+|}T) zj+rTY|BJ_MYkJ_n^{IADZD+f><3Qd2G7^afzdK7npKoqTZo>{tt8$6L>#Fb z9vAM=TAolOlgYn7-_S7{@8v8=KVJ^`!F=H`|UoPVPZ%7%Mv>@N^-!4fb z0Z+Crt?GKb())gp=@IDj)aYJ}&21#Be&0O)&;yn0rz)}!jdh(EoeWmvz~id-J-_7& z1|3<{78Q|0PsdPywQDbvQ4`KwAI+4wySRMzLPs$Wb2>$fxdp$ zj0Nq%YEoj1ALx{VoY0-+t_6?FcfSa~vG@!w7`WNdbgd~Hq`FbdQ1Regw#crahx7(* zMGc`nKlF(-RAy}zv&!A;`?cjpz|FdJE#sD#C(+&c%v{cN+0FkhJHcGeOC_z$do--f 
z-m@*;_n4mga8!)mhT&18WP5skXy=2f@(I|V|KScpNq(h!`(^O~Q`&8mJRha%wlEH_$S@!7j!@-YVTjNMG z)5zZ&MScJ!!3?tv%sQ-;xD{!=5q;2d!9RuTcyl?$e_U@q@Zm~Le|oj-Bi7Q)>7Y(i zx37`i=2w3#&jJ^GUwoe-tHd%@L#IiUCfv0Qd?R`9K0Rzm*$6fkpkU-%8x=h8CSfbZ zWrILyhxWZbt!!F4eQOc*Z`0W;WzH_cCw@1l@9&IDQ-l6ll09hL^wP`Rji>}HOH=Q= z@Wxf^n3lG8Y4kfqpYKm^C>w)(ep&35OA256eNw-D(?93c_4xXXl>D6LwKne-NbH+?V6RQBX=YK^|#Z|mvt&ahhfA^Nd7 zu+vWA*qOWRrs!T^T*t$#pszqnWUpbvLLt=AW@^tKZ)9_>WE9>|O|C?ouX>h*>i?@L zXPl~RKRXRQ-W6^Rn||mg$d^V{&Dpy3RieKK6clz_;2V6E*Pzo~K90!OdzPnd;Xmko zO&IF{?^Bgam(+2UAH8zH%?E&M2HF7=Kl5GjAtAb_iD1^52OGPzNU!Qy&n`Aum`q!A zdhBWTsL^YzAsmhC%Z%*N-^(z?l=y(Yp0BWL2*QqS+DZwnq(>*CAmgH-$Fz;9pv$H` zE4z(bbP@OB3v*n>o!9(g1S^-GFRx5`8a{!`$Q{G}Fja|YJuB&5vfZUm8~@rgV{07j zcInAF7pC?m(cwSXqvN0OEB$iXU;b{~C3>8Pkq$DHmf|qKyDbqztFYY&to!JZ`%Aq; zTA>nghe-+k@?+d#xWr@{t~57QsBK+0vcZAf`KFtD40`$Qs?EdBGj7ANOqmWBshOg8 zcMm<*ZvXnq*u&|eoQz-RAZ%kJ)B!A*X_Z8J- z!rvxfU;T@5C?0*MjP}Udx>su!U>`HB<#Icu+`w8q=$vEx(mE>sS>T0+=`&H$j%siI zCg`>l2Iff$v~6l1CR0Oxo!0@6!p(f%@bgy^Jj&j08{0pqmOe(g3};4uPz_x^IQsTv z^QZy;24l)(FrL~QTpLhgt~(rMJRTQs*e?X`dS&!7_d-Z3qMwyj5}5(fTn6nb5q?n)_*zE z@5a?EM4v^TMe;`YkN;8veby3d7iMk>`gdKCyg1+_U9I%>)Llc=uUD?4jDY5%@kB^y z;uwf8&plTroK2#NN+a{t%4wjjj}tJNHiatwkNUFu#+>7KOz!;P@)3!m1bNh2(k&;+ zd%ETwquTgrK>PwdMT*GV;64i7|Vwe9j3l={{2FRwVv|U zAFRFSPljSZ*_5rWgT>*BnGz4e>A%0PRViswXh!VxagyXd;d)5%?fE=cgFD_EecUfM zHoLk*KZnHWwma&v!Jxea5>B<5HK~ek#JKyU`zW%qz5jS$%g}nNbVz6!g+9XeK<4-s zrRv@_mO2byMJV2iiBRrdP+2f@MKi}|sd_WXJ}&DeJ{O)k#mAh^q}ma-Y)m#XSQ<3x zuSx$!B*@Lb^Bp74AG~)#mDbCn2jRAgKOt5oT`#>lu9nhn_U0XKvpWCRU}_A){Kfc3 zyWSGTpx2=K&n>V|Z{OS0ot+fExTak3Ai|<``65^{YKk;){`E0Y-|MjwIO!GB{TGsa z=3RERq+{%#Gd6Tl--yHq!zAXiP5!H2m|r09LCCOmYLLr zO!fW=B_9Y0u&QI7T_MX|zZQ^sLKyVZ?9_wt?GQe-H?jVw@FmBsod_G%t+8C0f}E!8 zIJ(ZPBG(bJcC@}++q{-kW##K$Y25LBTvYB{X=9@-Vf$os+=JkQJPi(-WE;)ZgY6Zq z`IcEw3AHWB#u+%rKd|}mOW9O*!*cY&<1+peqmdncqsv)B(6}kH1QNvtaWuh_Nm^w5 zi;vn|I{IIE;TP-;XrW6UAx9AVqXDhyM}_3``UF)Pwhe!S}i(5fQZ?xshHXkW_B?gnqSg!h{- zKO7lZ`W7dLJnaIG7L+6ZHb-s2Tx6{m?kZLVI2hvVE(0`Ld6(Clm_1|W)Z{5o731CK z?oV7EVWdeNS*bnYC8J)fRV2G1vr>g#d_z=fbJ^~DZnjy4nwkVPid1yOKaS(jJcP;g9 z!uCJ2?Ou>Xjq)F6rC4tabh}lya*_+4Ve|TkRi!W0l|_7U+|d3PxU@nSpYQmST}G|o z=WaclDOO3@gLeR42}y}qaZ%dC0U72U#1y_s)e=VZ^f*HR9LYDL;i zxI0+}tXi5I3=g+GI3k{cSkU`kG*eq1WiWA;}jnKcb(jmAmid9b6AQnJk=QdtCae&*1h;#3r;Dz(Kf z#+hD|C@&p6n(Ej4>JhqmFLPtc**p34K*b!oH6rBVjW^WwI~uME->u6?cbi7DQ31a+ zb|5RjA8L4t{_wcb^;TCI#pP4q@BCefbl!Y%J>ABvSQuf|2(I-pTBU@bQm;lGlDU26 zRXWJ%O2w`tsR4!rhr)!+dht-!&8l#A?AC3V?yn?Wx;*C=p}s%ZwV>rbT}$0dDwTZ$ z(r-v^h$pJ7bBcew)3w@w>L=vEwUZ@$!qm$l&0{1 zZx`^(SoiN>19xvuP{kgQHpG1ZqvJH*G2%mwe<$FoHJ!}rvdf|0)j!TAh z0A9tZ_g2lfTxQ*HV|#cW%xsGDslvR6stlTL4IyvH8ECfmjjWC8+>~5E!`IzpzWm%< z2FM4rwAJBNRLU+|@lVn{z)@4ldbpvwY2HHoMYsnososN&+%$nWbJfHNgsuVlqB?%W=&OXZ67c(N-3af zF{bz?7bbSBt5&Glv~gTJuduKe+*tPFidxhGUF2gDEb9j&sG_9t?c4d06n*NQg;>Yh zOiem_n~c{Y-g+Z9wj;=-Gm>NWuOsfm>4qX?r2eps+>KVB7-_Po7#KSBm>9g^)BD6_de{s4-zY%ezRc~RIL=PvGlB} z?voY);pt?~SXj%Amdrqy8sJIW-mql%x!BfE0m}ASNrbKt!i` z&LzTV^L4kUBGZ*KR~?nI@A-v}HRs3MjLrIC)x8vRE>S?Mnm&SOwq8|rzU5Dfnk-h{ zn<3Tv6tL+P!tam(c`*uN%7@X~lDBSiZQ9X~1yOdUgc-|x z+0F3B!P~i9BeKTEmfa8E+NumTd!>s~qeb@4Nh3Aj?{e`xEu-VLj&-cM=sR(h1R_Vj zU++bD#d{8Pt^{9uY2@N`SV~FDJ+Skb%KqbM16Hq%IAh$~t*$ zE?UpPdq}7bJs36cp~%|CNF(tB2`iqrAgUP4u$`yCnSrZ*}moLF)Am>TveKTgGD z`$2y$zAi_7d%qBtpZhC_2W^Nf@TJ1c@pu*a5%==SRhfaPp$VI%Q13et8x2vo9`7 z`zJ&=NJMudQA0k%XX)>5&ib|fWvq0$#ma}18!iQ^W9z?MUnh5Ix!D_5uD|!2TCQxL zc|a}r{N=D^!1+>^hWF#ul-?7+YS@WVO5TH}d#B9(j|_fOPcxAlQOZ^CAkbg`>1nFH zMz8ki{aqNr@%_i(_tS9V$6r!PzlRdWPsObV66dPazNrhBkEb6lRr5P&b+~t$GC}Qx zg&k{txi}f=KKVK&{?)|IHKo{rg{?ybr4j@O0SAnIGmXQX&`nDWJLuXo4ysATbA 
zi>d3tjf#d6JM~_|!_g>RA~}8~cPuV*GFw9K3~Ai0p?E5*V*U@@`?<8TjQ7LZkic~D zI}Mz{?Q{PKR`f)_{?StUQpR<;^Ve*@9Q1cioSdoJI&<3?zaOQZo@i^K)-~0!GyjF` zKzqpdW<7?z1D+GT^4CVUm$Juks^qooKNdxO>fnx_Wtb%Is3=k3A;KMEyf;-POOe6ForJDM$Yz|n*~!lfHJcOfsq^uS5^j zY`|Pi|8L%%PWqFudvi*@Tq9-U2mx?Or79?|io-r(o=Z8I^Xns#$T%ncNIj}1Ei z*q{**WWd$Y2QR-dU$MA3yAk>f3e@y(6aZmDV*S^Mvmg>7Yx zo;wNg&G4AjKQ}<6-X?6FU6%iUkrY@~9VK<0mD=ky=NBMVnpt-FfeDxOp_1_nMKq`zFFYg0X!0*-pcr zxYh*AJMZm(cr9m72dpJyE2G9j(TCE85m)seU3#(@vKv;k2VgMxCv^vSEOFes!8=8; z606cM<%jxLfh8NP{-9MMKp&PWAGEUMj*2@Q1HRj~roA3EdC%+jUmw_<=2@q6tCsNN z7Vxz>)wx?sIDI3+t6{wIz6y3OiAmT)n7qFXrIrlDVnzj>wgJ zt{0@|iuImBX}>jVllt zUQwT{dr~2^b$8)_+shxhzVpLp)xKr>89!~em=0xsO&vdudfLo!X@p{LsgGHJhV9dg zkGOf51sagf;!h;pZMv~lwA-8%SmzEhDoL9DBln|CQ}+ZGJ=firOSsMmKLGYPZ}EmK z`1hYOMZHT;z^*h%!_h|rAXU?LsAm)dz6-QsZZKmzkaN8xDxY%_IeE1o) z@S)VOy+3+(7gvRDH99_A6xDkHG3le#A5@6y6r{c>TlWO zs8cpkF5%aUN}K)EV<&V#2+wFIH$)z-CSZY5WKjJ)97J8Sy_}YjZ)gaa^bo z=Kc~I%AUMTBF7J&E5R?LMWy0lsj~GGM}j+jj*TXrLkX(fmhVScafaVdh*AX$=B7N5 z@VYcO7m}*?E&vE9GT?dIEzOSlj&XcU! z$`6~p^;K~M_ZEiJyl3!VGX3w2Y_PA2y8?IXGI?h# z>Rmvgy`g>Qr!1KmAiQpGRq_mBxr>;adbd*YAIQ)9vSRC0bh#X*snU$#4}$uk@4E571+5E7N82R#wC8qCtzy{C-b!?P;w4lU;uzh3*_!{w(TOS*q{ZeDzEcY-(fwtT55e}mAYjE$&P!h zYT~xI_RLHXZEK>o=N}+Ia9XFI)U!GcKI8C3oyL$JFt4e)F@QpTxiNgoDND9V8TP>N&TJoh zl=tpI6MC{#9qG=SD#kw>g<>5%0I?<9WZsdSd*q^Eo+otiD1^x$$_F83n>GBldyp?j z*K(I8{)3Msic5=8Xjd9M-2qUNs_2v9(MX6KVoOt$lcF3`�P-Ls5Uqe*7?A3ivgQ zuR&@P-B5*XQN+>0FE|=RP2h$vGxZ3}6;zZ%dT|92F4~=}tEHbdiZ_Fq(@Bk7TG=lEeu(^<_z?zi-P&$?(gV$7$ne0^k75Uv8@HIpX z#ik>)VQHS_)|SnU>1xF6yt|>iW9BzicNs}oa@7fZI)?pcdp54;`moQ7%hVP;s6d_jr!-+WK zWv}i!lbmqsk|TRE--iSUF>hB)F8VR;(s1N`SDKS*9eAmi%IwG>rB8W_s=-^p(i5n^|)W86{kmb5LDJ;U>a7F zfx|-wOvE5dKuHcNmFot8xaUSSky>bwWa*Z${}r-OY_w&H+Pnrgp82ZK4h$=jBQd?y z`LBe&LHq^5Tcm6~_;cBo0G_z+Ye`iK)Q0(A1czDLW zWg_nKq~y5c7+`PYNw6hC}wXK43b!8qA7hJf1$Hr{6L z1HoLs;tF=?aKt#a+vN{jEv(>aH!R!7>u;xjQ6iA6-dKMmIZedVrdt_XWAV_!twEk#Cl0dk7KjsdUUiN!E(i<7`Pbut@Cq0s zf*o<`fTVFK@MPz<5?dIh$a2+h@{u)m<49;46O`NZu-av~z9dYA)ZjhCZ~sKApiAMx z{3{3FI&;Dmgy{RQ#vAz=gY`@E_r%1lLx=hg>?RxjU_SCc>vA?lsd<2%?|A`|zgJJm zy@VRm_{B0GUp*%;E>_eV??}AS;0=@ykw;C0%LUE9t&^N-adKilG!WQhIiK8ww9rf( zmJR1l4O-zPqg-hhZ2xMii$}M+x|HJE7E?tka>rD@*3F%8I|BSqw0(^7@D!MTKV3Bc z=z_H?5B@u&l#VPHjm6G19$8Ep*gy~HvF5vjbrGPRQzY15@pmtTjiy^1anxqI0B)-? 
[GIT binary patch data for the logo PNG files omitted]

diff --git a/apps/shark_studio/web/api/compat.py b/apps/shark_studio/web/api/compat.py
index 80399505c4..3f92c41d02 100644
--- a/apps/shark_studio/web/api/compat.py
+++ b/apps/shark_studio/web/api/compat.py
@@ -30,17 +30,13 @@ def decode_base64_to_image(encoding):
                 status_code=500, detail="Request to local resource not allowed"
             )
 
-        headers = (
-            {"user-agent": opts.api_useragent} if opts.api_useragent else {}
-        )
+        headers = {"user-agent": opts.api_useragent} if opts.api_useragent else {}
         response = requests.get(encoding, timeout=30, headers=headers)
         try:
             image = Image.open(BytesIO(response.content))
             return image
         except Exception as e:
-            raise HTTPException(
-                status_code=500, detail="Invalid image url"
-            ) from e
+            raise HTTPException(status_code=500, detail="Invalid image url") from e
 
     if encoding.startswith("data:image/"):
         encoding = encoding.split(";")[1].split(",")[1]
@@ -48,9 +44,7 @@ def decode_base64_to_image(encoding):
         image = Image.open(BytesIO(base64.b64decode(encoding)))
         return image
     except Exception as e:
-        raise HTTPException(
-            status_code=500, detail="Invalid encoded image"
-        ) from e
+        raise
HTTPException(status_code=500, detail="Invalid encoded image") from e def encode_pil_to_base64(image): diff --git a/apps/shark_studio/web/configs/default_sd_config.json b/apps/shark_studio/web/configs/default_sd_config.json new file mode 100644 index 0000000000..7a98a441df --- /dev/null +++ b/apps/shark_studio/web/configs/default_sd_config.json @@ -0,0 +1 @@ +{"prompt": ["a photo taken of the front of a super-car drifting on a road near mountains at high speeds with smoke coming off the tires, front angle, front point of view, trees in the mountains of the background, ((sharp focus))"], "negative_prompt": ["watermark, signature, logo, text, lowres, ((monochrome, grayscale)), blurry, ugly, blur, oversaturated, cropped"], "sd_init_image": [null], "height": 512, "width": 512, "steps": 50, "strength": 0.8, "guidance_scale": 7.5, "seed": "-1", "batch_count": 1, "batch_size": 1, "scheduler": "EulerDiscrete", "base_model_id": "stabilityai/stable-diffusion-2-1-base", "custom_weights": "None", "custom_vae": "None", "precision": "fp16", "device": "AMD Radeon RX 7900 XTX => vulkan://0", "ondemand": false, "repeatable_seeds": false, "resample_type": "Nearest Neighbor", "controlnets": {}, "embeddings": {}} \ No newline at end of file diff --git a/apps/shark_studio/web/configs/foo.json b/apps/shark_studio/web/configs/foo.json deleted file mode 100644 index 0967ef424b..0000000000 --- a/apps/shark_studio/web/configs/foo.json +++ /dev/null @@ -1 +0,0 @@ -{} diff --git a/apps/shark_studio/web/index.py b/apps/shark_studio/web/index.py index 58b0c6c00b..05a9bc363d 100644 --- a/apps/shark_studio/web/index.py +++ b/apps/shark_studio/web/index.py @@ -5,9 +5,6 @@ import logging import apps.shark_studio.api.initializers as initialize -from ui.chat import chat_element -from ui.sd import sd_element -from ui.outputgallery import outputgallery_element from apps.shark_studio.modules import timer @@ -75,11 +72,13 @@ def launch_webui(address): def webui(): from apps.shark_studio.modules.shared_cmd_opts import cmd_opts - logging.basicConfig(level=logging.DEBUG) - launch_api = cmd_opts.api initialize.initialize() + from ui.chat import chat_element + from ui.sd import sd_element + from ui.outputgallery import outputgallery_element + # required to do multiprocessing in a pyinstaller freeze freeze_support() @@ -127,27 +126,8 @@ def webui(): # # uvicorn.run(api, host="0.0.0.0", port=args.server_port) # sys.exit(0) - # Setup to use shark_tmp for gradio's temporary image files and clear any - # existing temporary images there if they exist. Then we can import gradio. - # It has to be in this order or gradio ignores what we've set up. 
- from apps.shark_studio.web.utils.tmp_configs import ( - config_tmp, - clear_tmp_mlir, - clear_tmp_imgs, - ) - from apps.shark_studio.api.utils import ( - create_checkpoint_folders, - ) - import gradio as gr - config_tmp() - clear_tmp_mlir() - clear_tmp_imgs() - - # Create custom models folders if they don't exist - create_checkpoint_folders() - def resource_path(relative_path): """Get absolute path to resource, works for dev and for PyInstaller""" base_path = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__))) @@ -198,6 +178,7 @@ def register_outputgallery_button(button, selectedid, inputs, outputs): chat_element.render() studio_web.queue() + # if args.ui == "app": # t = Process( # target=launch_app, args=[f"http://localhost:{args.server_port}"] diff --git a/apps/shark_studio/web/ui/chat.py b/apps/shark_studio/web/ui/chat.py index 3a374eb5e2..917ac870bf 100644 --- a/apps/shark_studio/web/ui/chat.py +++ b/apps/shark_studio/web/ui/chat.py @@ -5,13 +5,11 @@ from datetime import datetime as dt import json import sys -from apps.shark_studio.api.utils import ( - get_available_devices, -) from apps.shark_studio.api.llm import ( llm_model_map, LanguageModel, ) +import apps.shark_studio.web.utils.globals as global_obj def user(message, history): @@ -186,7 +184,7 @@ def view_json_file(file_obj): choices=model_choices, allow_custom_value=True, ) - supported_devices = get_available_devices() + supported_devices = global_obj.get_device_list() enabled = True if len(supported_devices) == 0: supported_devices = ["cpu-task"] @@ -240,9 +238,7 @@ def view_json_file(file_obj): with gr.Row(visible=False): with gr.Group(): - config_file = gr.File( - label="Upload sharding configuration", visible=False - ) + config_file = gr.File(label="Upload sharding configuration", visible=False) json_view_button = gr.Button("View as JSON", visible=False) json_view = gr.JSON(visible=False) json_view_button.click( diff --git a/apps/shark_studio/web/ui/common_events.py b/apps/shark_studio/web/ui/common_events.py index 37555ed7ee..7dda8ba268 100644 --- a/apps/shark_studio/web/ui/common_events.py +++ b/apps/shark_studio/web/ui/common_events.py @@ -7,49 +7,61 @@ # Answers HTML to show the most frequent tags used when a LoRA was trained, # taken from the metadata of its .safetensors file. -def lora_changed(lora_file): +def lora_changed(lora_files): # tag frequency percentage, that gets maximum amount of the staring hue TAG_COLOR_THRESHOLD = 0.55 # tag frequency percentage, above which a tag is displayed TAG_DISPLAY_THRESHOLD = 0.65 # template for the html used to display a tag - TAG_HTML_TEMPLATE = '{tag}' - - if lora_file == "None": - return ["
No LoRA selected
"] - elif not lora_file.lower().endswith(".safetensors"): - return [ - "
Only metadata queries for .safetensors files are currently supported
" - ] - else: - metadata = get_lora_metadata(lora_file) - if metadata: - frequencies = metadata["frequencies"] - return [ - "".join( + TAG_HTML_TEMPLATE = ( + '{tag}' + ) + output = [] + for lora_file in lora_files: + if lora_file == "": + output.extend(["
No LoRA selected
"]) + elif not lora_file.lower().endswith(".safetensors"): + output.extend( + [ + "
Only metadata queries for .safetensors files are currently supported
" + ] + ) + else: + metadata = get_lora_metadata(lora_file) + if metadata: + frequencies = metadata["frequencies"] + output.extend( [ - f'
Trained against weights in: {metadata["model"]}
' - ] - + [ - TAG_HTML_TEMPLATE.format( - color=hsl_color( - (tag[1] - TAG_COLOR_THRESHOLD) - / (1 - TAG_COLOR_THRESHOLD), - start=HSLHue.RED, - end=HSLHue.GREEN, - ), - tag=tag[0], + "".join( + [ + f'
Trained against weights in: {metadata["model"]}
' + ] + + [ + TAG_HTML_TEMPLATE.format( + color=hsl_color( + (tag[1] - TAG_COLOR_THRESHOLD) + / (1 - TAG_COLOR_THRESHOLD), + start=HSLHue.RED, + end=HSLHue.GREEN, + ), + tag=tag[0], + ) + for tag in frequencies + if tag[1] > TAG_DISPLAY_THRESHOLD + ], ) - for tag in frequencies - if tag[1] > TAG_DISPLAY_THRESHOLD - ], + ] ) - ] - elif metadata is None: - return [ - "
This LoRA does not publish tag frequency metadata
" - ] - else: - return [ - "
This LoRA has empty tag frequency metadata, or we could not parse it
" - ] + elif metadata is None: + output.extend( + [ + "
This LoRA does not publish tag frequency metadata
" + ] + ) + else: + output.extend( + [ + "
This LoRA has empty tag frequency metadata, or we could not parse it
" + ] + ) + return output diff --git a/apps/shark_studio/web/ui/outputgallery.py b/apps/shark_studio/web/ui/outputgallery.py index dd58541aae..a3de6f7b57 100644 --- a/apps/shark_studio/web/ui/outputgallery.py +++ b/apps/shark_studio/web/ui/outputgallery.py @@ -6,7 +6,7 @@ from PIL import Image from apps.shark_studio.modules.shared_cmd_opts import cmd_opts -from apps.shark_studio.api.utils import ( +from apps.shark_studio.web.utils.file_utils import ( get_generated_imgs_path, get_generated_imgs_todays_subdir, ) @@ -22,8 +22,7 @@ def outputgallery_filenames(subdir) -> list[str]: new_dir_path = os.path.join(output_dir, subdir) if os.path.exists(new_dir_path): filenames = [ - glob.glob(new_dir_path + "/" + ext) - for ext in ("*.png", "*.jpg", "*.jpeg") + glob.glob(new_dir_path + "/" + ext) for ext in ("*.png", "*.jpg", "*.jpeg") ] return sorted(sum(filenames, []), key=os.path.getmtime, reverse=True) @@ -52,11 +51,7 @@ def output_subdirs() -> list[str]: [path for path in relative_paths if path.isnumeric()], reverse=True ) result_paths = generated_paths + sorted( - [ - path - for path in relative_paths - if (not path.isnumeric()) and path != "." - ] + [path for path in relative_paths if (not path.isnumeric()) and path != "."] ) return result_paths @@ -184,9 +179,7 @@ def on_image_columns_change(columns): def on_select_subdir(subdir) -> list: # evt.value is the subdirectory name new_images = outputgallery_filenames(subdir) - new_label = ( - f"{len(new_images)} images in {os.path.join(output_dir, subdir)}" - ) + new_label = f"{len(new_images)} images in {os.path.join(output_dir, subdir)}" return [ new_images, gr.Gallery( @@ -223,8 +216,7 @@ def on_refresh(current_subdir: str) -> list: ) new_images = outputgallery_filenames(new_subdir) new_label = ( - f"{len(new_images)} images in " - f"{os.path.join(output_dir, new_subdir)}" + f"{len(new_images)} images in " f"{os.path.join(output_dir, new_subdir)}" ) return [ @@ -234,9 +226,7 @@ def on_refresh(current_subdir: str) -> list: ), refreshed_subdirs, new_images, - gr.Gallery( - value=new_images, label=new_label, visible=len(new_images) > 0 - ), + gr.Gallery(value=new_images, label=new_label, visible=len(new_images) > 0), gr.Image( label=new_label, visible=len(new_images) == 0, diff --git a/apps/shark_studio/web/ui/sd.py b/apps/shark_studio/web/ui/sd.py index f26c7967e3..6cc0ce035f 100644 --- a/apps/shark_studio/web/ui/sd.py +++ b/apps/shark_studio/web/ui/sd.py @@ -1,35 +1,26 @@ import os -import time -import gradio as gr -import PIL import json -import sys - -from math import ceil +import gradio as gr +import numpy as np from inspect import signature from PIL import Image from pathlib import Path from datetime import datetime as dt from gradio.components.image_editor import ( - Brush, - Eraser, EditorValue, ) - -from apps.shark_studio.api.utils import ( - get_available_devices, +from apps.shark_studio.web.utils.file_utils import ( get_generated_imgs_path, get_checkpoints_path, get_checkpoints, + get_configs_path, ) from apps.shark_studio.api.sd import ( sd_model_map, - shark_sd_fn, + shark_sd_fn_dict_input, cancel_sd, ) from apps.shark_studio.api.controlnet import ( - preprocessor_model_map, - PreprocessorModel, cnet_preview, ) from apps.shark_studio.modules.schedulers import ( @@ -44,46 +35,167 @@ nodlogo_loc, ) from apps.shark_studio.web.utils.state import ( - get_generation_text_info, status_label, ) from apps.shark_studio.web.ui.common_events import lora_changed +from apps.shark_studio.modules import logger +import 
apps.shark_studio.web.utils.globals as global_obj + +sd_default_models = [ + "CompVis/stable-diffusion-v1-4", + "runwayml/stable-diffusion-v1-5", + "stabilityai/stable-diffusion-2-1-base", + "stabilityai/stable-diffusion-2-1", + "stabilityai/stable-diffusion-xl-1.0", + "stabilityai/sdxl-turbo", +] -def view_json_file(file_obj): +def view_json_file(file_path): content = "" - with open(file_obj.name, "r") as fopen: + with open(file_path, "r") as fopen: content = fopen.read() return content -max_controlnets = 3 -max_loras = 5 +def submit_to_cnet_config( + stencil: str, + preprocessed_hint: str, + cnet_strength: int, + control_mode: str, + curr_config: dict, +): + if any(i in [None, ""] for i in [stencil, preprocessed_hint]): + return gr.update() + if curr_config is not None: + if "controlnets" in curr_config: + curr_config["controlnets"]["control_mode"] = control_mode + curr_config["controlnets"]["model"].append(stencil) + curr_config["controlnets"]["hint"].append(preprocessed_hint) + curr_config["controlnets"]["strength"].append(cnet_strength) + return curr_config + + cnet_map = {} + cnet_map["controlnets"] = { + "control_mode": control_mode, + "model": [stencil], + "hint": [preprocessed_hint], + "strength": [cnet_strength], + } + return cnet_map -def show_loras(k): - k = int(k) - return gr.State( - [gr.Dropdown(visible=True)] * k - + [gr.Dropdown(visible=False, value="None")] * (max_loras - k) - ) +def update_embeddings_json(embedding): + return {"embeddings": [embedding]} + + +def submit_to_main_config(input_cfg: dict, main_cfg: dict): + if main_cfg in [None, "", {}]: + return input_cfg + + for base_key in input_cfg: + main_cfg[base_key] = input_cfg[base_key] + return main_cfg + + +def pull_sd_configs( + prompt, + negative_prompt, + sd_init_image, + height, + width, + steps, + strength, + guidance_scale, + seed, + batch_count, + batch_size, + scheduler, + base_model_id, + custom_weights, + custom_vae, + precision, + device, + ondemand, + repeatable_seeds, + resample_type, + controlnets, + embeddings, +): + sd_args = locals() + sd_cfg = {} + for arg in sd_args: + if arg in [ + "prompt", + "negative_prompt", + "sd_init_image", + ]: + sd_cfg[arg] = [sd_args[arg]] + elif arg in ["controlnets", "embeddings"]: + if isinstance(arg, dict): + sd_cfg[arg] = json.loads(sd_args[arg]) + else: + sd_cfg[arg] = {} + else: + sd_cfg[arg] = sd_args[arg] + return sd_cfg -def show_controlnets(k): - k = int(k) +def load_sd_cfg(sd_json: dict, load_sd_config: str): + new_sd_config = json.loads(view_json_file(load_sd_config)) + if sd_json: + for key in new_sd_config: + sd_json[key] = new_sd_config[key] + else: + sd_json = new_sd_config + for i in sd_json["sd_init_image"]: + if i is not None: + if os.path.isfile(i): + sd_image = [Image.open(i, mode="r")] + else: + sd_image = None + return [ - gr.State( - [ - [gr.Row(visible=True, render=True)] * k - + [gr.Row(visible=False)] * (max_controlnets - k) - ] - ), - gr.State([None] * k), - gr.State([None] * k), - gr.State([None] * k), + sd_json["prompt"][0], + sd_json["negative_prompt"][0], + sd_image, + sd_json["height"], + sd_json["width"], + sd_json["steps"], + sd_json["strength"], + sd_json["guidance_scale"], + sd_json["seed"], + sd_json["batch_count"], + sd_json["batch_size"], + sd_json["scheduler"], + sd_json["base_model_id"], + sd_json["custom_weights"], + sd_json["custom_vae"], + sd_json["precision"], + sd_json["device"], + sd_json["ondemand"], + sd_json["repeatable_seeds"], + sd_json["resample_type"], + sd_json["controlnets"], + sd_json["embeddings"], + sd_json, 
] +def save_sd_cfg(config: dict, save_name: str): + if os.path.exists(save_name): + filepath = save_name + elif cmd_opts.configs_path: + filepath = os.path.join(cmd_opts.configs_path, save_name) + else: + filepath = os.path.join(get_configs_path(), save_name) + if ".json" not in filepath: + filepath += ".json" + with open(filepath, mode="w") as f: + f.write(json.dumps(config)) + return "..." + + def create_canvas(width, height): data = Image.fromarray( np.zeros( @@ -94,110 +206,27 @@ def create_canvas(width, height): ) img_dict = { "background": data, - "layers": [data], + "layers": [], "composite": None, } return EditorValue(img_dict) def import_original(original_img, width, height): - resized_img, _, _ = resize_stencil(original_img, width, height) - img_dict = { - "background": resized_img, - "layers": [resized_img], - "composite": None, - } - return gr.ImageEditor( - value=EditorValue(img_dict), - crop_size=(width, height), - ) - - -def update_cn_input( - model, - width, - height, - stencils, - images, - preprocessed_hints, -): - if model == None: - stencils[index] = None - images[index] = None - preprocessed_hints[index] = None - return [ - gr.update(), - gr.update(), - gr.update(), - gr.update(), - gr.update(), - gr.update(), - stencils, - images, - preprocessed_hints, - ] - elif model == "scribble": - return [ - gr.ImageEditor( - visible=True, - interactive=True, - show_label=False, - image_mode="RGB", - type="pil", - brush=Brush( - colors=["#000000"], - color_mode="fixed", - default_size=5, - ), - ), - gr.Image( - visible=True, - show_label=False, - interactive=True, - show_download_button=False, - ), - gr.Slider(visible=True, label="Canvas Width"), - gr.Slider(visible=True, label="Canvas Height"), - gr.Button(visible=True), - gr.Button(visible=False), - stencils, - images, - preprocessed_hints, - ] + if original_img is None: + resized_img = create_canvas(width, height) + return resized_img else: - return [ - gr.ImageEditor( - visible=True, - interactive=True, - show_label=False, - image_mode="RGB", - type="pil", - ), - gr.Image( - visible=True, - show_label=False, - interactive=True, - show_download_button=False, - ), - gr.Slider(visible=True, label="Canvas Width"), - gr.Slider(visible=True, label="Canvas Height"), - gr.Button(visible=True), - gr.Button(visible=False), - stencils, - images, - preprocessed_hints, - ] + resized_img, _, _ = resize_stencil(original_img, width, height) + img_dict = { + "background": resized_img, + "layers": [], + "composite": None, + } + return EditorValue(img_dict) -sd_fn_inputs = [] -sd_fn_sig = signature(shark_sd_fn).replace() -for i in sd_fn_sig.parameters: - sd_fn_inputs.append(i) - with gr.Blocks(title="Stable Diffusion") as sd_element: - # Get a list of arguments needed for the API call, then - # initialize an empty list that will manage the corresponding - # gradio values. 
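# Usage sketch (illustrative only, not part of this diff): the helpers above pull
# the gradio inputs into a single JSON-serializable dict, and shark_sd_fn_dict_input
# consumes that dict directly, so a saved config can also drive generation headlessly.
# This mirrors the API test added later in this series; the exact values yielded by
# the generator are an assumption here, not guaranteed by the patch.
import json
import os

import apps.shark_studio.web.utils.globals as global_obj
from apps.shark_studio.api.sd import shark_sd_fn_dict_input
from apps.shark_studio.web.utils.file_utils import get_configs_path

global_obj._init()  # populates the device list and pipeline globals used by the UI/API
with open(os.path.join(get_configs_path(), "default_sd_config.json")) as f:
    sd_kwargs = json.load(f)
sd_kwargs["prompt"] = ["a photo of a red vintage car"]  # override any field as needed
for result in shark_sd_fn_dict_input(sd_kwargs):
    print(result)  # status text and generated images are yielded as the run progresses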
with gr.Row(elem_id="ui_title"): nod_logo = Image.open(nodlogo_loc) with gr.Row(variant="compact", equal_height=True): @@ -216,33 +245,33 @@ def update_cn_input( ) with gr.Column(elem_id="ui_body"): with gr.Row(): - with gr.Column(scale=1, min_width=600): + with gr.Column(scale=2, min_width=600): with gr.Row(equal_height=True): with gr.Column(scale=3): sd_model_info = ( f"Checkpoint Path: {str(get_checkpoints_path())}" ) - sd_base = gr.Dropdown( + base_model_id = gr.Dropdown( label="Base Model", info="Select or enter HF model ID", elem_id="custom_model", value="stabilityai/stable-diffusion-2-1-base", - choices=sd_model_map.keys(), + choices=sd_default_models, ) # base_model_id - sd_custom_weights = gr.Dropdown( - label="Weights (Optional)", + custom_weights = gr.Dropdown( + label="Custom Weights", info="Select or enter HF model ID", elem_id="custom_model", value="None", allow_custom_value=True, - choices=get_checkpoints(sd_base), + choices=["None"] + get_checkpoints(base_model_id), ) # with gr.Column(scale=2): - sd_vae_info = ( - str(get_checkpoints_path("vae")) - ).replace("\\", "\n\\") + sd_vae_info = (str(get_checkpoints_path("vae"))).replace( + "\\", "\n\\" + ) sd_vae_info = f"VAE Path: {sd_vae_info}" - sd_custom_vae = gr.Dropdown( + custom_vae = gr.Dropdown( label=f"Custom VAE Models", info=sd_vae_info, elem_id="custom_model", @@ -253,28 +282,31 @@ def update_cn_input( allow_custom_value=True, scale=1, ) - with gr.Column(scale=1): - save_sd_config = gr.Button( - value="Save Config", size="sm" - ) - clear_sd_config = gr.ClearButton( - value="Clear Config", size="sm" - ) - load_sd_config = gr.FileExplorer( - label="Load Config", - root=os.path.basename("./configs"), - ) - + with gr.Row(): + ondemand = gr.Checkbox( + value=cmd_opts.lowvram, + label="Low VRAM", + interactive=True, + ) + precision = gr.Radio( + label="Precision", + value=cmd_opts.precision, + choices=[ + "fp16", + "fp32", + ], + visible=True, + ) with gr.Group(elem_id="prompt_box_outer"): prompt = gr.Textbox( label="Prompt", - value=cmd_opts.prompts[0], + value=cmd_opts.prompt[0], lines=2, elem_id="prompt_box", ) negative_prompt = gr.Textbox( label="Negative Prompt", - value=cmd_opts.negative_prompts[0], + value=cmd_opts.negative_prompt[0], lines=2, elem_id="negative_prompt_box", ) @@ -287,41 +319,39 @@ def update_cn_input( height=300, interactive=True, ) - with gr.Accordion( - label="Embeddings options", open=False, render=True - ): - sd_lora_info = ( - str(get_checkpoints_path("loras")) - ).replace("\\", "\n\\") - num_loras = gr.Slider( - 1, max_loras, value=1, step=1, label="LoRA Count" + with gr.Accordion(label="Embeddings options", open=True, render=True): + sd_lora_info = (str(get_checkpoints_path("loras"))).replace( + "\\", "\n\\" ) - loras = gr.State([]) - for i in range(max_loras): - with gr.Row(): - lora_opt = gr.Dropdown( - allow_custom_value=True, - label=f"Standalone LoRA Weights", - info=sd_lora_info, - elem_id="lora_weights", - value="None", - choices=["None"] + get_checkpoints("lora"), - ) - with gr.Row(): - lora_tags = gr.HTML( - value="
No LoRA selected
", - elem_classes="lora-tags", - ) - gr.on( - triggers=[lora_opt.change], - fn=lora_changed, - inputs=[lora_opt], - outputs=[lora_tags], - queue=True, + with gr.Row(): + embeddings_config = gr.JSON(min_width=50, scale=1) + lora_opt = gr.Dropdown( + allow_custom_value=True, + label=f"Standalone LoRA Weights", + info=sd_lora_info, + elem_id="lora_weights", + value=None, + multiselect=True, + choices=[] + get_checkpoints("lora"), + scale=2, ) - loras.value.append(lora_opt) - - num_loras.change(show_loras, [num_loras], [loras]) + lora_tags = gr.HTML( + value="
No LoRA selected
", + elem_classes="lora-tags", + ) + gr.on( + triggers=[lora_opt.change], + fn=lora_changed, + inputs=[lora_opt], + outputs=[lora_tags], + queue=True, + show_progress=False, + ).then( + fn=update_embeddings_json, + inputs=[lora_opt], + outputs=[embeddings_config], + show_progress=False, + ) with gr.Accordion(label="Advanced Options", open=True): with gr.Row(): scheduler = gr.Dropdown( @@ -331,7 +361,6 @@ def update_cn_input( choices=scheduler_model_map.keys(), allow_custom_value=False, ) - with gr.Row(): height = gr.Slider( 384, 768, @@ -397,20 +426,6 @@ def update_cn_input( step=0.1, label="CFG Scale", ) - ondemand = gr.Checkbox( - value=cmd_opts.lowvram, - label="Low VRAM", - interactive=True, - ) - precision = gr.Radio( - label="Precision", - value=cmd_opts.precision, - choices=[ - "fp16", - "fp32", - ], - visible=True, - ) with gr.Row(): seed = gr.Textbox( value=cmd_opts.seed, @@ -420,159 +435,149 @@ def update_cn_input( device = gr.Dropdown( elem_id="device", label="Device", - value=get_available_devices()[0], - choices=get_available_devices(), + value=global_obj.get_device_list()[0], + choices=global_obj.get_device_list(), allow_custom_value=False, ) with gr.Accordion( - label="Controlnet Options", open=False, render=False + label="Controlnet Options", + open=False, + visible=False, ): - sd_cnet_info = ( - str(get_checkpoints_path("controlnet")) - ).replace("\\", "\n\\") - num_cnets = gr.Slider( - 0, - max_controlnets, - value=0, - step=1, - label="Controlnet Count", - ) - cnet_rows = [] - stencils = gr.State([]) - images = gr.State([]) preprocessed_hints = gr.State([]) - control_mode = gr.Radio( - choices=["Prompt", "Balanced", "Controlnet"], - value="Balanced", - label="Control Mode", - ) - - for i in range(max_controlnets): - with gr.Row(visible=False) as cnet_row: - with gr.Column(): - cnet_gen = gr.Button( - value="Preprocess controlnet input", - ) - cnet_model = gr.Dropdown( - allow_custom_value=True, - label=f"Controlnet Model", - info=sd_cnet_info, - elem_id="lora_weights", - value="None", - choices=[ - "None", - "canny", - "openpose", - "scribble", - "zoedepth", - ] - + get_checkpoints("controlnet"), - ) + with gr.Column(): + sd_cnet_info = ( + str(get_checkpoints_path("controlnet")) + ).replace("\\", "\n\\") + with gr.Row(): + cnet_config = gr.JSON() + with gr.Column(): + clear_config = gr.ClearButton( + value="Clear Controlnet Config", + size="sm", + components=cnet_config, + ) + control_mode = gr.Radio( + choices=["Prompt", "Balanced", "Controlnet"], + value="Balanced", + label="Control Mode", + ) + with gr.Row(): + with gr.Column(scale=1): + cnet_model = gr.Dropdown( + allow_custom_value=True, + label=f"Controlnet Model", + info=sd_cnet_info, + value="None", + choices=[ + "None", + "canny", + "openpose", + "scribble", + "zoedepth", + ] + + get_checkpoints("controlnet"), + ) + cnet_strength = gr.Slider( + label="Controlnet Strength", + minimum=0, + maximum=100, + value=50, + step=1, + ) + with gr.Row(): canvas_width = gr.Slider( label="Canvas Width", minimum=256, maximum=1024, value=512, - step=1, - visible=False, + step=8, ) canvas_height = gr.Slider( label="Canvas Height", minimum=256, maximum=1024, value=512, - step=1, - visible=False, - ) - make_canvas = gr.Button( - value="Make Canvas!", - visible=False, + step=8, ) - use_input_img = gr.Button( - value="Use Original Image", - visible=False, - ) - cnet_input = gr.ImageEditor( - visible=True, - image_mode="RGB", - interactive=True, - show_label=True, - label="Input Image", - type="pil", + make_canvas = gr.Button( + 
value="Make Canvas!", ) + use_input_img = gr.Button( + value="Use Original Image", + size="sm", + ) + cnet_input = gr.Image( + value=None, + type="pil", + image_mode="RGB", + interactive=True, + ) + with gr.Column(scale=1): cnet_output = gr.Image( value=None, visible=True, label="Preprocessed Hint", - interactive=True, + interactive=False, show_label=True, ) - use_input_img.click( - import_original, - [sd_init_image, canvas_width, canvas_height], - [cnet_input], + cnet_gen = gr.Button( + value="Preprocess controlnet input", ) - cnet_model.change( - fn=update_cn_input, - inputs=[ - cnet_model, - canvas_width, - canvas_height, - stencils, - images, - preprocessed_hints, - ], - outputs=[ - cnet_input, - cnet_output, - canvas_width, - canvas_height, - make_canvas, - use_input_img, - stencils, - images, - preprocessed_hints, - ], - ) - make_canvas.click( - create_canvas, - [canvas_width, canvas_height], - [ - cnet_input, - ], + use_result = gr.Button( + "Submit", + size="sm", ) - gr.on( - triggers=[cnet_gen.click], - fn=cnet_preview, - inputs=[ - cnet_model, - cnet_input, - stencils, - images, - preprocessed_hints, - ], - outputs=[ - cnet_output, - stencils, - images, - preprocessed_hints, - ], - ) - cnet_rows.value.append(cnet_row) - - num_cnets.change( - show_controlnets, - [num_cnets], - [cnet_rows, stencils, images, preprocessed_hints], + use_input_img.click( + fn=import_original, + inputs=[ + sd_init_image, + canvas_width, + canvas_height, + ], + outputs=[cnet_input], + queue=False, + ) + make_canvas.click( + fn=create_canvas, + inputs=[canvas_width, canvas_height], + outputs=[cnet_input], + queue=False, + ) + cnet_gen.click( + fn=cnet_preview, + inputs=[ + cnet_model, + cnet_input, + ], + outputs=[ + cnet_output, + preprocessed_hints, + ], ) - with gr.Column(scale=1, min_width=600): + use_result.click( + fn=submit_to_cnet_config, + inputs=[ + cnet_model, + cnet_output, + cnet_strength, + control_mode, + cnet_config, + ], + outputs=[ + cnet_config, + ], + queue=False, + ) + with gr.Column(scale=3, min_width=600): with gr.Group(): sd_gallery = gr.Gallery( label="Generated images", show_label=False, elem_id="gallery", columns=2, - object_fit="contain", + object_fit="fit", + preview=True, ) std_output = gr.Textbox( value=f"{sd_model_info}\n" @@ -582,6 +587,7 @@ def update_cn_input( elem_id="std_output", show_label=False, ) + sd_element.load(logger.read_sd_logs, None, std_output, every=1) sd_status = gr.Textbox(visible=False) with gr.Row(): stable_diffusion = gr.Button("Generate Image(s)") @@ -591,11 +597,75 @@ def update_cn_input( inputs=[], outputs=[seed], queue=False, + show_progress=False, ) stop_batch = gr.Button("Stop Batch") + with gr.Group(): + with gr.Column(scale=3): + sd_json = gr.JSON( + value=view_json_file( + os.path.join( + get_configs_path(), + "default_sd_config.json", + ) + ) + ) + with gr.Column(scale=1): + clear_sd_config = gr.ClearButton( + value="Clear Config", size="sm", components=sd_json + ) + with gr.Row(): + save_sd_config = gr.Button(value="Save Config", size="sm") + sd_config_name = gr.Textbox( + value="Config Name", + info="Name of the file this config will be saved to.", + interactive=True, + ) + load_sd_config = gr.FileExplorer( + label="Load Config", + file_count="single", + root=cmd_opts.configs_path + if cmd_opts.configs_path + else get_configs_path(), + height=75, + ) + load_sd_config.change( + fn=load_sd_cfg, + inputs=[sd_json, load_sd_config], + outputs=[ + prompt, + negative_prompt, + sd_init_image, + height, + width, + steps, + strength, + 
guidance_scale, + seed, + batch_count, + batch_size, + scheduler, + base_model_id, + custom_weights, + custom_vae, + precision, + device, + ondemand, + repeatable_seeds, + resample_type, + cnet_config, + embeddings_config, + sd_json, + ], + ) + save_sd_config.click( + fn=save_sd_cfg, + inputs=[sd_json, sd_config_name], + outputs=[sd_config_name], + ) - kwargs = dict( - fn=shark_sd_fn, + pull_kwargs = dict( + fn=pull_sd_configs, inputs=[ prompt, negative_prompt, @@ -609,28 +679,20 @@ def update_cn_input( batch_count, batch_size, scheduler, - sd_base, - sd_custom_weights, - sd_custom_vae, + base_model_id, + custom_weights, + custom_vae, precision, device, - loras, ondemand, repeatable_seeds, resample_type, - control_mode, - stencils, - images, - preprocessed_hints, + cnet_config, + embeddings_config, ], outputs=[ - sd_gallery, - std_output, - sd_status, - stencils, - images, + sd_json, ], - show_progress="minimal", ) status_kwargs = dict( @@ -639,11 +701,22 @@ def update_cn_input( outputs=sd_status, ) - prompt_submit = prompt.submit(**status_kwargs).then(**kwargs) - neg_prompt_submit = negative_prompt.submit(**status_kwargs).then( - **kwargs + gen_kwargs = dict( + fn=shark_sd_fn_dict_input, + inputs=[sd_json], + outputs=[ + sd_gallery, + sd_status, + ], + ) + + prompt_submit = prompt.submit(**status_kwargs).then(**pull_kwargs) + neg_prompt_submit = negative_prompt.submit(**status_kwargs).then(**pull_kwargs) + generate_click = ( + stable_diffusion.click(**status_kwargs) + .then(**pull_kwargs) + .then(**gen_kwargs) ) - generate_click = stable_diffusion.click(**status_kwargs).then(**kwargs) stop_batch.click( fn=cancel_sd, cancels=[prompt_submit, neg_prompt_submit, generate_click], diff --git a/apps/shark_studio/web/ui/utils.py b/apps/shark_studio/web/ui/utils.py index ba62e5adc0..34a94fa014 100644 --- a/apps/shark_studio/web/ui/utils.py +++ b/apps/shark_studio/web/ui/utils.py @@ -6,9 +6,7 @@ def resource_path(relative_path): """Get absolute path to resource, works for dev and for PyInstaller""" - base_path = getattr( - sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__)) - ) + base_path = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__))) return os.path.join(base_path, relative_path) diff --git a/apps/shark_studio/web/utils/file_utils.py b/apps/shark_studio/web/utils/file_utils.py new file mode 100644 index 0000000000..cae925f5e2 --- /dev/null +++ b/apps/shark_studio/web/utils/file_utils.py @@ -0,0 +1,83 @@ +import os +import sys +import glob +from datetime import datetime as dt +from pathlib import Path + +from apps.shark_studio.modules.shared_cmd_opts import cmd_opts + +checkpoints_filetypes = ( + "*.ckpt", + "*.safetensors", +) + + +def safe_name(name): + return name.replace("/", "_").replace("-", "_") + + +def get_path_stem(path): + path = Path(path) + return path.stem + + +def get_resource_path(relative_path): + """Get absolute path to resource, works for dev and for PyInstaller""" + base_path = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__))) + result = Path(os.path.join(base_path, relative_path)).resolve(strict=False) + return result + + +def get_configs_path() -> Path: + configs = get_resource_path(os.path.join("..", "configs")) + if not os.path.exists(configs): + os.mkdir(configs) + return Path(get_resource_path("../configs")) + + +def get_generated_imgs_path() -> Path: + return Path( + cmd_opts.output_dir + if cmd_opts.output_dir + else get_resource_path("../generated_imgs") + ) + + +def get_generated_imgs_todays_subdir() -> str: + return 
dt.now().strftime("%Y%m%d") + + +def create_checkpoint_folders(): + dir = ["vae", "lora", "../vmfb"] + if not cmd_opts.ckpt_dir: + dir.insert(0, "models") + else: + if not os.path.isdir(cmd_opts.ckpt_dir): + sys.exit( + f"Invalid --ckpt_dir argument, " + f"{cmd_opts.ckpt_dir} folder does not exists." + ) + for root in dir: + Path(get_checkpoints_path(root)).mkdir(parents=True, exist_ok=True) + + +def get_checkpoints_path(model=""): + return get_resource_path(f"../models/{model}") + + +def get_checkpoints(model="models"): + ckpt_files = [] + file_types = checkpoints_filetypes + if model == "lora": + file_types = file_types + ("*.pt", "*.bin") + for extn in file_types: + files = [ + os.path.basename(x) + for x in glob.glob(os.path.join(get_checkpoints_path(model), extn)) + ] + ckpt_files.extend(files) + return sorted(ckpt_files, key=str.casefold) + + +def get_checkpoint_pathfile(checkpoint_name, model="models"): + return os.path.join(get_checkpoints_path(model), checkpoint_name) diff --git a/apps/shark_studio/web/utils/globals.py b/apps/shark_studio/web/utils/globals.py index 0b5f54636a..977df7304a 100644 --- a/apps/shark_studio/web/utils/globals.py +++ b/apps/shark_studio/web/utils/globals.py @@ -1,4 +1,5 @@ import gc +from ...api.utils import get_available_devices """ The global objects include SD pipeline and config. @@ -9,11 +10,18 @@ def _init(): global _sd_obj - global _config_obj + global _devices + global _pipe_kwargs + global _prep_kwargs + global _gen_kwargs global _schedulers _sd_obj = None - _config_obj = None + _devices = None + _pipe_kwargs = None + _prep_kwargs = None + _gen_kwargs = None _schedulers = None + set_devices() def set_sd_obj(value): @@ -21,6 +29,11 @@ def set_sd_obj(value): _sd_obj = value +def set_devices(): + global _devices + _devices = get_available_devices() + + def set_sd_scheduler(key): global _sd_obj _sd_obj.scheduler = _schedulers[key] @@ -31,9 +44,19 @@ def set_sd_status(value): _sd_obj.status = value -def set_cfg_obj(value): - global _config_obj - _config_obj = value +def set_pipe_kwargs(value): + global _pipe_kwargs + _pipe_kwargs = value + + +def set_prep_kwargs(value): + global _prep_kwargs + _prep_kwargs = value + + +def set_gen_kwargs(value): + global _gen_kwargs + _gen_kwargs = value def set_schedulers(value): @@ -46,14 +69,29 @@ def get_sd_obj(): return _sd_obj +def get_device_list(): + global _devices + return _devices + + def get_sd_status(): global _sd_obj return _sd_obj.status -def get_cfg_obj(): - global _config_obj - return _config_obj +def get_pipe_kwargs(): + global _pipe_kwargs + return _pipe_kwargs + + +def get_prep_kwargs(): + global _prep_kwargs + return _prep_kwargs + + +def get_gen_kwargs(): + global _gen_kwargs + return _gen_kwargs def get_scheduler(key): @@ -63,12 +101,15 @@ def get_scheduler(key): def clear_cache(): global _sd_obj - global _config_obj + global _pipe_kwargs + global _prep_kwargs + global _gen_kwargs global _schedulers del _sd_obj - del _config_obj del _schedulers gc.collect() _sd_obj = None - _config_obj = None + _pipe_kwargs = None + _prep_kwargs = None + _gen_kwargs = None _schedulers = None diff --git a/apps/shark_studio/web/utils/metadata/csv_metadata.py b/apps/shark_studio/web/utils/metadata/csv_metadata.py index d617e802bf..d515234083 100644 --- a/apps/shark_studio/web/utils/metadata/csv_metadata.py +++ b/apps/shark_studio/web/utils/metadata/csv_metadata.py @@ -29,9 +29,7 @@ def parse_csv(image_filename: str): has_header = csv.Sniffer().has_header(csv_file.read(2048)) csv_file.seek(0) - reader = ( - 
csv.DictReader(csv_file) if has_header else csv.reader(csv_file) - ) + reader = csv.DictReader(csv_file) if has_header else csv.reader(csv_file) matches = [ # we rely on humanize and humanizable to work out the parsing of the individual .csv rows diff --git a/apps/shark_studio/web/utils/metadata/format.py b/apps/shark_studio/web/utils/metadata/format.py index f097dab54f..308d9f8e8b 100644 --- a/apps/shark_studio/web/utils/metadata/format.py +++ b/apps/shark_studio/web/utils/metadata/format.py @@ -92,15 +92,11 @@ def compact(metadata: dict) -> dict: result["Hires resize"] = f"{hires_y}x{hires_x}" # remove VAE if it exists and is empty - if (result.keys() & {"VAE"}) and ( - not result["VAE"] or result["VAE"] == "None" - ): + if (result.keys() & {"VAE"}) and (not result["VAE"] or result["VAE"] == "None"): result.pop("VAE") # remove LoRA if it exists and is empty - if (result.keys() & {"LoRA"}) and ( - not result["LoRA"] or result["LoRA"] == "None" - ): + if (result.keys() & {"LoRA"}) and (not result["LoRA"] or result["LoRA"] == "None"): result.pop("LoRA") return result diff --git a/apps/shark_studio/web/utils/metadata/png_metadata.py b/apps/shark_studio/web/utils/metadata/png_metadata.py index cffc385ab7..72f663f246 100644 --- a/apps/shark_studio/web/utils/metadata/png_metadata.py +++ b/apps/shark_studio/web/utils/metadata/png_metadata.py @@ -1,6 +1,6 @@ import re from pathlib import Path -from apps.shark_studio.api.utils import ( +from apps.shark_studio.web.utils.file_utils import ( get_checkpoint_pathfile, ) from apps.shark_studio.api.sd import ( @@ -66,9 +66,7 @@ def parse_generation_parameters(x: str): return res -def try_find_model_base_from_png_metadata( - file: str, folder: str = "models" -) -> str: +def try_find_model_base_from_png_metadata(file: str, folder: str = "models") -> str: custom = "" # Remove extension from file info @@ -101,16 +99,13 @@ def find_model_from_png_metadata( # No matching model was found if not png_custom and not png_hf_id: print( - "Import PNG info: Unable to find a matching model for %s" - % model_file + "Import PNG info: Unable to find a matching model for %s" % model_file ) return png_custom, png_hf_id -def find_vae_from_png_metadata( - key: str, metadata: dict[str, str | int] -) -> str: +def find_vae_from_png_metadata(key: str, metadata: dict[str, str | int]) -> str: vae_custom = "" if key in metadata: diff --git a/apps/shark_studio/web/utils/state.py b/apps/shark_studio/web/utils/state.py index 626d4ce53f..133c8fd82f 100644 --- a/apps/shark_studio/web/utils/state.py +++ b/apps/shark_studio/web/utils/state.py @@ -3,7 +3,6 @@ def status_label(tab_name, batch_index=0, batch_count=1, batch_size=1): - print(f"Getting status label for {tab_name}") if batch_index < batch_count: bs = f"x{batch_size}" if batch_size > 1 else "" return f"{tab_name} generating {batch_index+1}/{batch_count}{bs}" @@ -18,8 +17,7 @@ def get_generation_text_info(seeds, device): text_output = f"prompt={cfg_dump['prompts']}" text_output += f"\nnegative prompt={cfg_dump['negative_prompts']}" text_output += ( - f"\nmodel_id={cfg_dump['hf_model_id']}, " - f"ckpt_loc={cfg_dump['ckpt_loc']}" + f"\nmodel_id={cfg_dump['hf_model_id']}, " f"ckpt_loc={cfg_dump['ckpt_loc']}" ) text_output += f"\nscheduler={cfg_dump['scheduler']}, " f"device={device}" text_output += ( diff --git a/apps/shark_studio/web/utils/tmp_configs.py b/apps/shark_studio/web/utils/tmp_configs.py index 3e6ba46bfe..4415276ea3 100644 --- a/apps/shark_studio/web/utils/tmp_configs.py +++ b/apps/shark_studio/web/utils/tmp_configs.py @@ 
-7,9 +7,7 @@ def clear_tmp_mlir(): cleanup_start = time() - print( - "Clearing .mlir temporary files from a prior run. This may take some time..." - ) + print("Clearing .mlir temporary files from a prior run. This may take some time...") mlir_files = [ filename for filename in os.listdir(shark_tmp) @@ -18,9 +16,7 @@ def clear_tmp_mlir(): ] for filename in mlir_files: os.remove(shark_tmp + filename) - print( - f"Clearing .mlir temporary files took {time() - cleanup_start:.4f} seconds." - ) + print(f"Clearing .mlir temporary files took {time() - cleanup_start:.4f} seconds.") def clear_tmp_imgs(): diff --git a/shark/iree_utils/compile_utils.py b/shark/iree_utils/compile_utils.py index ca6a12c45b..f5f9557744 100644 --- a/shark/iree_utils/compile_utils.py +++ b/shark/iree_utils/compile_utils.py @@ -64,6 +64,14 @@ def get_iree_device_args(device, extra_args=[]): return get_iree_rocm_args(device_num=device_num, extra_args=extra_args) return [] +def get_iree_target_triple(device): + args = get_iree_device_args(device) + for flag in args: + if "triple" in flag.split("-"): + triple = flag.split("=") + return triple + return "" + def clean_device_info(raw_device): # return appropriate device and device_id for consumption by Studio pipeline @@ -105,7 +113,6 @@ def get_iree_frontend_args(frontend): # Common args to be used given any frontend or device. def get_iree_common_args(debug=False): common_args = [ - "--iree-stream-resource-max-allocation-size=4294967295", "--iree-vm-bytecode-module-strip-source-map=true", "--iree-util-zero-fill-elided-attrs", ] From a43c5590d55a76a782ba77d0c509261e5cf8e6f9 Mon Sep 17 00:00:00 2001 From: Ean Garvey Date: Wed, 17 Jan 2024 12:21:28 -0600 Subject: [PATCH 04/25] Add test for SD --- apps/shark_studio/tests/api_test.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/apps/shark_studio/tests/api_test.py b/apps/shark_studio/tests/api_test.py index c88a1e70cb..f9fa23df4f 100644 --- a/apps/shark_studio/tests/api_test.py +++ b/apps/shark_studio/tests/api_test.py @@ -6,8 +6,26 @@ import logging import unittest +import json + from apps.shark_studio.api.llm import LanguageModel +from apps.shark_studio.api.sd import shark_sd_fn_dict_input, view_json_file +from apps.shark_studio.web.utils.file_utils import get_resource_path + +class SDAPITest(unittest.TestCase): + def testSDSimple(self): + from apps.shark_studio.modules.shared_cmd_opts import cmd_opts + import apps.shark_studio.web.utils.globals as global_obj + + global_obj._init() + sd_json = view_json_file(get_resource_path("../configs/default_sd_config.json")) + sd_kwargs = json.loads(sd_json) + for arg in vars(cmd_opts): + if arg in sd_kwargs: + sd_kwargs[arg] = getattr(cmd_opts, arg) + for i in shark_sd_fn_dict_input(sd_kwargs): + print(i) class LLMAPITest(unittest.TestCase): def testLLMSimple(self): From 019ba7051d7e69d82ae7214b322fa6b5f5fe2858 Mon Sep 17 00:00:00 2001 From: Ean Garvey Date: Fri, 2 Feb 2024 11:25:41 -0600 Subject: [PATCH 05/25] Small cleanup --- apps/shark_studio/api/sd.py | 40 ------------------- apps/shark_studio/modules/img_processing.py | 44 +++++++++++++++++++++ 2 files changed, 44 insertions(+), 40 deletions(-) diff --git a/apps/shark_studio/api/sd.py b/apps/shark_studio/api/sd.py index 2822d83829..c26c25bf00 100644 --- a/apps/shark_studio/api/sd.py +++ b/apps/shark_studio/api/sd.py @@ -374,46 +374,6 @@ def decode_latents(self, latents, cpu_scheduling=True): pil_images = self.image_processor.numpy_to_pil(images) return pil_images - # def process_sd_init_image(self, 
sd_init_image, resample_type): - # if isinstance(sd_init_image, list): - # images = [] - # for img in sd_init_image: - # img, _ = self.process_sd_init_image(img, resample_type) - # images.append(img) - # is_img2img = True - # return images, is_img2img - # if isinstance(sd_init_image, str): - # if os.path.isfile(sd_init_image): - # sd_init_image = Image.open(sd_init_image, mode="r").convert("RGB") - # image, is_img2img = self.process_sd_init_image( - # sd_init_image, resample_type - # ) - # else: - # image = None - # is_img2img = False - # elif isinstance(sd_init_image, Image.Image): - # image = sd_init_image.convert("RGB") - # elif sd_init_image: - # image = sd_init_image["image"].convert("RGB") - # else: - # image = None - # is_img2img = False - # if image: - # resample_type = ( - # resamplers[resample_type] - # if resample_type in resampler_list - # # Fallback to Lanczos - # else Image.Resampling.LANCZOS - # ) - # image = image.resize((self.width, self.height), resample=resample_type) - # image_arr = np.stack([np.array(i) for i in (image,)], axis=0) - # image_arr = image_arr / 255.0 - # image_arr = torch.from_numpy(image_arr).permute(0, 3, 1, 2).to(self.dtype) - # image_arr = 2 * (image_arr - 0.5) - # is_img2img = True - # image = image_arr - # return image, is_img2img - def generate_images( self, prompt, diff --git a/apps/shark_studio/modules/img_processing.py b/apps/shark_studio/modules/img_processing.py index 80062814cf..821f7b86eb 100644 --- a/apps/shark_studio/modules/img_processing.py +++ b/apps/shark_studio/modules/img_processing.py @@ -1,6 +1,8 @@ import os import re import json +import torch +import numpy as np from csv import DictWriter from PIL import Image, PngImagePlugin @@ -8,6 +10,7 @@ from datetime import datetime as dt from base64 import decode + resamplers = { "Lanczos": Image.Resampling.LANCZOS, "Nearest Neighbor": Image.Resampling.NEAREST, @@ -158,3 +161,44 @@ def resize_stencil(image: Image.Image, width, height, resampler_type=None): resampler = resamplers["Nearest Neighbor"] new_image = image.resize((n_width, n_height), resampler=resampler) return new_image, n_width, n_height + + +def process_sd_init_image(self, sd_init_image, resample_type): + if isinstance(sd_init_image, list): + images = [] + for img in sd_init_image: + img, _ = self.process_sd_init_image(img, resample_type) + images.append(img) + is_img2img = True + return images, is_img2img + if isinstance(sd_init_image, str): + if os.path.isfile(sd_init_image): + sd_init_image = Image.open(sd_init_image, mode="r").convert("RGB") + image, is_img2img = self.process_sd_init_image( + sd_init_image, resample_type + ) + else: + image = None + is_img2img = False + elif isinstance(sd_init_image, Image.Image): + image = sd_init_image.convert("RGB") + elif sd_init_image: + image = sd_init_image["image"].convert("RGB") + else: + image = None + is_img2img = False + if image: + resample_type = ( + resamplers[resample_type] + if resample_type in resampler_list + # Fallback to Lanczos + else Image.Resampling.LANCZOS + ) + image = image.resize((self.width, self.height), resample=resample_type) + image_arr = np.stack([np.array(i) for i in (image,)], axis=0) + image_arr = image_arr / 255.0 + image_arr = torch.from_numpy(image_arr).permute(0, 3, 1, 2).to(self.dtype) + image_arr = 2 * (image_arr - 0.5) + is_img2img = True + image = image_arr + return image, is_img2img \ No newline at end of file From 01575a8ec346b77a035e2daf6889ab6f84866e7a Mon Sep 17 00:00:00 2001 From: Stefan Kapusniak 
<121311569+one-lithe-rune@users.noreply.github.com> Date: Wed, 7 Feb 2024 15:54:48 +0000 Subject: [PATCH 06/25] Shark2/SD/UI: Respect ckpt_dir, share and server_port args (#2070) * Takes whether to generate a gradio live link from the existing --share command line parameter, rather than hardcoding as True. * Takes server port from existing --server_port command line parameter, rather than hardcoding as 11911. * Default --ckpt_dir parameter to '../models' * Use --ckpt_dir rather than hardcoding ../models as the base directory for checkpoints, vae, and lora, etc * Add a 'checkpoints' directory below --ckpt_dir to match ComfyUI folder structure. Read custom_weights choices from there, and/or subfolders below there matching the selected base model. * Fix --ckpt_dir possibly not working correctly when an absolute rather than relative path is specified. * Relabel "Custom Weights" to "Custom Weights Checkpoint" in the UI --- apps/shark_studio/modules/shared_cmd_opts.py | 2 +- apps/shark_studio/web/index.py | 4 +-- apps/shark_studio/web/ui/sd.py | 23 ++++++++++-- apps/shark_studio/web/utils/file_utils.py | 38 +++++++++++--------- 4 files changed, 44 insertions(+), 23 deletions(-) diff --git a/apps/shark_studio/modules/shared_cmd_opts.py b/apps/shark_studio/modules/shared_cmd_opts.py index 93a09c6758..9aff75c219 100644 --- a/apps/shark_studio/modules/shared_cmd_opts.py +++ b/apps/shark_studio/modules/shared_cmd_opts.py @@ -601,7 +601,7 @@ def is_valid_file(arg): p.add_argument( "--ckpt_dir", type=str, - default="", + default="../models", help="Path to directory where all .ckpts are stored in order to populate " "them in the web UI.", ) diff --git a/apps/shark_studio/web/index.py b/apps/shark_studio/web/index.py index 05a9bc363d..f697e2cc16 100644 --- a/apps/shark_studio/web/index.py +++ b/apps/shark_studio/web/index.py @@ -185,10 +185,10 @@ def register_outputgallery_button(button, selectedid, inputs, outputs): # ) # t.start() studio_web.launch( - share=True, + share=cmd_opts.share, inbrowser=True, server_name="0.0.0.0", - server_port=11911, # args.server_port, + server_port=cmd_opts.server_port, ) diff --git a/apps/shark_studio/web/ui/sd.py b/apps/shark_studio/web/ui/sd.py index 6cc0ce035f..cbb17457ed 100644 --- a/apps/shark_studio/web/ui/sd.py +++ b/apps/shark_studio/web/ui/sd.py @@ -226,6 +226,17 @@ def import_original(original_img, width, height): return EditorValue(img_dict) +def base_model_changed(base_model_id): + new_choices = get_checkpoints( + os.path.join("checkpoints", os.path.basename(str(base_model_id))) + ) + get_checkpoints(model_type="checkpoints") + + return gr.Dropdown( + value=new_choices[0] if len(new_choices) > 0 else "None", + choices=["None"] + new_choices, + ) + + with gr.Blocks(title="Stable Diffusion") as sd_element: with gr.Row(elem_id="ui_title"): nod_logo = Image.open(nodlogo_loc) @@ -259,13 +270,19 @@ def import_original(original_img, width, height): choices=sd_default_models, ) # base_model_id custom_weights = gr.Dropdown( - label="Custom Weights", + label="Custom Weights Checkpoint", info="Select or enter HF model ID", elem_id="custom_model", value="None", allow_custom_value=True, - choices=["None"] + get_checkpoints(base_model_id), - ) # + choices=["None"] + + get_checkpoints(os.path.basename(str(base_model_id))), + ) # custom_weights + base_model_id.change( + fn=base_model_changed, + inputs=[base_model_id], + outputs=[custom_weights], + ) with gr.Column(scale=2): sd_vae_info = (str(get_checkpoints_path("vae"))).replace( "\\", "\n\\" diff --git 
a/apps/shark_studio/web/utils/file_utils.py b/apps/shark_studio/web/utils/file_utils.py index cae925f5e2..e6844fe17f 100644 --- a/apps/shark_studio/web/utils/file_utils.py +++ b/apps/shark_studio/web/utils/file_utils.py @@ -21,11 +21,14 @@ def get_path_stem(path): return path.stem -def get_resource_path(relative_path): +def get_resource_path(path): """Get absolute path to resource, works for dev and for PyInstaller""" - base_path = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__))) - result = Path(os.path.join(base_path, relative_path)).resolve(strict=False) - return result + if os.path.isabs(path): + return path + else: + base_path = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__))) + result = Path(os.path.join(base_path, path)).resolve(strict=False) + return result def get_configs_path() -> Path: @@ -48,36 +51,37 @@ def get_generated_imgs_todays_subdir() -> str: def create_checkpoint_folders(): - dir = ["vae", "lora", "../vmfb"] - if not cmd_opts.ckpt_dir: - dir.insert(0, "models") - else: - if not os.path.isdir(cmd_opts.ckpt_dir): + dir = ["checkpoints", "vae", "lora", "vmfb"] + if not os.path.isdir(cmd_opts.ckpt_dir): + try: + os.makedirs(cmd_opts.ckpt_dir) + except OSError: sys.exit( f"Invalid --ckpt_dir argument, " - f"{cmd_opts.ckpt_dir} folder does not exists." + f"{cmd_opts.ckpt_dir} folder does not exist, and cannot be created." ) + for root in dir: Path(get_checkpoints_path(root)).mkdir(parents=True, exist_ok=True) -def get_checkpoints_path(model=""): - return get_resource_path(f"../models/{model}") +def get_checkpoints_path(model_type=""): + return get_resource_path(os.path.join(cmd_opts.ckpt_dir, model_type)) -def get_checkpoints(model="models"): +def get_checkpoints(model_type="checkpoints"): ckpt_files = [] file_types = checkpoints_filetypes - if model == "lora": + if model_type == "lora": file_types = file_types + ("*.pt", "*.bin") for extn in file_types: files = [ os.path.basename(x) - for x in glob.glob(os.path.join(get_checkpoints_path(model), extn)) + for x in glob.glob(os.path.join(get_checkpoints_path(model_type), extn)) ] ckpt_files.extend(files) return sorted(ckpt_files, key=str.casefold) -def get_checkpoint_pathfile(checkpoint_name, model="models"): - return os.path.join(get_checkpoints_path(model), checkpoint_name) +def get_checkpoint_pathfile(checkpoint_name, model_type="checkpoints"): + return os.path.join(get_checkpoints_path(model_type), checkpoint_name) From 25312cd791b78a426f55dae3ecc62c0e13c055c9 Mon Sep 17 00:00:00 2001 From: Ean Garvey <87458719+monorimet@users.noreply.github.com> Date: Thu, 18 Jan 2024 19:01:07 -0600 Subject: [PATCH 07/25] Add StreamingLLM support to studio2 chat (#2060) * Streaming LLM * Update precision and add gpu support * (studio2) Separate weights generation for quantization support * Adapt prompt changes to studio flow * Remove outdated flag from llm compile flags. * (studio2) use turbine vmfbRunner * tweaks to prompts * Update CPU path and llm api test. * Change device in test to cpu. * Fixes to runner, device names, vmfb mgmt * Use small test without external weights. 
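As a rough sketch of the decode flow this patch wires into LanguageModel.chat(): the compiled streaming_state_update module exposes run_initialize, run_cached_initialize, run_forward, get_seq_step and evict_kvcache_space (names taken from the diff further down), and the cache is evicted once the sequence step passes the hardcoded 600 threshold used there. The helper name, its signature, and the argument plumbing below are illustrative assumptions, not part of the commit.

# Hedged sketch of the streaming decode loop this patch introduces.
# `module` stands for runner.ctx.modules.streaming_state_update and
# `runner` for a turbine vmfbRunner, as in apps/shark_studio/api/llm.py.
import time
import iree.runtime as ireert


def streaming_chat(module, runner, tokenizer, prompt, stop_token, max_tokens=1024):
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    history, prev_len, first = [], 0, True
    while len(history) < max_tokens:
        # Only feed tokens the compiled module has not cached yet.
        input_ids = input_ids[:, max(prev_len - 1, 0):]
        if module["get_seq_step"]() > 600:
            module["evict_kvcache_space"]()  # keep the KV cache bounded
        inputs = [ireert.asdevicearray(runner.config.device, input_ids)]
        start = time.time()
        # Full prefill on the first call, cached prefill afterwards.
        token = module["run_initialize" if first else "run_cached_initialize"](*inputs)
        first = False
        seen = input_ids.shape[-1] + 1
        history.append(int(token.to_host()[0][0]))
        yield tokenizer.decode(history), time.time() - start
        while history[-1] != stop_token and len(history) < max_tokens:
            step = time.time()
            if module["get_seq_step"]() > 600:
                module["evict_kvcache_space"]()
            token = module["run_forward"](token)
            history.append(int(token.to_host()[0][0]))
            yield tokenizer.decode(history), time.time() - step
        prev_len = seen + len(history)
        if history[-1] == stop_token:
            break

The caller consumes the generator exactly as the chat UI does: each yield carries the decoded text so far plus the wall-clock time of that step, so prefill and per-token decode timings can be reported separately.
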
--- apps/shark_studio/api/llm.py | 210 +++++++++++++++++++++------- apps/shark_studio/tests/api_test.py | 17 ++- apps/shark_studio/web/ui/chat.py | 142 ++++++------------- apps/shark_studio/web/utils.py | 12 ++ 4 files changed, 218 insertions(+), 163 deletions(-) create mode 100644 apps/shark_studio/web/utils.py diff --git a/apps/shark_studio/api/llm.py b/apps/shark_studio/api/llm.py index 852f5eff58..a9d39f8e7b 100644 --- a/apps/shark_studio/api/llm.py +++ b/apps/shark_studio/api/llm.py @@ -1,10 +1,9 @@ from turbine_models.custom_models import stateless_llama +from turbine_models.model_runner import vmfbRunner +from turbine_models.gen_external_params.gen_external_params import gen_external_params import time -from shark.iree_utils.compile_utils import ( - get_iree_compiled_module, - load_vmfb_using_mmap, -) -from apps.shark_studio.web.utils.file_utils import get_resource_path +from shark.iree_utils.compile_utils import compile_module_to_flatbuffer +from apps.shark_studio.web.utils import get_resource_path import iree.runtime as ireert from itertools import chain import gc @@ -16,6 +15,7 @@ "llama2_7b": { "initializer": stateless_llama.export_transformer_model, "hf_model_name": "meta-llama/Llama-2-7b-chat-hf", + "compile_flags": ["--iree-opt-const-expr-hoisting=False"], "stop_token": 2, "max_tokens": 4096, "system_prompt": """[INST] <>Be concise. You are a helpful, respectful and honest assistant. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. <>""", @@ -23,12 +23,34 @@ "Trelis/Llama-2-7b-chat-hf-function-calling-v2": { "initializer": stateless_llama.export_transformer_model, "hf_model_name": "Trelis/Llama-2-7b-chat-hf-function-calling-v2", + "compile_flags": ["--iree-opt-const-expr-hoisting=False"], "stop_token": 2, "max_tokens": 4096, "system_prompt": """[INST] <>Be concise. You are a helpful, respectful and honest assistant. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. <>""", }, + "TinyPixel/small-llama2": { + "initializer": stateless_llama.export_transformer_model, + "hf_model_name": "TinyPixel/small-llama2", + "compile_flags": ["--iree-opt-const-expr-hoisting=True"], + "stop_token": 2, + "max_tokens": 1024, + "system_prompt": """[INST] <>Be concise. You are a helpful, respectful and honest assistant. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. <>""", + }, } +B_INST, E_INST = "[INST]", "[/INST]" +B_SYS, E_SYS = "", "" + +DEFAULT_CHAT_SYS_PROMPT = """[INST] <> +Be concise. You are a helpful, respectful and honest assistant. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. 
If you don't know the answer to a question, please don't share false information.\n <>\n\n +""" + + +def append_user_prompt(history, input_prompt): + user_prompt = f"{B_INST} {input_prompt} {E_INST}" + history += user_prompt + return history + class LanguageModel: def __init__( @@ -36,41 +58,85 @@ def __init__( model_name, hf_auth_token=None, device=None, - precision="fp32", + quantization="int4", + precision="", external_weights=None, use_system_prompt=True, + streaming_llm=False, ): - print(llm_model_map[model_name]) self.hf_model_name = llm_model_map[model_name]["hf_model_name"] - self.tempfile_name = get_resource_path("llm.torch.tempfile") - self.vmfb_name = get_resource_path("llm.vmfb.tempfile") - self.device = device - self.precision = precision - self.safe_name = self.hf_model_name.strip("/").replace("/", "_") - self.max_tokens = llm_model_map[model_name]["max_tokens"] - self.iree_module_dict = None + self.device = device.split("=>")[-1].strip() + self.backend = self.device.split("://")[0] + self.driver = self.backend + if "cpu" in device: + self.device = "cpu" + self.backend = "llvm-cpu" + self.driver = "local-task" + + print(f"Selected {self.backend} as IREE target backend.") + self.precision = "f32" if "cpu" in device else "f16" + self.quantization = quantization + self.safe_name = self.hf_model_name.replace("/", "_").replace("-", "_") self.external_weight_file = None + # TODO: find a programmatic solution for model arch spec instead of hardcoding llama2 + self.file_spec = "_".join( + [ + self.safe_name, + self.precision, + ] + ) + if self.quantization != "None": + self.file_spec += "_" + self.quantization + if external_weights is not None: self.external_weight_file = get_resource_path( - self.safe_name + "." + external_weights + self.file_spec + "." + external_weights ) + + if streaming_llm: + # Add streaming suffix to file spec after setting external weights filename. + self.file_spec += "_streaming" + self.streaming_llm = streaming_llm + + self.tempfile_name = get_resource_path(f"{self.file_spec}.tempfile") + # TODO: Tag vmfb with target triple of device instead of HAL backend + self.vmfb_name = get_resource_path( + f"{self.file_spec}_{self.backend}.vmfb.tempfile" + ) + self.max_tokens = llm_model_map[model_name]["max_tokens"] + self.iree_module_dict = None self.use_system_prompt = use_system_prompt self.global_iter = 0 + self.prev_token_len = 0 + self.first_input = True + if self.external_weight_file is not None: + if not os.path.exists(self.external_weight_file): + print( + f"External weight file {self.external_weight_file} does not exist. Generating..." 
+ ) + gen_external_params( + hf_model_name=self.hf_model_name, + quantization=self.quantization, + weight_path=self.external_weight_file, + hf_auth_token=hf_auth_token, + precision=self.precision, + ) + else: + print( + f"External weight file {self.external_weight_file} found for {self.vmfb_name}" + ) if os.path.exists(self.vmfb_name) and ( external_weights is None or os.path.exists(str(self.external_weight_file)) ): - self.iree_module_dict = dict() - ( - self.iree_module_dict["vmfb"], - self.iree_module_dict["config"], - self.iree_module_dict["temp_file_to_unlink"], - ) = load_vmfb_using_mmap( - self.vmfb_name, - device, - device_idx=0, - rt_flags=[], - external_weight_file=self.external_weight_file, + self.runner = vmfbRunner( + device=self.driver, + vmfb_path=self.vmfb_name, + external_weight_path=self.external_weight_file, ) + if self.streaming_llm: + self.model = self.runner.ctx.modules.streaming_state_update + else: + self.model = self.runner.ctx.modules.state_update self.tokenizer = AutoTokenizer.from_pretrained( self.hf_model_name, use_fast=False, @@ -82,7 +148,9 @@ def __init__( hf_auth_token, compile_to="torch", external_weights=external_weights, - external_weight_file=self.external_weight_file, + precision=self.precision, + quantization=self.quantization, + streaming_llm=self.streaming_llm, ) with open(self.tempfile_name, "w+") as f: f.write(self.torch_ir) @@ -99,19 +167,37 @@ def __init__( def compile(self) -> None: # this comes with keys: "vmfb", "config", and "temp_file_to_unlink". - self.iree_module_dict = get_iree_compiled_module( + # ONLY architecture/api-specific compile-time flags for each backend, if needed. + # hf_model_id-specific global flags currently in model map. + flags = [] + if "cpu" in self.backend: + flags.extend( + [ + "--iree-global-opt-enable-quantized-matmul-reassociation", + ] + ) + elif self.backend == "vulkan": + flags.extend(["--iree-stream-resource-max-allocation-size=4294967296"]) + flags.extend(llm_model_map[self.hf_model_name]["compile_flags"]) + flatbuffer_blob = compile_module_to_flatbuffer( self.tempfile_name, device=self.device, - mmap=True, frontend="torch", - external_weight_file=self.external_weight_file, + model_config_path=None, + extra_args=flags, write_to=self.vmfb_name, - extra_args=["--iree-global-opt-enable-quantized-matmul-reassociation"], ) - # TODO: delete the temp file + self.runner = vmfbRunner( + device=self.driver, + vmfb_path=self.vmfb_name, + external_weight_path=self.external_weight_file, + ) + if self.streaming_llm: + self.model = self.runner.ctx.modules.streaming_state_update + else: + self.model = self.runner.ctx.modules.state_update def sanitize_prompt(self, prompt): - print(prompt) if isinstance(prompt, list): prompt = list(chain.from_iterable(prompt)) prompt = " ".join([x for x in prompt if isinstance(x, str)]) @@ -119,10 +205,12 @@ def sanitize_prompt(self, prompt): prompt = prompt.replace("\t", " ") prompt = prompt.replace("\r", " ") if self.use_system_prompt and self.global_iter == 0: - prompt = llm_model_map["llama2_7b"]["system_prompt"] + prompt - prompt += " [/INST]" - print(prompt) - return prompt + prompt = append_user_prompt(DEFAULT_CHAT_SYS_PROMPT, prompt) + print(prompt) + return prompt + else: + print(prompt) + return f"{B_INST} {prompt} {E_INST}" def chat(self, prompt): prompt = self.sanitize_prompt(prompt) @@ -134,26 +222,40 @@ def format_out(results): history = [] for iter in range(self.max_tokens): - st_time = time.time() - if iter == 0: - device_inputs = [ - ireert.asdevicearray( - 
self.iree_module_dict["config"].device, input_tensor - ) - ] - token = self.iree_module_dict["vmfb"]["run_initialize"](*device_inputs) + if self.streaming_llm: + token_slice = max(self.prev_token_len - 1, 0) + input_tensor = input_tensor[:, token_slice:] + if self.streaming_llm and self.model["get_seq_step"]() > 600: + print("Evicting cache space!") + self.model["evict_kvcache_space"]() + token_len = input_tensor.shape[-1] + device_inputs = [ + ireert.asdevicearray(self.runner.config.device, input_tensor) + ] + if self.first_input or not self.streaming_llm: + st_time = time.time() + token = self.model["run_initialize"](*device_inputs) + total_time = time.time() - st_time + token_len += 1 + self.first_input = False else: - device_inputs = [ - ireert.asdevicearray( - self.iree_module_dict["config"].device, - token, - ) - ] - token = self.iree_module_dict["vmfb"]["run_forward"](*device_inputs) + st_time = time.time() + token = self.model["run_cached_initialize"](*device_inputs) + total_time = time.time() - st_time + token_len += 1 - total_time = time.time() - st_time history.append(format_out(token)) - yield self.tokenizer.decode(history), total_time + while format_out(token) != llm_model_map["llama2_7b"]["stop_token"]: + dec_time = time.time() + if self.streaming_llm and self.model["get_seq_step"]() > 600: + print("Evicting cache space!") + self.model["evict_kvcache_space"]() + token = self.model["run_forward"](token) + history.append(format_out(token)) + total_time = time.time() - dec_time + yield self.tokenizer.decode(history), total_time + + self.prev_token_len = token_len + len(history) if format_out(token) == llm_model_map["llama2_7b"]["stop_token"]: break diff --git a/apps/shark_studio/tests/api_test.py b/apps/shark_studio/tests/api_test.py index f9fa23df4f..bbaa813c06 100644 --- a/apps/shark_studio/tests/api_test.py +++ b/apps/shark_studio/tests/api_test.py @@ -7,6 +7,8 @@ import logging import unittest import json +from apps.shark_studio.api.llm import LanguageModel +import gc from apps.shark_studio.api.llm import LanguageModel from apps.shark_studio.api.sd import shark_sd_fn_dict_input, view_json_file @@ -28,12 +30,13 @@ def testSDSimple(self): print(i) class LLMAPITest(unittest.TestCase): - def testLLMSimple(self): + def test01_LLMSmall(self): lm = LanguageModel( - "Trelis/Llama-2-7b-chat-hf-function-calling-v2", + "TinyPixel/small-llama2", hf_auth_token=None, - device="cpu-task", - external_weights="safetensors", + device="cpu", + precision="fp32", + quantization="None", ) count = 0 for msg, _ in lm.chat("hi, what are you?"): @@ -42,9 +45,11 @@ def testLLMSimple(self): count += 1 continue assert ( - msg.strip(" ") == "Hello" - ), f"LLM API failed to return correct response, expected 'Hello', received {msg}" + msg.strip(" ") == "Turkish Turkish Turkish" + ), f"LLM API failed to return correct response, expected 'Turkish Turkish Turkish', received {msg}" break + del lm + gc.collect() if __name__ == "__main__": diff --git a/apps/shark_studio/web/ui/chat.py b/apps/shark_studio/web/ui/chat.py index 917ac870bf..f34f89bc78 100644 --- a/apps/shark_studio/web/ui/chat.py +++ b/apps/shark_studio/web/ui/chat.py @@ -11,17 +11,21 @@ ) import apps.shark_studio.web.utils.globals as global_obj +B_SYS, E_SYS = "", "" + def user(message, history): # Append the user's message to the conversation history return "", history + [[message, ""]] -language_model = None +def append_bot_prompt(history, input_prompt): + user_prompt = f"{input_prompt} {E_SYS} {E_SYS}" + history += user_prompt + return history 
-def create_prompt(model_name, history, prompt_prefix): - return "" +language_model = None def get_default_config(): @@ -39,9 +43,13 @@ def chat_fn( precision, download_vmfb, config_file, + streaming_llm, cli=False, ): global language_model + if streaming_llm and prompt_prefix == "Clear": + language_model = None + return "Clearing history...", "" if language_model is None: history[-1][-1] = "Getting the model ready..." yield history, "" @@ -50,8 +58,8 @@ def chat_fn( device=device, precision=precision, external_weights="safetensors", - external_weight_file="llama2_7b.safetensors", use_system_prompt=prompt_prefix, + streaming_llm=streaming_llm, ) history[-1][-1] = "Getting the model ready... Done" yield history, "" @@ -61,7 +69,7 @@ def chat_fn( prefill_time = 0 is_first = True for text, exec_time in language_model.chat(history): - history[-1][-1] = text + history[-1][-1] = f"{text}{E_SYS}" if is_first: prefill_time = exec_time is_first = False @@ -73,101 +81,6 @@ def chat_fn( yield history, f"Prefill: {prefill_time:.2f} seconds\n Decode: {tokens_per_sec:.2f} tokens/sec" -def llm_chat_api(InputData: dict): - return None - print(f"Input keys : {InputData.keys()}") - # print(f"model : {InputData['model']}") - is_chat_completion_api = ( - "messages" in InputData.keys() - ) # else it is the legacy `completion` api - # For Debugging input data from API - # if is_chat_completion_api: - # print(f"message -> role : {InputData['messages'][0]['role']}") - # print(f"message -> content : {InputData['messages'][0]['content']}") - # else: - # print(f"prompt : {InputData['prompt']}") - # print(f"max_tokens : {InputData['max_tokens']}") # Default to 128 for now - global vicuna_model - model_name = InputData["model"] if "model" in InputData.keys() else "codegen" - model_path = llm_model_map[model_name] - device = "cpu-task" - precision = "fp16" - max_toks = None if "max_tokens" not in InputData.keys() else InputData["max_tokens"] - if max_toks is None: - max_toks = 128 if model_name == "codegen" else 512 - - # make it working for codegen first - from apps.language_models.scripts.vicuna import ( - UnshardedVicuna, - ) - - device_id = None - if vicuna_model == 0: - if "cuda" in device: - device = "cuda" - elif "sync" in device: - device = "cpu-sync" - elif "task" in device: - device = "cpu-task" - elif "vulkan" in device: - device_id = int(device.split("://")[1]) - device = "vulkan" - else: - print("unrecognized device") - - vicuna_model = UnshardedVicuna( - model_name, - hf_model_path=model_path, - device=device, - precision=precision, - max_num_tokens=max_toks, - download_vmfb=True, - load_mlir_from_shark_tank=True, - device_id=device_id, - ) - - # TODO: add role dict for different models - if is_chat_completion_api: - # TODO: add funtionality for multiple messages - prompt = create_prompt(model_name, [(InputData["messages"][0]["content"], "")]) - else: - prompt = InputData["prompt"] - print("prompt = ", prompt) - - res = vicuna_model.generate(prompt) - res_op = None - for op in res: - res_op = op - - if is_chat_completion_api: - choices = [ - { - "index": 0, - "message": { - "role": "assistant", - "content": res_op, # since we are yeilding the result - }, - "finish_reason": "stop", # or length - } - ] - else: - choices = [ - { - "text": res_op, - "index": 0, - "logprobs": None, - "finish_reason": "stop", # or length - } - ] - end_time = dt.now().strftime("%Y%m%d%H%M%S%f") - return { - "id": end_time, - "object": "chat.completion" if is_chat_completion_api else "text_completion", - "created": int(end_time), 
- "choices": choices, - } - - def view_json_file(file_obj): content = "" with open(file_obj.name, "r") as fopen: @@ -198,7 +111,7 @@ def view_json_file(file_obj): ) precision = gr.Radio( label="Precision", - value="int4", + value="fp32", choices=[ # "int4", # "int8", @@ -211,12 +124,18 @@ def view_json_file(file_obj): with gr.Column(): download_vmfb = gr.Checkbox( label="Download vmfb from Shark tank if available", + value=False, + interactive=True, + visible=False, + ) + streaming_llm = gr.Checkbox( + label="Run in streaming mode (requires recompilation)", value=True, interactive=True, ) prompt_prefix = gr.Checkbox( label="Add System Prompt", - value=False, + value=True, interactive=True, ) @@ -260,6 +179,7 @@ def view_json_file(file_obj): precision, download_vmfb, config_file, + streaming_llm, ], outputs=[chatbot, tokens_time], show_progress=False, @@ -281,6 +201,7 @@ def view_json_file(file_obj): precision, download_vmfb, config_file, + streaming_llm, ], outputs=[chatbot, tokens_time], show_progress=False, @@ -293,4 +214,19 @@ def view_json_file(file_obj): cancels=[submit_event, submit_click_event], queue=False, ) - clear.click(lambda: None, None, [chatbot], queue=False) + clear.click( + fn=chat_fn, + inputs=[ + clear, + chatbot, + model, + device, + precision, + download_vmfb, + config_file, + streaming_llm, + ], + outputs=[chatbot, tokens_time], + show_progress=False, + queue=True, + ).then(lambda: None, None, [chatbot], queue=False) diff --git a/apps/shark_studio/web/utils.py b/apps/shark_studio/web/utils.py new file mode 100644 index 0000000000..4072491cbf --- /dev/null +++ b/apps/shark_studio/web/utils.py @@ -0,0 +1,12 @@ +import os +import sys + + +def get_available_devices(): + return ["cpu-task"] + + +def get_resource_path(relative_path): + """Get absolute path to resource, works for dev and for PyInstaller""" + base_path = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__))) + return os.path.join(base_path, relative_path) From 230638ab9afe6cd0e707477a9988ba40fb95ec92 Mon Sep 17 00:00:00 2001 From: Stanley Winata <68087699+raikonenfnu@users.noreply.github.com> Date: Thu, 1 Feb 2024 09:46:22 -0800 Subject: [PATCH 08/25] HF-Reference LLM mode + Update test result to match latest Turbine. (#2080) * HF-Reference LLM mode. * Fixup test to match current output from Turbine. * lint * Fix test error message + Only initialize HF torch model when used. * Remove redundant format_out change. --- apps/shark_studio/api/llm.py | 49 ++++++++++++++++++++++++++++- apps/shark_studio/tests/api_test.py | 5 +-- 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/apps/shark_studio/api/llm.py b/apps/shark_studio/api/llm.py index a9d39f8e7b..647d6a5af1 100644 --- a/apps/shark_studio/api/llm.py +++ b/apps/shark_studio/api/llm.py @@ -9,7 +9,7 @@ import gc import os import torch -from transformers import AutoTokenizer +from transformers import AutoTokenizer, AutoModelForCausalLM llm_model_map = { "llama2_7b": { @@ -109,6 +109,7 @@ def __init__( self.global_iter = 0 self.prev_token_len = 0 self.first_input = True + self.hf_auth_token = hf_auth_token if self.external_weight_file is not None: if not os.path.exists(self.external_weight_file): print( @@ -164,6 +165,8 @@ def __init__( use_auth_token=hf_auth_token, ) self.compile() + # Reserved for running HF torch model as reference. + self.hf_mod = None def compile(self) -> None: # this comes with keys: "vmfb", "config", and "temp_file_to_unlink". 
@@ -267,6 +270,50 @@ def format_out(results): self.global_iter += 1 return result_output, total_time + # Reference HF model function for sanity checks. + def chat_hf(self, prompt): + if self.hf_mod is None: + self.hf_mod = AutoModelForCausalLM.from_pretrained( + self.hf_model_name, + torch_dtype=torch.float, + token=self.hf_auth_token, + ) + prompt = self.sanitize_prompt(prompt) + + input_tensor = self.tokenizer(prompt, return_tensors="pt").input_ids + history = [] + for iter in range(self.max_tokens): + token_len = input_tensor.shape[-1] + if self.first_input: + st_time = time.time() + result = self.hf_mod(input_tensor) + token = torch.argmax(result.logits[:, -1, :], dim=1) + total_time = time.time() - st_time + token_len += 1 + pkv = result.past_key_values + self.first_input = False + + history.append(int(token)) + while token != llm_model_map["llama2_7b"]["stop_token"]: + dec_time = time.time() + result = self.hf_mod(token.reshape([1, 1]), past_key_values=pkv) + history.append(int(token)) + total_time = time.time() - dec_time + token = torch.argmax(result.logits[:, -1, :], dim=1) + pkv = result.past_key_values + yield self.tokenizer.decode(history), total_time + + self.prev_token_len = token_len + len(history) + + if token == llm_model_map["llama2_7b"]["stop_token"]: + break + for i in range(len(history)): + if type(history[i]) != int: + history[i] = int(history[i]) + result_output = self.tokenizer.decode(history) + self.global_iter += 1 + return result_output, total_time + if __name__ == "__main__": lm = LanguageModel( diff --git a/apps/shark_studio/tests/api_test.py b/apps/shark_studio/tests/api_test.py index bbaa813c06..98cd310060 100644 --- a/apps/shark_studio/tests/api_test.py +++ b/apps/shark_studio/tests/api_test.py @@ -39,14 +39,15 @@ def test01_LLMSmall(self): quantization="None", ) count = 0 + label = "Turkishoure Turkish" for msg, _ in lm.chat("hi, what are you?"): # skip first token output if count == 0: count += 1 continue assert ( - msg.strip(" ") == "Turkish Turkish Turkish" - ), f"LLM API failed to return correct response, expected 'Turkish Turkish Turkish', received {msg}" + msg.strip(" ") == label + ), f"LLM API failed to return correct response, expected '{label}', received {msg}" break del lm gc.collect() From be4c49a1b9c1615afa262e19772fd7fcf66d0741 Mon Sep 17 00:00:00 2001 From: Ean Garvey Date: Fri, 2 Feb 2024 23:24:27 -0600 Subject: [PATCH 09/25] Add rest API endpoint from LanguageModel API --- apps/shark_studio/api/initializers.py | 40 +++- apps/shark_studio/api/llm.py | 116 +++++++++- apps/shark_studio/modules/shared_cmd_opts.py | 20 ++ apps/shark_studio/tests/api_test.py | 27 ++- apps/shark_studio/tests/rest_api_test.py | 45 ++++ apps/shark_studio/web/api/compat.py | 218 +++++++++---------- apps/shark_studio/web/index.py | 11 +- apps/shark_studio/web/utils/globals.py | 19 ++ 8 files changed, 340 insertions(+), 156 deletions(-) create mode 100644 apps/shark_studio/tests/rest_api_test.py diff --git a/apps/shark_studio/api/initializers.py b/apps/shark_studio/api/initializers.py index ef9816cfca..6cc60d26f0 100644 --- a/apps/shark_studio/api/initializers.py +++ b/apps/shark_studio/api/initializers.py @@ -7,11 +7,11 @@ from threading import Thread from apps.shark_studio.modules.timer import startup_timer -from apps.shark_studio.web.utils.tmp_configs import ( - config_tmp, - clear_tmp_mlir, - clear_tmp_imgs, -) +# from apps.shark_studio.web.utils.tmp_configs import ( +# config_tmp, +# clear_tmp_mlir, +# clear_tmp_imgs, +# ) def imports(): @@ -46,9 +46,9 @@ def 
initialize(): # existing temporary images there if they exist. Then we can import gradio. # It has to be in this order or gradio ignores what we've set up. - config_tmp() - clear_tmp_mlir() - clear_tmp_imgs() + # config_tmp() + # clear_tmp_mlir() + # clear_tmp_imgs() from apps.shark_studio.web.utils.file_utils import ( create_checkpoint_folders, @@ -85,6 +85,30 @@ def dumpstacks(): print("\n".join(code)) +def setup_middleware(app): + from starlette.middleware.gzip import GZipMiddleware + + app.middleware_stack = None # reset current middleware to allow modifying user provided list + app.add_middleware(GZipMiddleware, minimum_size=1000) + configure_cors_middleware(app) + app.build_middleware_stack() # rebuild middleware stack on-the-fly + + +def configure_cors_middleware(app): + from starlette.middleware.cors import CORSMiddleware + from apps.shark_studio.modules.shared_cmd_opts import cmd_opts + + cors_options = { + "allow_methods": ["*"], + "allow_headers": ["*"], + "allow_credentials": True, + } + if cmd_opts.api_accept_origin: + cors_options["allow_origins"] = cmd_opts.api_accept_origin.split(',') + + app.add_middleware(CORSMiddleware, **cors_options) + + def configure_sigint_handler(): # make the program just exit at ctrl+c without waiting for anything def sigint_handler(sig, frame): diff --git a/apps/shark_studio/api/llm.py b/apps/shark_studio/api/llm.py index 647d6a5af1..fe64dac4f0 100644 --- a/apps/shark_studio/api/llm.py +++ b/apps/shark_studio/api/llm.py @@ -3,7 +3,8 @@ from turbine_models.gen_external_params.gen_external_params import gen_external_params import time from shark.iree_utils.compile_utils import compile_module_to_flatbuffer -from apps.shark_studio.web.utils import get_resource_path +from apps.shark_studio.web.utils.file_utils import get_resource_path +from apps.shark_studio.modules.shared_cmd_opts import cmd_opts import iree.runtime as ireert from itertools import chain import gc @@ -88,21 +89,24 @@ def __init__( if self.quantization != "None": self.file_spec += "_" + self.quantization - if external_weights is not None: + if external_weights in ["safetensors", "gguf"]: self.external_weight_file = get_resource_path( - self.file_spec + "." + external_weights + os.path.join("..", self.file_spec + "." + external_weights) ) + else: + self.external_weights = None + self.external_weight_file = None if streaming_llm: # Add streaming suffix to file spec after setting external weights filename. 
self.file_spec += "_streaming" self.streaming_llm = streaming_llm - self.tempfile_name = get_resource_path(f"{self.file_spec}.tempfile") + self.tempfile_name = get_resource_path(os.path.join("..", f"{self.file_spec}.tempfile")) # TODO: Tag vmfb with target triple of device instead of HAL backend - self.vmfb_name = get_resource_path( - f"{self.file_spec}_{self.backend}.vmfb.tempfile" - ) + self.vmfb_name = str(get_resource_path( + os.path.join("..", f"{self.file_spec}_{self.backend}.vmfb.tempfile") + )) self.max_tokens = llm_model_map[model_name]["max_tokens"] self.iree_module_dict = None self.use_system_prompt = use_system_prompt @@ -126,6 +130,7 @@ def __init__( print( f"External weight file {self.external_weight_file} found for {self.vmfb_name}" ) + self.external_weight_file = str(self.external_weight_file) if os.path.exists(self.vmfb_name) and ( external_weights is None or os.path.exists(str(self.external_weight_file)) ): @@ -209,10 +214,8 @@ def sanitize_prompt(self, prompt): prompt = prompt.replace("\r", " ") if self.use_system_prompt and self.global_iter == 0: prompt = append_user_prompt(DEFAULT_CHAT_SYS_PROMPT, prompt) - print(prompt) return prompt else: - print(prompt) return f"{B_INST} {prompt} {E_INST}" def chat(self, prompt): @@ -248,7 +251,7 @@ def format_out(results): token_len += 1 history.append(format_out(token)) - while format_out(token) != llm_model_map["llama2_7b"]["stop_token"]: + while format_out(token) != llm_model_map["llama2_7b"]["stop_token"] and len(history) < self.max_tokens: dec_time = time.time() if self.streaming_llm and self.model["get_seq_step"]() > 600: print("Evicting cache space!") @@ -315,6 +318,99 @@ def chat_hf(self, prompt): return result_output, total_time +def llm_chat_api(InputData: dict): + from datetime import datetime as dt + + import apps.shark_studio.web.utils.globals as global_obj + + print(f"Input keys : {InputData.keys()}") + + # print(f"model : {InputData['model']}") + + is_chat_completion_api = ( + "messages" in InputData.keys() + ) # else it is the legacy `completion` api + + # For Debugging input data from API + if is_chat_completion_api: + print(f"message -> role : {InputData['messages'][0]['role']}") + print(f"message -> content : {InputData['messages'][0]['content']}") + else: + print(f"prompt : {InputData['prompt']}") + + model_name = InputData["model"] if "model" in InputData.keys() else "llama2_7b" + model_path = llm_model_map[model_name] + device = InputData["device"] if "device" in InputData.keys() else "cpu" + precision = "fp16" + max_tokens = InputData["max_tokens"] if "max_tokens" in InputData.keys() else 4096 + + device_id = None + if not global_obj.get_llm_obj(): + print("\n[LOG] Initializing new pipeline...") + global_obj.clear_cache() + gc.collect() + if "cuda" in device: + device = "cuda" + elif "vulkan" in device: + device_id = int(device.split("://")[1]) + device = "vulkan" + elif "cpu" in device: + device = "cpu" + precision = "fp32" + else: + print("unrecognized device") + llm_model = LanguageModel( + model_name=model_name, + hf_auth_token=cmd_opts.hf_auth_token, + device=device, + quantization=cmd_opts.quantization, + external_weights="safetensors", + use_system_prompt=True, + streaming_llm=False, + ) + global_obj.set_llm_obj(llm_model) + else: + llm_model = global_obj.get_llm_obj() + + llm_model.max_tokens = max_tokens + # TODO: add role dict for different models + if is_chat_completion_api: + # TODO: add funtionality for multiple messages + prompt = append_user_prompt(InputData["messages"][0]["role"], 
InputData["messages"][0]["content"]) + else: + prompt = InputData["prompt"] + print("prompt = ", prompt) + + for res_op, _ in llm_model.chat(prompt): + + if is_chat_completion_api: + choices = [ + { + "index": 0, + "message": { + "role": "assistant", + "content": res_op, # since we are yeilding the result + }, + "finish_reason": "stop", # or length + } + ] + else: + choices = [ + { + "text": res_op, + "index": 0, + "logprobs": None, + "finish_reason": "stop", # or length + } + ] + end_time = dt.now().strftime("%Y%m%d%H%M%S%f") + return { + "id": end_time, + "object": "chat.completion" if is_chat_completion_api else "text_completion", + "created": int(end_time), + "choices": choices, + } + if __name__ == "__main__": lm = LanguageModel( "Trelis/Llama-2-7b-chat-hf-function-calling-v2", diff --git a/apps/shark_studio/modules/shared_cmd_opts.py b/apps/shark_studio/modules/shared_cmd_opts.py index 9aff75c219..7992660d96 100644 --- a/apps/shark_studio/modules/shared_cmd_opts.py +++ b/apps/shark_studio/modules/shared_cmd_opts.py @@ -439,6 +439,13 @@ def is_valid_file(arg): help="Specify your own huggingface authentication tokens for models like Llama2.", ) +p.add_argument( + "--external_weights", + type=str, + default=None, + help="What type of externalized weights to use. Currently options are 'safetensors' and defaults to inlined weights.", +) + p.add_argument( "--device_allocator_heap_key", type=str, @@ -580,6 +587,13 @@ def is_valid_file(arg): help="Controls data tiling in iree-compile for all SD models.", ) +p.add_argument( + "--quantization", + type=str, + default="None", + help="Quantization to be used for api-exposed model.", +) + ############################################################################## # Web UI flags ############################################################################## @@ -676,6 +690,12 @@ def is_valid_file(arg): "follow symlinks when listing subdirectories under --output_dir.", ) +p.add_argument( + "--api_log", + default=False, + action=argparse.BooleanOptionalAction, + help="Enables Compatibility API logging.", +) ############################################################################## # SD model auto-annotation flags diff --git a/apps/shark_studio/tests/api_test.py b/apps/shark_studio/tests/api_test.py index 98cd310060..ba6b7fedd8 100644 --- a/apps/shark_studio/tests/api_test.py +++ b/apps/shark_studio/tests/api_test.py @@ -7,27 +7,26 @@ import logging import unittest import json -from apps.shark_studio.api.llm import LanguageModel import gc -from apps.shark_studio.api.llm import LanguageModel +from apps.shark_studio.api.llm import LanguageModel, llm_chat_api from apps.shark_studio.api.sd import shark_sd_fn_dict_input, view_json_file from apps.shark_studio.web.utils.file_utils import get_resource_path -class SDAPITest(unittest.TestCase): - def testSDSimple(self): - from apps.shark_studio.modules.shared_cmd_opts import cmd_opts - import apps.shark_studio.web.utils.globals as global_obj +# class SDAPITest(unittest.TestCase): +# def testSDSimple(self): +# from apps.shark_studio.modules.shared_cmd_opts import cmd_opts +# import apps.shark_studio.web.utils.globals as global_obj - global_obj._init() +# global_obj._init() - sd_json = view_json_file(get_resource_path("../configs/default_sd_config.json")) - sd_kwargs = json.loads(sd_json) - for arg in vars(cmd_opts): - if arg in sd_kwargs: - sd_kwargs[arg] = getattr(cmd_opts, arg) - for i in shark_sd_fn_dict_input(sd_kwargs): - print(i) +# sd_json = 
view_json_file(get_resource_path("../configs/default_sd_config.json")) +# sd_kwargs = json.loads(sd_json) +# for arg in vars(cmd_opts): +# if arg in sd_kwargs: +# sd_kwargs[arg] = getattr(cmd_opts, arg) +# for i in shark_sd_fn_dict_input(sd_kwargs): +# print(i) class LLMAPITest(unittest.TestCase): def test01_LLMSmall(self): diff --git a/apps/shark_studio/tests/rest_api_test.py b/apps/shark_studio/tests/rest_api_test.py new file mode 100644 index 0000000000..a3b4977863 --- /dev/null +++ b/apps/shark_studio/tests/rest_api_test.py @@ -0,0 +1,45 @@ +import requests +from PIL import Image +import base64 +from io import BytesIO +import json + + +def llm_chat_test(verbose=False): + # Define values here + prompt = "What is the significance of the number 42?" + + url = "http://127.0.0.1:8080/v1/chat/completions" + + headers = { + "User-Agent": "PythonTest", + "Accept": "*/*", + "Accept-Encoding": "gzip, deflate, br", + } + + data = { + "model": "Trelis/Llama-2-7b-chat-hf-function-calling-v2", + "messages": [{ + "role": "", + "content": prompt, + }], + "device": "vulkan://0", + "max_tokens": 4096, + + } + + res = requests.post(url=url, json=data, headers=headers, timeout=1000) + res_dict = json.loads(res.content.decode("utf-8")) + print(f"[chat] response from server was : {res.status_code} {res.reason}") + + if verbose or res.status_code != 200: + print(f"\n{res_dict['choices'][0]['message']['content']}\n") + + +if __name__ == "__main__": + + # "Exercises the Stable Diffusion REST API of Shark. Make sure " + # "Shark is running in API mode on 127.0.0.1:8080 before running" + # "this script." + + llm_chat_test(verbose=True) diff --git a/apps/shark_studio/web/api/compat.py b/apps/shark_studio/web/api/compat.py index 3f92c41d02..7944eee336 100644 --- a/apps/shark_studio/web/api/compat.py +++ b/apps/shark_studio/web/api/compat.py @@ -6,7 +6,10 @@ import uvicorn import ipaddress import requests +import threading +import collections import gradio as gr +from PIL import Image, PngImagePlugin from threading import Lock from io import BytesIO from fastapi import APIRouter, Depends, FastAPI, Request, Response @@ -15,22 +18,15 @@ from fastapi.responses import JSONResponse from fastapi.encoders import jsonable_encoder -from apps.shark_studio.modules.img_processing import sampler_list -from sdapi_v1 import shark_sd_api -from api.llm import chat_api +from apps.shark_studio.modules.shared_cmd_opts import cmd_opts +#from sdapi_v1 import shark_sd_api +from apps.shark_studio.api.llm import llm_chat_api def decode_base64_to_image(encoding): if encoding.startswith("http://") or encoding.startswith("https://"): - if not opts.api_enable_requests: - raise HTTPException(status_code=500, detail="Requests not allowed") - if opts.api_forbid_local_requests and not verify_url(encoding): - raise HTTPException( - status_code=500, detail="Request to local resource not allowed" - ) - - headers = {"user-agent": opts.api_useragent} if opts.api_useragent else {} + headers = {} response = requests.get(encoding, timeout=30, headers=headers) try: image = Image.open(BytesIO(response.content)) @@ -49,56 +45,58 @@ def decode_base64_to_image(encoding): def encode_pil_to_base64(image): with io.BytesIO() as output_bytes: - if opts.samples_format.lower() == "png": - use_metadata = False - metadata = PngImagePlugin.PngInfo() - for key, value in image.info.items(): - if isinstance(key, str) and isinstance(value, str): - metadata.add_text(key, value) - use_metadata = True - image.save( - output_bytes, - format="PNG", - pnginfo=(metadata if 
use_metadata else None), - quality=opts.jpeg_quality, - ) - - elif opts.samples_format.lower() in ("jpg", "jpeg", "webp"): - if image.mode == "RGBA": - image = image.convert("RGB") - parameters = image.info.get("parameters", None) - exif_bytes = piexif.dump( - { - "Exif": { - piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump( - parameters or "", encoding="unicode" - ) - } - } - ) - if opts.samples_format.lower() in ("jpg", "jpeg"): - image.save( - output_bytes, - format="JPEG", - exif=exif_bytes, - quality=opts.jpeg_quality, - ) - else: - image.save( - output_bytes, - format="WEBP", - exif=exif_bytes, - quality=opts.jpeg_quality, - ) - - else: - raise HTTPException(status_code=500, detail="Invalid image format") + use_metadata = False + metadata = PngImagePlugin.PngInfo() + for key, value in image.info.items(): + if isinstance(key, str) and isinstance(value, str): + metadata.add_text(key, value) + use_metadata = True + image.save( + output_bytes, + format="PNG", + pnginfo=(metadata if use_metadata else None), + ) bytes_data = output_bytes.getvalue() return base64.b64encode(bytes_data) +# reference: https://gist.github.com/vitaliyp/6d54dd76ca2c3cdfc1149d33007dc34a +class FIFOLock(object): + def __init__(self): + self._lock = threading.Lock() + self._inner_lock = threading.Lock() + self._pending_threads = collections.deque() + + def acquire(self, blocking=True): + with self._inner_lock: + lock_acquired = self._lock.acquire(False) + if lock_acquired: + return True + elif not blocking: + return False + + release_event = threading.Event() + self._pending_threads.append(release_event) + + release_event.wait() + return self._lock.acquire() + + def release(self): + with self._inner_lock: + if self._pending_threads: + release_event = self._pending_threads.popleft() + release_event.set() + + self._lock.release() + + __enter__ = acquire + + def __exit__(self, t, v, tb): + self.release() + + def api_middleware(app: FastAPI): rich_available = False try: @@ -119,7 +117,7 @@ async def log_and_time(req: Request, call_next): duration = str(round(time.time() - ts, 4)) res.headers["X-Process-Time"] = duration endpoint = req.scope.get("path", "err") - if shared.cmd_opts.api_log and endpoint.startswith("/sdapi"): + if cmd_opts.api_log and endpoint.startswith("/sdapi"): print( "API {t} {code} {prot}/{ver} {method} {endpoint} {cli} {duration}".format( t=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"), @@ -156,7 +154,8 @@ def handle_exception(request: Request, e: Exception): width=min([console.width, 200]), ) else: - errors.report(message, exc_info=True) + print(message) + raise(e) return JSONResponse( status_code=vars(e).get("status_code", 500), content=jsonable_encoder(err), @@ -179,14 +178,14 @@ async def http_exception_handler(request: Request, e: HTTPException): class ApiCompat: - def __init__(self, queue_lock: Lock): + def __init__(self, app: FastAPI, queue_lock: Lock): self.router = APIRouter() - self.app = FastAPI() + self.app = app self.queue_lock = queue_lock api_middleware(self.app) - self.add_api_route("/sdapi/v1/txt2img", shark_sd_api, methods=["post"]) - self.add_api_route("/sdapi/v1/img2img", shark_sd_api, methods=["post"]) - # self.add_api_route("/sdapi/v1/upscaler", self.upscaler_api, methods=["post"]) + #self.add_api_route("/sdapi/v1/txt2img", shark_sd_api, methods=["POST"]) + #self.add_api_route("/sdapi/v1/img2img", shark_sd_api, methods=["POST"]) + # self.add_api_route("/sdapi/v1/upscaler", self.upscaler_api, methods=["POST"]) # 
self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=models.ExtrasSingleImageResponse) # self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=models.ExtrasBatchImagesResponse) # self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=models.PNGInfoResponse) @@ -221,56 +220,40 @@ def __init__(self, queue_lock: Lock): # self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=List[models.ScriptInfo]) # chat APIs needed for compatibility with multiple extensions using OpenAI API - self.add_api_route("/v1/chat/completions", chat_api, methods=["post"]) - self.add_api_route("/v1/completions", chat_api, methods=["post"]) - self.add_api_route("/chat/completions", chat_api, methods=["post"]) - self.add_api_route("/completions", chat_api, methods=["post"]) + self.add_api_route("/v1/chat/completions", llm_chat_api, methods=["POST"]) + self.add_api_route("/v1/completions", llm_chat_api, methods=["POST"]) + self.add_api_route("/chat/completions", llm_chat_api, methods=["POST"]) + self.add_api_route("/completions", llm_chat_api, methods=["POST"]) self.add_api_route( - "/v1/engines/codegen/completions", chat_api, methods=["post"] + "/v1/engines/codegen/completions", llm_chat_api, methods=["POST"] ) - if studio.cmd_opts.api_server_stop: - self.add_api_route( - "/sdapi/v1/server-kill", self.kill_studio, methods=["POST"] - ) - self.add_api_route( - "/sdapi/v1/server-restart", - self.restart_studio, - methods=["POST"], - ) - self.add_api_route( - "/sdapi/v1/server-stop", self.stop_studio, methods=["POST"] - ) self.default_script_arg_txt2img = [] self.default_script_arg_img2img = [] def add_api_route(self, path: str, endpoint, **kwargs): - if studio.cmd_opts.api_auth: - return self.app.add_api_route( - path, endpoint, dependencies=[Depends(self.auth)], **kwargs - ) return self.app.add_api_route(path, endpoint, **kwargs) - def refresh_checkpoints(self): - with self.queue_lock: - studio_data.refresh_checkpoints() + # def refresh_checkpoints(self): + # with self.queue_lock: + # studio_data.refresh_checkpoints() - def refresh_vae(self): - with self.queue_lock: - studio_data.refresh_vae_list() + # def refresh_vae(self): + # with self.queue_lock: + # studio_data.refresh_vae_list() - def unloadapi(self): - unload_model_weights() + # def unloadapi(self): + # unload_model_weights() - return {} + # return {} - def reloadapi(self): - reload_model_weights() + # def reloadapi(self): + # reload_model_weights() - return {} + # return {} - def skip(self): - studio.state.skip() + # def skip(self): + # studio.state.skip() def launch(self, server_name, port, root_path): self.app.include_router(self.router) @@ -278,27 +261,26 @@ def launch(self, server_name, port, root_path): self.app, host=server_name, port=port, - timeout_keep_alive=studio.cmd_opts.timeout_keep_alive, root_path=root_path, ) - def kill_studio(self): - restart.stop_program() - - def restart_studio(self): - if restart.is_restartable(): - restart.restart_program() - return Response(status_code=501) - - def preprocess(self, args: dict): - try: - studio.state.begin(job="preprocess") - preprocess(**args) - studio.state.end() - return models.PreprocessResponse(info="preprocess complete") - except: - studio.state.end() - - def stop_studio(request): - studio.state.server_command = "stop" - return Response("Stopping.") + # def kill_studio(self): + # restart.stop_program() + + 
# def restart_studio(self): + # if restart.is_restartable(): + # restart.restart_program() + # return Response(status_code=501) + + # def preprocess(self, args: dict): + # try: + # studio.state.begin(job="preprocess") + # preprocess(**args) + # studio.state.end() + # return models.PreprocessResponse(info="preprocess complete") + # except: + # studio.state.end() + + # def stop_studio(request): + # studio.state.server_command = "stop" + # return Response("Stopping.") diff --git a/apps/shark_studio/web/index.py b/apps/shark_studio/web/index.py index f697e2cc16..6ec9ab2210 100644 --- a/apps/shark_studio/web/index.py +++ b/apps/shark_studio/web/index.py @@ -20,9 +20,8 @@ def create_api(app): - from apps.shark_studio.api.compat import ApiCompat - from modules.call_queue import queue_lock - + from apps.shark_studio.web.api.compat import ApiCompat, FIFOLock + queue_lock = FIFOLock() api = ApiCompat(app, queue_lock) return api @@ -43,9 +42,9 @@ def api_only(): print(f"Startup time: {startup_timer.summary()}.") api.launch( - server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", - port=cmd_opts.port if cmd_opts.port else 8080, - root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else "", + server_name="0.0.0.0", + port=cmd_opts.server_port, + root_path="", ) diff --git a/apps/shark_studio/web/utils/globals.py b/apps/shark_studio/web/utils/globals.py index 977df7304a..27910e74ef 100644 --- a/apps/shark_studio/web/utils/globals.py +++ b/apps/shark_studio/web/utils/globals.py @@ -10,12 +10,14 @@ def _init(): global _sd_obj + global _llm_obj global _devices global _pipe_kwargs global _prep_kwargs global _gen_kwargs global _schedulers _sd_obj = None + _llm_obj = None _devices = None _pipe_kwargs = None _prep_kwargs = None @@ -26,9 +28,18 @@ def _init(): def set_sd_obj(value): global _sd_obj + global _llm_obj + _llm_obj = None _sd_obj = value +def set_llm_obj(value): + global _sd_obj + global _llm_obj + _llm_obj = value + _sd_obj = None + + def set_devices(): global _devices _devices = get_available_devices() @@ -69,6 +80,11 @@ def get_sd_obj(): return _sd_obj +def get_llm_obj(): + global _llm_obj + return _llm_obj + + def get_device_list(): global _devices return _devices @@ -101,14 +117,17 @@ def get_scheduler(key): def clear_cache(): global _sd_obj + global _llm_obj global _pipe_kwargs global _prep_kwargs global _gen_kwargs global _schedulers del _sd_obj + del _llm_obj del _schedulers gc.collect() _sd_obj = None + _llm_obj = None _pipe_kwargs = None _prep_kwargs = None _gen_kwargs = None From 1541b21ab4f96bf8b567d1e81a1f82ac48f73d9e Mon Sep 17 00:00:00 2001 From: Ean Garvey <87458719+monorimet@users.noreply.github.com> Date: Thu, 18 Jan 2024 19:01:07 -0600 Subject: [PATCH 10/25] Add StreamingLLM support to studio2 chat (#2060) * Streaming LLM * Update precision and add gpu support * (studio2) Separate weights generation for quantization support * Adapt prompt changes to studio flow * Remove outdated flag from llm compile flags. * (studio2) use turbine vmfbRunner * tweaks to prompts * Update CPU path and llm api test. * Change device in test to cpu. * Fixes to runner, device names, vmfb mgmt * Use small test without external weights. 
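For quick reference, constructing the streaming variant of the language model API added here looks roughly like the sketch below; aside from the model name, the keyword arguments are assumptions based on this patch series and may not match the final constructor signature exactly.

from apps.shark_studio.api.llm import LanguageModel

# Hypothetical usage sketch (not the exact test configuration): the
# streaming_llm flag turns on the StreamingLLM path, which evicts KV-cache
# space once the sequence step count grows large.
lm = LanguageModel(
    "Trelis/Llama-2-7b-chat-hf-function-calling-v2",
    device="cpu",            # assumed kwarg; device naming follows the runner fixes above
    precision="fp32",        # assumed kwarg
    use_system_prompt=True,
    streaming_llm=True,
)
# Token generation itself is driven through the chat routes registered in
# apps/shark_studio/web/api/compat.py (llm_chat_api) or the gradio chat UI.
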
--- apps/shark_studio/tests/api_test.py | 1 + apps/shark_studio/web/ui/chat.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/apps/shark_studio/tests/api_test.py b/apps/shark_studio/tests/api_test.py index ba6b7fedd8..28db38b2a8 100644 --- a/apps/shark_studio/tests/api_test.py +++ b/apps/shark_studio/tests/api_test.py @@ -7,6 +7,7 @@ import logging import unittest import json +from apps.shark_studio.api.llm import LanguageModel import gc from apps.shark_studio.api.llm import LanguageModel, llm_chat_api diff --git a/apps/shark_studio/web/ui/chat.py b/apps/shark_studio/web/ui/chat.py index f34f89bc78..294a623f44 100644 --- a/apps/shark_studio/web/ui/chat.py +++ b/apps/shark_studio/web/ui/chat.py @@ -13,6 +13,8 @@ B_SYS, E_SYS = "", "" +B_SYS, E_SYS = "", "" + def user(message, history): # Append the user's message to the conversation history From 5f675e18af23328d931482566083df442f46b48a Mon Sep 17 00:00:00 2001 From: Ean Garvey Date: Mon, 5 Feb 2024 11:21:30 -0600 Subject: [PATCH 11/25] Formatting and init files. --- apps/shark_studio/api/initializers.py | 7 ++- apps/shark_studio/api/llm.py | 23 +++++++--- apps/shark_studio/modules/embeddings.py | 10 +++-- apps/shark_studio/modules/img_processing.py | 6 +-- apps/shark_studio/modules/schedulers.py | 48 ++++++++++----------- apps/shark_studio/tests/api_test.py | 1 + apps/shark_studio/tests/export_unet.py | 41 ++++++++++++++++++ apps/shark_studio/tests/rest_api_test.py | 11 ++--- apps/shark_studio/web/api/compat.py | 9 ++-- apps/shark_studio/web/index.py | 1 + apps/shark_studio/web/ui/sd.py | 16 ++++--- apps/shark_studio/web/utils/__init__.py | 0 12 files changed, 118 insertions(+), 55 deletions(-) create mode 100644 apps/shark_studio/tests/export_unet.py create mode 100644 apps/shark_studio/web/utils/__init__.py diff --git a/apps/shark_studio/api/initializers.py b/apps/shark_studio/api/initializers.py index 6cc60d26f0..42622b54fa 100644 --- a/apps/shark_studio/api/initializers.py +++ b/apps/shark_studio/api/initializers.py @@ -7,6 +7,7 @@ from threading import Thread from apps.shark_studio.modules.timer import startup_timer + # from apps.shark_studio.web.utils.tmp_configs import ( # config_tmp, # clear_tmp_mlir, @@ -88,7 +89,9 @@ def dumpstacks(): def setup_middleware(app): from starlette.middleware.gzip import GZipMiddleware - app.middleware_stack = None # reset current middleware to allow modifying user provided list + app.middleware_stack = ( + None # reset current middleware to allow modifying user provided list + ) app.add_middleware(GZipMiddleware, minimum_size=1000) configure_cors_middleware(app) app.build_middleware_stack() # rebuild middleware stack on-the-fly @@ -104,7 +107,7 @@ def configure_cors_middleware(app): "allow_credentials": True, } if cmd_opts.api_accept_origin: - cors_options["allow_origins"] = cmd_opts.api_accept_origin.split(',') + cors_options["allow_origins"] = cmd_opts.api_accept_origin.split(",") app.add_middleware(CORSMiddleware, **cors_options) diff --git a/apps/shark_studio/api/llm.py b/apps/shark_studio/api/llm.py index fe64dac4f0..dd37862502 100644 --- a/apps/shark_studio/api/llm.py +++ b/apps/shark_studio/api/llm.py @@ -102,11 +102,16 @@ def __init__( self.file_spec += "_streaming" self.streaming_llm = streaming_llm - self.tempfile_name = get_resource_path(os.path.join("..", f"{self.file_spec}.tempfile")) + self.tempfile_name = get_resource_path( + os.path.join("..", f"{self.file_spec}.tempfile") + ) # TODO: Tag vmfb with target triple of device instead of HAL backend - self.vmfb_name = 
str(get_resource_path( - os.path.join("..", f"{self.file_spec}_{self.backend}.vmfb.tempfile") - )) + self.vmfb_name = str( + get_resource_path( + os.path.join("..", f"{self.file_spec}_{self.backend}.vmfb.tempfile") + ) + ) + self.max_tokens = llm_model_map[model_name]["max_tokens"] self.iree_module_dict = None self.use_system_prompt = use_system_prompt @@ -251,7 +256,10 @@ def format_out(results): token_len += 1 history.append(format_out(token)) - while format_out(token) != llm_model_map["llama2_7b"]["stop_token"] and len(history) < self.max_tokens: + while ( + format_out(token) != llm_model_map["llama2_7b"]["stop_token"] + and len(history) < self.max_tokens + ): dec_time = time.time() if self.streaming_llm and self.model["get_seq_step"]() > 600: print("Evicting cache space!") @@ -376,7 +384,9 @@ def llm_chat_api(InputData: dict): # TODO: add role dict for different models if is_chat_completion_api: # TODO: add funtionality for multiple messages - prompt = append_user_prompt(InputData["messages"][0]["role"], InputData["messages"][0]["content"]) + prompt = append_user_prompt( + InputData["messages"][0]["role"], InputData["messages"][0]["content"] + ) else: prompt = InputData["prompt"] print("prompt = ", prompt) @@ -411,6 +421,7 @@ def llm_chat_api(InputData: dict): "choices": choices, } + if __name__ == "__main__": lm = LanguageModel( "Trelis/Llama-2-7b-chat-hf-function-calling-v2", diff --git a/apps/shark_studio/modules/embeddings.py b/apps/shark_studio/modules/embeddings.py index 87924c819e..95d228d7c5 100644 --- a/apps/shark_studio/modules/embeddings.py +++ b/apps/shark_studio/modules/embeddings.py @@ -41,10 +41,12 @@ def processLoRA(model, use_lora, splitting_prefix, lora_strength=0.75): state_dict[f"{stem}up.weight"], state_dict[f"{stem}down.weight"], state_dict.get(f"{stem}mid.weight", None), - state_dict[f"{weight_key}.alpha"] - / state_dict[f"{stem}up.weight"].shape[1] - if f"{weight_key}.alpha" in state_dict - else 1.0, + ( + state_dict[f"{weight_key}.alpha"] + / state_dict[f"{stem}up.weight"].shape[1] + if f"{weight_key}.alpha" in state_dict + else 1.0 + ), ) # Directly update weight in model diff --git a/apps/shark_studio/modules/img_processing.py b/apps/shark_studio/modules/img_processing.py index 821f7b86eb..401c042ad2 100644 --- a/apps/shark_studio/modules/img_processing.py +++ b/apps/shark_studio/modules/img_processing.py @@ -174,9 +174,7 @@ def process_sd_init_image(self, sd_init_image, resample_type): if isinstance(sd_init_image, str): if os.path.isfile(sd_init_image): sd_init_image = Image.open(sd_init_image, mode="r").convert("RGB") - image, is_img2img = self.process_sd_init_image( - sd_init_image, resample_type - ) + image, is_img2img = self.process_sd_init_image(sd_init_image, resample_type) else: image = None is_img2img = False @@ -201,4 +199,4 @@ def process_sd_init_image(self, sd_init_image, resample_type): image_arr = 2 * (image_arr - 0.5) is_img2img = True image = image_arr - return image, is_img2img \ No newline at end of file + return image, is_img2img diff --git a/apps/shark_studio/modules/schedulers.py b/apps/shark_studio/modules/schedulers.py index 8c2413c638..3e931b1c78 100644 --- a/apps/shark_studio/modules/schedulers.py +++ b/apps/shark_studio/modules/schedulers.py @@ -50,30 +50,30 @@ def get_schedulers(model_id): schedulers["DPMSolverMultistep++"] = DPMSolverMultistepScheduler.from_pretrained( model_id, subfolder="scheduler", algorithm_type="dpmsolver++" ) - schedulers[ - "DPMSolverMultistepKarras" - ] = DPMSolverMultistepScheduler.from_pretrained( - 
model_id, - subfolder="scheduler", - use_karras_sigmas=True, - ) - schedulers[ - "DPMSolverMultistepKarras++" - ] = DPMSolverMultistepScheduler.from_pretrained( - model_id, - subfolder="scheduler", - algorithm_type="dpmsolver++", - use_karras_sigmas=True, + schedulers["DPMSolverMultistepKarras"] = ( + DPMSolverMultistepScheduler.from_pretrained( + model_id, + subfolder="scheduler", + use_karras_sigmas=True, + ) + ) + schedulers["DPMSolverMultistepKarras++"] = ( + DPMSolverMultistepScheduler.from_pretrained( + model_id, + subfolder="scheduler", + algorithm_type="dpmsolver++", + use_karras_sigmas=True, + ) ) schedulers["EulerDiscrete"] = EulerDiscreteScheduler.from_pretrained( model_id, subfolder="scheduler", ) - schedulers[ - "EulerAncestralDiscrete" - ] = EulerAncestralDiscreteScheduler.from_pretrained( - model_id, - subfolder="scheduler", + schedulers["EulerAncestralDiscrete"] = ( + EulerAncestralDiscreteScheduler.from_pretrained( + model_id, + subfolder="scheduler", + ) ) schedulers["DEISMultistep"] = DEISMultistepScheduler.from_pretrained( model_id, @@ -83,11 +83,11 @@ def get_schedulers(model_id): model_id, subfolder="scheduler", ) - schedulers[ - "KDPM2AncestralDiscrete" - ] = KDPM2AncestralDiscreteScheduler.from_pretrained( - model_id, - subfolder="scheduler", + schedulers["KDPM2AncestralDiscrete"] = ( + KDPM2AncestralDiscreteScheduler.from_pretrained( + model_id, + subfolder="scheduler", + ) ) schedulers["HeunDiscrete"] = HeunDiscreteScheduler.from_pretrained( model_id, diff --git a/apps/shark_studio/tests/api_test.py b/apps/shark_studio/tests/api_test.py index 28db38b2a8..b36184b87e 100644 --- a/apps/shark_studio/tests/api_test.py +++ b/apps/shark_studio/tests/api_test.py @@ -29,6 +29,7 @@ # for i in shark_sd_fn_dict_input(sd_kwargs): # print(i) + class LLMAPITest(unittest.TestCase): def test01_LLMSmall(self): lm = LanguageModel( diff --git a/apps/shark_studio/tests/export_unet.py b/apps/shark_studio/tests/export_unet.py new file mode 100644 index 0000000000..0cc8b2deb0 --- /dev/null +++ b/apps/shark_studio/tests/export_unet.py @@ -0,0 +1,41 @@ +import torch +from diffusers import ( + UNet2DConditionModel, +) +from torch.fx.experimental.proxy_tensor import make_fx + + +class UnetModel(torch.nn.Module): + def __init__(self, hf_model_name): + super().__init__() + self.unet = UNet2DConditionModel.from_pretrained( + hf_model_name, + subfolder="unet", + ) + + def forward(self, sample, timestep, encoder_hidden_states, guidance_scale): + samples = torch.cat([sample] * 2) + unet_out = self.unet.forward( + samples, timestep, encoder_hidden_states, return_dict=False + )[0] + noise_pred_uncond, noise_pred_text = unet_out.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * ( + noise_pred_text - noise_pred_uncond + ) + return noise_pred + + +if __name__ == "__main__": + hf_model_name = "CompVis/stable-diffusion-v1-4" + unet = UnetModel(hf_model_name) + inputs = (torch.randn(1, 4, 64, 64), 1, torch.randn(2, 77, 768), 7.5) + + fx_g = make_fx( + unet, + decomposition_table={}, + tracing_mode="symbolic", + _allow_non_fake_inputs=True, + _allow_fake_constant=False, + )(*inputs) + + print(fx_g) diff --git a/apps/shark_studio/tests/rest_api_test.py b/apps/shark_studio/tests/rest_api_test.py index a3b4977863..fef626bbeb 100644 --- a/apps/shark_studio/tests/rest_api_test.py +++ b/apps/shark_studio/tests/rest_api_test.py @@ -19,13 +19,14 @@ def llm_chat_test(verbose=False): data = { "model": "Trelis/Llama-2-7b-chat-hf-function-calling-v2", - "messages": [{ - "role": "", - "content": prompt, - 
}], + "messages": [ + { + "role": "", + "content": prompt, + } + ], "device": "vulkan://0", "max_tokens": 4096, - } res = requests.post(url=url, json=data, headers=headers, timeout=1000) diff --git a/apps/shark_studio/web/api/compat.py b/apps/shark_studio/web/api/compat.py index 7944eee336..147262d5c9 100644 --- a/apps/shark_studio/web/api/compat.py +++ b/apps/shark_studio/web/api/compat.py @@ -19,7 +19,8 @@ from fastapi.encoders import jsonable_encoder from apps.shark_studio.modules.shared_cmd_opts import cmd_opts -#from sdapi_v1 import shark_sd_api + +# from sdapi_v1 import shark_sd_api from apps.shark_studio.api.llm import llm_chat_api @@ -155,7 +156,7 @@ def handle_exception(request: Request, e: Exception): ) else: print(message) - raise(e) + raise (e) return JSONResponse( status_code=vars(e).get("status_code", 500), content=jsonable_encoder(err), @@ -183,8 +184,8 @@ def __init__(self, app: FastAPI, queue_lock: Lock): self.app = app self.queue_lock = queue_lock api_middleware(self.app) - #self.add_api_route("/sdapi/v1/txt2img", shark_sd_api, methods=["POST"]) - #self.add_api_route("/sdapi/v1/img2img", shark_sd_api, methods=["POST"]) + # self.add_api_route("/sdapi/v1/txt2img", shark_sd_api, methods=["POST"]) + # self.add_api_route("/sdapi/v1/img2img", shark_sd_api, methods=["POST"]) # self.add_api_route("/sdapi/v1/upscaler", self.upscaler_api, methods=["POST"]) # self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=models.ExtrasSingleImageResponse) # self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=models.ExtrasBatchImagesResponse) diff --git a/apps/shark_studio/web/index.py b/apps/shark_studio/web/index.py index 6ec9ab2210..dca062a037 100644 --- a/apps/shark_studio/web/index.py +++ b/apps/shark_studio/web/index.py @@ -21,6 +21,7 @@ def create_api(app): from apps.shark_studio.web.api.compat import ApiCompat, FIFOLock + queue_lock = FIFOLock() api = ApiCompat(app, queue_lock) return api diff --git a/apps/shark_studio/web/ui/sd.py b/apps/shark_studio/web/ui/sd.py index cbb17457ed..994b7caec7 100644 --- a/apps/shark_studio/web/ui/sd.py +++ b/apps/shark_studio/web/ui/sd.py @@ -292,9 +292,11 @@ def base_model_changed(base_model_id): label=f"Custom VAE Models", info=sd_vae_info, elem_id="custom_model", - value=os.path.basename(cmd_opts.custom_vae) - if cmd_opts.custom_vae - else "None", + value=( + os.path.basename(cmd_opts.custom_vae) + if cmd_opts.custom_vae + else "None" + ), choices=["None"] + get_checkpoints("vae"), allow_custom_value=True, scale=1, @@ -641,9 +643,11 @@ def base_model_changed(base_model_id): load_sd_config = gr.FileExplorer( label="Load Config", file_count="single", - root=cmd_opts.configs_path - if cmd_opts.configs_path - else get_configs_path(), + root=( + cmd_opts.configs_path + if cmd_opts.configs_path + else get_configs_path() + ), height=75, ) load_sd_config.change( diff --git a/apps/shark_studio/web/utils/__init__.py b/apps/shark_studio/web/utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 From a198934f8aacbe0e8cfcc2b65ab087c5e32d599b Mon Sep 17 00:00:00 2001 From: Ean Garvey Date: Mon, 5 Feb 2024 11:49:29 -0600 Subject: [PATCH 12/25] Remove unused import. 
--- apps/shark_studio/modules/pipeline.py | 1 - 1 file changed, 1 deletion(-) diff --git a/apps/shark_studio/modules/pipeline.py b/apps/shark_studio/modules/pipeline.py index 5dee266b13..7400ce212a 100644 --- a/apps/shark_studio/modules/pipeline.py +++ b/apps/shark_studio/modules/pipeline.py @@ -1,4 +1,3 @@ -from msvcrt import kbhit from shark.iree_utils.compile_utils import ( get_iree_compiled_module, load_vmfb_using_mmap, From 39ebc453933bed00b4cfdeef957bae7ebf07147b Mon Sep 17 00:00:00 2001 From: Ean Garvey Date: Mon, 12 Feb 2024 16:24:38 -0600 Subject: [PATCH 13/25] Small fixes --- apps/shark_studio/api/sd.py | 21 +++++++++++---------- apps/shark_studio/tests/rest_api_test.py | 2 +- requirements.txt | 4 ++-- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/apps/shark_studio/api/sd.py b/apps/shark_studio/api/sd.py index c26c25bf00..c349c923b3 100644 --- a/apps/shark_studio/api/sd.py +++ b/apps/shark_studio/api/sd.py @@ -45,16 +45,16 @@ "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-preprocessing-pad-linalg-ops{pad-size=16}))", ], }, - "vae_encode": { - "initializer": vae.export_vae_model, - "ireec_flags": [ - "--iree-flow-collapse-reduction-dims", - "--iree-opt-const-expr-hoisting=False", - "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", - "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-detach-elementwise-from-named-ops,iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-convert-conv2d-to-img2col,iree-preprocessing-pad-linalg-ops{pad-size=32},iree-linalg-ext-convert-conv2d-to-winograd))", - "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-preprocessing-pad-linalg-ops{pad-size=16}))", - ], - }, + # "vae_encode": { + # "initializer": vae.export_vae_model, + # "ireec_flags": [ + # "--iree-flow-collapse-reduction-dims", + # "--iree-opt-const-expr-hoisting=False", + # "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", + # "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-detach-elementwise-from-named-ops,iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-convert-conv2d-to-img2col,iree-preprocessing-pad-linalg-ops{pad-size=32},iree-linalg-ext-convert-conv2d-to-winograd))", + # "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-preprocessing-pad-linalg-ops{pad-size=16}))", + # ], + # }, "unet": { "initializer": unet.export_unet_model, "ireec_flags": [ @@ -152,6 +152,7 @@ def __init__( str(static_kwargs["unet"]["max_length"]), f"{str(height)}x{str(width)}", precision, + self.device, ] if num_loras > 0: pipe_id_list.append(str(num_loras) + "lora") diff --git a/apps/shark_studio/tests/rest_api_test.py b/apps/shark_studio/tests/rest_api_test.py index fef626bbeb..01b0901019 100644 --- a/apps/shark_studio/tests/rest_api_test.py +++ b/apps/shark_studio/tests/rest_api_test.py @@ -39,7 +39,7 @@ def llm_chat_test(verbose=False): if __name__ == "__main__": - # "Exercises the Stable Diffusion REST API of Shark. Make sure " + # "Exercises the chatbot REST API of Shark. Make sure " # "Shark is running in API mode on 127.0.0.1:8080 before running" # "this script." 
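For reference, the payload exercised by that script corresponds to roughly the following standalone call against the /v1/chat/completions route registered in apps/shark_studio/web/api/compat.py; the endpoint URL and response handling here are illustrative only.

import requests

# Assumes Shark Studio is already running in API mode on 127.0.0.1:8080.
url = "http://127.0.0.1:8080/v1/chat/completions"
data = {
    "model": "Trelis/Llama-2-7b-chat-hf-function-calling-v2",
    "messages": [{"role": "user", "content": "What is your name?"}],
    "device": "vulkan://0",
    "max_tokens": 4096,
}
res = requests.post(url=url, json=data, timeout=1000)
# The response follows an OpenAI-style completion layout; print it raw here
# rather than assuming the exact nesting of the "choices" entries.
print(res.json())
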
diff --git a/requirements.txt b/requirements.txt index 3f7e719e67..19d4521280 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,8 +5,8 @@ setuptools wheel -shark-turbine @ git+https://github.com/nod-ai/SHARK-Turbine.git@main -turbine-models @ git+https://github.com/nod-ai/SHARK-Turbine#egg=turbine-models&subdirectory=python/turbine_models +shark-turbine @ git+https://github.com/nod-ai/SHARK-Turbine#egg=shark-turbine&subdirectory=core +turbine-models @ git+https://github.com/nod-ai/SHARK-Turbine#egg=turbine-models&subdirectory=models # SHARK Runner tqdm From 75f4ed9bf2d430cbfae353a92ac6ddf9db7b9002 Mon Sep 17 00:00:00 2001 From: Stefan Kapusniak <121311569+one-lithe-rune@users.noreply.github.com> Date: Sun, 18 Feb 2024 03:36:31 +0000 Subject: [PATCH 14/25] Studio2/SD/UI: Improve various parts of the UI for Stable Diffusion (#2074) * Studio2/SD/UI: Improve various parts of the UI of Shark 2 * Update Gradio pin to 4.15.0. * Port workarounds for Gradio >4.8.0 main container sizing from Shark 1.0. * Move nod Logo out of the SD tab and onto the top right of the main tab bar. * Set nod logo icon as the favicon (as current Shark 1.0). * Create a tabbed right hand panel within the SD UI sized to the viewport height. * Make Input Image tab 1 in the right hand panel. * Make output images, generation log, and generation buttons, tab 2 in the right hand panel * Make config JSON display, with config load, save and clear, tab 3 in the right hand panel * Make gallery area of the Output tab take up all vertical space the other controls on the tab do not. * Tidy up the controls on the Config tab somewhat. * Studio2/SD/UI: Reorganise inputs on Left Panel of SD tab * Rename previously added Right Panel Output tab to 'Generate'. * Move Batch Count, Batch Size, and Repeatable Seeds, off of Left Panel and onto 'Generate' Tab. * On 'Generate' tab, rename 'Generate Image(s)' button to 'Start', and 'Stop Batch' button to 'Stop'. They are now below the Batch inputs on a Generate tab so don't need the specificity. * Move Device, Low VRAM, and Precision inputs into their own 'Device Settings' Accordion control. (starts closed) * Rename 'Custom Weights Checkpoint' to 'Checkpoint Weights' * Move Checkpoint Weights, VAE Model, Standalone Lora Weights, and Embeddings Options controls, into their own 'Model Weights' Accordion control. (starts closed) * Move Denoising Strength, and Resample Type controls into their own 'Input Image Processing' Accordion. (starts closed) * Move any remaining controls in the 'Advanced Options' Accorion directly onto the left panel, and remove then Accordion. * Enable the copy button for all text boxes on the SD tab. * Add emoji/unicode glphs to all top level controls and Accordions on the SD Left Panel. * Start with the 'Generate' as the initially selected tab in the SD Right Panel, working around Gradio issue #7805 * Tweaks to SD Right Tab Panel vertical height. * Studio2/SD/UI: Sizing tweaks for Right Panel, and >1920 width * Set height of right panel using vmin rather than vh, with explicit affordances for fixed areas above and below. * Port >1920 width Gradio >4.8 CSS workaround from Shark 1.0. 
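A stripped-down sketch of the right-hand panel structure and the Gradio issue #7805 workaround described above; the component contents are placeholders, and the real layout in apps/shark_studio/web/ui/sd.py wires in far more controls.

import gradio as gr

with gr.Blocks() as demo:
    # Right panel: Input Image / Generate / Config tabs, sized via the
    # sd-right-panel CSS class in sd_dark_theme.css.
    with gr.Tabs() as right_tabs:
        with gr.Tab(label="Input Image", id=100):
            gr.Image(show_label=False, type="pil")
        with gr.Tab(label="Generate Images", id=101):
            gr.Gallery(show_label=False, columns=2)
        with gr.Tab(label="Config", id=102):
            gr.JSON()
    # Workaround for Gradio issue #7805: select the Generate tab on load
    # instead of passing selected= to gr.Tabs at construction time.
    demo.load(lambda: gr.Tabs(selected=101), outputs=[right_tabs])

demo.launch()
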
--- apps/shark_studio/web/index.py | 21 +- .../shark_studio/web/ui/css/sd_dark_theme.css | 53 +- .../web/ui/js/sd_gradio_workarounds.js | 49 ++ apps/shark_studio/web/ui/sd.py | 660 +++++++++--------- requirements.txt | 2 +- 5 files changed, 456 insertions(+), 329 deletions(-) create mode 100644 apps/shark_studio/web/ui/js/sd_gradio_workarounds.js diff --git a/apps/shark_studio/web/index.py b/apps/shark_studio/web/index.py index dca062a037..455ea9358c 100644 --- a/apps/shark_studio/web/index.py +++ b/apps/shark_studio/web/index.py @@ -1,4 +1,6 @@ from multiprocessing import Process, freeze_support +from PIL import Image + import os import time import sys @@ -71,6 +73,10 @@ def launch_webui(address): def webui(): from apps.shark_studio.modules.shared_cmd_opts import cmd_opts + from apps.shark_studio.web.ui.utils import ( + nodicon_loc, + nodlogo_loc, + ) launch_api = cmd_opts.api initialize.initialize() @@ -134,6 +140,7 @@ def resource_path(relative_path): return os.path.join(base_path, relative_path) dark_theme = resource_path("ui/css/sd_dark_theme.css") + gradio_workarounds = resource_path("ui/js/sd_gradio_workarounds.js") # from apps.shark_studio.web.ui import load_ui_from_script @@ -158,8 +165,19 @@ def register_outputgallery_button(button, selectedid, inputs, outputs): ) with gr.Blocks( - css=dark_theme, analytics_enabled=False, title="Shark Studio 2.0 Beta" + css=dark_theme, + js=gradio_workarounds, + analytics_enabled=False, + title="Shark Studio 2.0 Beta", ) as studio_web: + nod_logo = Image.open(nodlogo_loc) + gr.Image( + value=nod_logo, + show_label=False, + interactive=False, + elem_id="tab_bar_logo", + show_download_button=False, + ) with gr.Tabs() as tabs: # NOTE: If adding, removing, or re-ordering tabs, make sure that they # have a unique id that doesn't clash with any of the other tabs, @@ -189,6 +207,7 @@ def register_outputgallery_button(button, selectedid, inputs, outputs): inbrowser=True, server_name="0.0.0.0", server_port=cmd_opts.server_port, + favicon_path=nodicon_loc, ) diff --git a/apps/shark_studio/web/ui/css/sd_dark_theme.css b/apps/shark_studio/web/ui/css/sd_dark_theme.css index 5686f0868c..e17b90c862 100644 --- a/apps/shark_studio/web/ui/css/sd_dark_theme.css +++ b/apps/shark_studio/web/ui/css/sd_dark_theme.css @@ -117,7 +117,7 @@ body { height: 100% !important; } -/* display in full width for desktop devices */ +/* display in full width for desktop devices, but see below */ @media (min-width: 1536px) { .gradio-container { @@ -125,6 +125,15 @@ body { } } +/* media rules in custom css are don't appear to be applied in + gradio versions > 4.7, so we have to define a class which + we will manually need add and remove using javascript. + Remove this once this fixed in gradio. +*/ +.gradio-container-size-full { + max-width: var(--size-full) !important; +} + .gradio-container .contain { padding: 0 var(--size-4) !important; } @@ -182,6 +191,7 @@ footer { aspect-ratio: unset; max-height: calc(55vh - (2 * var(--spacing-lg))); } +/* fix width and height of gallery items when on very large desktop screens, but see below */ @media (min-width: 1921px) { /* Force a 768px_height + 4px_margin_height + navbar_height for the gallery */ #gallery .grid-wrap, #gallery .preview{ @@ -193,6 +203,20 @@ footer { max-height: 770px !important; } } + +/* media rules in custom css are don't appear to be applied in + gradio versions > 4.7, so we have to define classes which + we will manually need add and remove using javascript. + Remove this once this fixed in gradio. 
+*/ +.gallery-force-height768 .grid-wrap, .gallery-force-height768 .preview { + min-height: calc(768px + 4px + var(--size-14)) !important; + max-height: calc(768px + 4px + var(--size-14)) !important; +} +.gallery-limit-height768 .thumbnail-item.thumbnail-lg { + max-height: 770px !important; +} + /* Don't upscale when viewing in solo image mode */ #gallery .preview img { object-fit: scale-down; @@ -303,6 +327,15 @@ footer { min-height: 89vh !important; } +.sd-right-panel { + height: calc(100vmin - var(--size-32) - var(--size-10)) !important; + overflow-y: scroll; +} + +.sd-right-panel .fill { + flex: 1; +} + /* don't stretch non-square images to be square, breaking their aspect ratio */ #outputgallery_gallery .thumbnail-item.thumbnail-lg > img { object-fit: contain !important; @@ -314,7 +347,7 @@ footer { width: 100%; } -#top_logo.logo_centered img{ +#top_logo.logo_centered img { object-fit: scale-down; position: absolute; width: 80%; @@ -322,3 +355,19 @@ footer { left: 50%; transform: translate(-50%, -50%); } + +#tab_bar_logo { + overflow: visible !important; + border-width: 0 !important; + height: 0px !important; + padding: 0; + margin: 0; +} + +#tab_bar_logo .image-container { + object-fit: scale-down; + position: absolute !important; + top: 14px; + right: 0px; + height: 36px; +} \ No newline at end of file diff --git a/apps/shark_studio/web/ui/js/sd_gradio_workarounds.js b/apps/shark_studio/web/ui/js/sd_gradio_workarounds.js new file mode 100644 index 0000000000..b1f893ee27 --- /dev/null +++ b/apps/shark_studio/web/ui/js/sd_gradio_workarounds.js @@ -0,0 +1,49 @@ +// workaround gradio after 4.7, not applying any @media rules form the custom .css file + +() => { + console.log(`innerWidth: ${window.innerWidth}` ) + + // 1536px rules + + const mediaQuery1536 = window.matchMedia('(min-width: 1536px)') + + function handleWidth1536(event) { + + // display in full width for desktop devices + document.querySelectorAll(".gradio-container") + .forEach( (node) => { + if (event.matches) { + node.classList.add("gradio-container-size-full"); + } else { + node.classList.remove("gradio-container-size-full") + } + }); + } + + mediaQuery1536.addEventListener("change", handleWidth1536); + mediaQuery1536.dispatchEvent(new MediaQueryListEvent("change", {matches: window.innerWidth >= 1536})); + + // 1921px rules + + const mediaQuery1921 = window.matchMedia('(min-width: 1921px)') + + function handleWidth1921(event) { + + /* Force a 768px_height + 4px_margin_height + navbar_height for the gallery */ + /* Limit height to 768px_height + 2px_margin_height for the thumbnails */ + document.querySelectorAll("#gallery") + .forEach( (node) => { + if (event.matches) { + node.classList.add("gallery-force-height768"); + node.classList.add("gallery-limit-height768"); + } else { + node.classList.remove("gallery-force-height768"); + node.classList.remove("gallery-limit-height768"); + } + }); + } + + mediaQuery1921.addEventListener("change", handleWidth1921); + mediaQuery1921.dispatchEvent(new MediaQueryListEvent("change", {matches: window.innerWidth >= 1921})); + +} diff --git a/apps/shark_studio/web/ui/sd.py b/apps/shark_studio/web/ui/sd.py index 994b7caec7..5b20922d36 100644 --- a/apps/shark_studio/web/ui/sd.py +++ b/apps/shark_studio/web/ui/sd.py @@ -31,9 +31,6 @@ resize_stencil, ) from apps.shark_studio.modules.shared_cmd_opts import cmd_opts -from apps.shark_studio.web.ui.utils import ( - nodlogo_loc, -) from apps.shark_studio.web.utils.state import ( status_label, ) @@ -238,39 +235,63 @@ def 
base_model_changed(base_model_id): with gr.Blocks(title="Stable Diffusion") as sd_element: - with gr.Row(elem_id="ui_title"): - nod_logo = Image.open(nodlogo_loc) - with gr.Row(variant="compact", equal_height=True): - with gr.Column( - scale=1, - elem_id="demo_title_outer", - ): - gr.Image( - value=nod_logo, - show_label=False, - interactive=False, - elem_id="top_logo", - width=150, - height=50, - show_download_button=False, - ) with gr.Column(elem_id="ui_body"): with gr.Row(): with gr.Column(scale=2, min_width=600): - with gr.Row(equal_height=True): - with gr.Column(scale=3): - sd_model_info = ( - f"Checkpoint Path: {str(get_checkpoints_path())}" + with gr.Accordion( + label="\U0001F4D0\U0000FE0F Device Settings", open=False + ): + device = gr.Dropdown( + elem_id="device", + label="Device", + value=global_obj.get_device_list()[0], + choices=global_obj.get_device_list(), + allow_custom_value=False, + ) + with gr.Row(): + ondemand = gr.Checkbox( + value=cmd_opts.lowvram, + label="Low VRAM", + interactive=True, ) - base_model_id = gr.Dropdown( - label="Base Model", - info="Select or enter HF model ID", - elem_id="custom_model", - value="stabilityai/stable-diffusion-2-1-base", - choices=sd_default_models, - ) # base_model_id + precision = gr.Radio( + label="Precision", + value=cmd_opts.precision, + choices=[ + "fp16", + "fp32", + ], + visible=True, + ) + sd_model_info = f"Checkpoint Path: {str(get_checkpoints_path())}" + base_model_id = gr.Dropdown( + label="\U000026F0\U0000FE0F Base Model", + info="Select or enter HF model ID", + elem_id="custom_model", + value="stabilityai/stable-diffusion-2-1-base", + choices=sd_default_models, + ) # base_model_id + with gr.Row(): + height = gr.Slider( + 384, + 768, + value=cmd_opts.height, + step=8, + label="\U00002195\U0000FE0F Height", + ) + width = gr.Slider( + 384, + 768, + value=cmd_opts.width, + step=8, + label="\U00002194\U0000FE0F Width", + ) + with gr.Accordion( + label="\U00002696\U0000FE0F Model Weights", open=False + ): + with gr.Column(): custom_weights = gr.Dropdown( - label="Custom Weights Checkpoint", + label="Checkpoint Weights", info="Select or enter HF model ID", elem_id="custom_model", value="None", @@ -283,13 +304,12 @@ def base_model_changed(base_model_id): inputs=[base_model_id], outputs=[custom_weights], ) - with gr.Column(scale=2): sd_vae_info = (str(get_checkpoints_path("vae"))).replace( "\\", "\n\\" ) sd_vae_info = f"VAE Path: {sd_vae_info}" custom_vae = gr.Dropdown( - label=f"Custom VAE Models", + label=f"VAE Model", info=sd_vae_info, elem_id="custom_model", value=( @@ -301,49 +321,9 @@ def base_model_changed(base_model_id): allow_custom_value=True, scale=1, ) - with gr.Row(): - ondemand = gr.Checkbox( - value=cmd_opts.lowvram, - label="Low VRAM", - interactive=True, - ) - precision = gr.Radio( - label="Precision", - value=cmd_opts.precision, - choices=[ - "fp16", - "fp32", - ], - visible=True, - ) - with gr.Group(elem_id="prompt_box_outer"): - prompt = gr.Textbox( - label="Prompt", - value=cmd_opts.prompt[0], - lines=2, - elem_id="prompt_box", - ) - negative_prompt = gr.Textbox( - label="Negative Prompt", - value=cmd_opts.negative_prompt[0], - lines=2, - elem_id="negative_prompt_box", - ) - - with gr.Accordion(label="Input Image", open=False): - # TODO: make this import image prompt info if it exists - sd_init_image = gr.Image( - label="Input Image", - type="pil", - height=300, - interactive=True, - ) - with gr.Accordion(label="Embeddings options", open=True, render=True): - sd_lora_info = 
(str(get_checkpoints_path("loras"))).replace( - "\\", "\n\\" - ) - with gr.Row(): - embeddings_config = gr.JSON(min_width=50, scale=1) + sd_lora_info = (str(get_checkpoints_path("loras"))).replace( + "\\", "\n\\" + ) lora_opt = gr.Dropdown( allow_custom_value=True, label=f"Standalone LoRA Weights", @@ -358,106 +338,83 @@ def base_model_changed(base_model_id): value="
No LoRA selected
", elem_classes="lora-tags", ) - gr.on( - triggers=[lora_opt.change], - fn=lora_changed, - inputs=[lora_opt], - outputs=[lora_tags], - queue=True, - show_progress=False, - ).then( - fn=update_embeddings_json, - inputs=[lora_opt], - outputs=[embeddings_config], - show_progress=False, - ) - with gr.Accordion(label="Advanced Options", open=True): - with gr.Row(): - scheduler = gr.Dropdown( - elem_id="scheduler", - label="Scheduler", - value="EulerDiscrete", - choices=scheduler_model_map.keys(), - allow_custom_value=False, - ) - height = gr.Slider( - 384, - 768, - value=cmd_opts.height, - step=8, - label="Height", + embeddings_config = gr.JSON( + label="Embeddings Options", min_width=50, scale=1 ) - width = gr.Slider( - 384, - 768, - value=cmd_opts.width, - step=8, - label="Width", + gr.on( + triggers=[lora_opt.change], + fn=lora_changed, + inputs=[lora_opt], + outputs=[lora_tags], + queue=True, + show_progress=False, + ).then( + fn=update_embeddings_json, + inputs=[lora_opt], + outputs=[embeddings_config], + show_progress=False, ) - with gr.Row(): - with gr.Column(scale=3): - steps = gr.Slider( - 1, - 100, - value=cmd_opts.steps, - step=1, - label="Steps", - ) - batch_count = gr.Slider( - 1, - 100, - value=cmd_opts.batch_count, - step=1, - label="Batch Count", - interactive=True, - ) - batch_size = gr.Slider( - 1, - 4, - value=cmd_opts.batch_size, - step=1, - label="Batch Size", - interactive=True, - visible=True, - ) - repeatable_seeds = gr.Checkbox( - cmd_opts.repeatable_seeds, - label="Repeatable Seeds", - ) - with gr.Column(scale=3): - strength = gr.Slider( - 0, - 1, - value=cmd_opts.strength, - step=0.01, - label="Denoising Strength", - ) - resample_type = gr.Dropdown( - value=cmd_opts.resample_type, - choices=resampler_list, - label="Resample Type", - allow_custom_value=True, - ) - guidance_scale = gr.Slider( - 0, - 50, - value=cmd_opts.guidance_scale, - step=0.1, - label="CFG Scale", - ) - with gr.Row(): + with gr.Accordion( + label="\U0001F9EA\U0000FE0F Input Image Processing", open=False + ): + strength = gr.Slider( + 0, + 1, + value=cmd_opts.strength, + step=0.01, + label="Denoising Strength", + ) + resample_type = gr.Dropdown( + value=cmd_opts.resample_type, + choices=resampler_list, + label="Resample Type", + allow_custom_value=True, + ) + with gr.Group(elem_id="prompt_box_outer"): + prompt = gr.Textbox( + label="\U00002795\U0000FE0F Prompt", + value=cmd_opts.prompt[0], + lines=2, + elem_id="prompt_box", + show_copy_button=True, + ) + negative_prompt = gr.Textbox( + label="\U00002796\U0000FE0F Negative Prompt", + value=cmd_opts.negative_prompt[0], + lines=2, + elem_id="negative_prompt_box", + show_copy_button=True, + ) + with gr.Row(equal_height=True): seed = gr.Textbox( value=cmd_opts.seed, - label="Seed", + label="\U0001F331\U0000FE0F Seed", info="An integer or a JSON list of integers, -1 for random", + show_copy_button=True, ) - device = gr.Dropdown( - elem_id="device", - label="Device", - value=global_obj.get_device_list()[0], - choices=global_obj.get_device_list(), + scheduler = gr.Dropdown( + elem_id="scheduler", + label="\U0001F4C5\U0000FE0F Scheduler", + info="\U000E0020", # forces same height as seed + value="EulerDiscrete", + choices=scheduler_model_map.keys(), allow_custom_value=False, ) + with gr.Row(): + steps = gr.Slider( + 1, + 100, + value=cmd_opts.steps, + step=1, + label="\U0001F3C3\U0000FE0F Steps", + ) + guidance_scale = gr.Slider( + 0, + 50, + value=cmd_opts.guidance_scale, + step=0.1, + label="\U0001F5C3\U0000FE0F CFG Scale", + ) with gr.Accordion( 
label="Controlnet Options", open=False, @@ -547,16 +504,6 @@ def base_model_changed(base_model_id): "Submit", size="sm", ) - use_input_img.click( - fn=import_original, - inputs=[ - sd_init_image, - canvas_width, - canvas_height, - ], - outputs=[cnet_input], - queue=False, - ) make_canvas.click( fn=create_canvas, inputs=[canvas_width, canvas_height], @@ -589,156 +536,219 @@ def base_model_changed(base_model_id): queue=False, ) with gr.Column(scale=3, min_width=600): - with gr.Group(): - sd_gallery = gr.Gallery( - label="Generated images", - show_label=False, - elem_id="gallery", - columns=2, - object_fit="fit", - preview=True, - ) - std_output = gr.Textbox( - value=f"{sd_model_info}\n" - f"Images will be saved at " - f"{get_generated_imgs_path()}", - lines=2, - elem_id="std_output", - show_label=False, - ) - sd_element.load(logger.read_sd_logs, None, std_output, every=1) - sd_status = gr.Textbox(visible=False) - with gr.Row(): - stable_diffusion = gr.Button("Generate Image(s)") - random_seed = gr.Button("Randomize Seed") - random_seed.click( - lambda: -1, - inputs=[], - outputs=[seed], - queue=False, - show_progress=False, + with gr.Tabs() as sd_tabs: + sd_element.load( + # Workaround for Gradio issue #7085 + # TODO: revert to setting selected= in gr.Tabs declaration + # once this is resolved in Gradio + lambda: gr.Tabs(selected=101), + outputs=[sd_tabs], ) - stop_batch = gr.Button("Stop Batch") - with gr.Group(): - with gr.Column(scale=3): - sd_json = gr.JSON( - value=view_json_file( - os.path.join( - get_configs_path(), - "default_sd_config.json", + with gr.Tab(label="Input Image", id=100) as sd_tab_init_image: + with gr.Column(elem_classes=["sd-right-panel"]): + with gr.Row(elem_classes=["fill"]): + # TODO: make this import image prompt info if it exists + sd_init_image = gr.Image( + type="pil", + interactive=True, + show_label=False, + ) + use_input_img.click( + fn=import_original, + inputs=[ + sd_init_image, + canvas_width, + canvas_height, + ], + outputs=[cnet_input], + queue=False, + ) + with gr.Tab(label="Generate Images", id=101) as sd_tab_gallery: + with gr.Column(elem_classes=["sd-right-panel"]): + with gr.Row(elem_classes=["fill"]): + sd_gallery = gr.Gallery( + label="Generated images", + show_label=False, + elem_id="gallery", + columns=2, + object_fit="fit", + preview=True, + ) + with gr.Row(): + std_output = gr.Textbox( + value=f"{sd_model_info}\n" + f"Images will be saved at " + f"{get_generated_imgs_path()}", + lines=2, + elem_id="std_output", + show_label=True, + label="Log", + show_copy_button=True, + ) + sd_element.load( + logger.read_sd_logs, None, std_output, every=1 + ) + sd_status = gr.Textbox(visible=False) + with gr.Row(): + batch_count = gr.Slider( + 1, + 100, + value=cmd_opts.batch_count, + step=1, + label="Batch Count", + interactive=True, + ) + batch_size = gr.Slider( + 1, + 4, + value=cmd_opts.batch_size, + step=1, + label="Batch Size", + interactive=True, + visible=True, + ) + repeatable_seeds = gr.Checkbox( + cmd_opts.repeatable_seeds, + label="Use Repeatable Seeds for Batches", + ) + with gr.Row(): + stable_diffusion = gr.Button("Start") + random_seed = gr.Button("Randomize Seed") + random_seed.click( + lambda: -1, + inputs=[], + outputs=[seed], + queue=False, + show_progress=False, + ) + stop_batch = gr.Button("Stop") + with gr.Tab(label="Config", id=102) as sd_tab_config: + with gr.Column(elem_classes=["sd-right-panel"]): + with gr.Row(elem_classes=["fill"]): + sd_json = gr.JSON( + elem_classes=["fill"], + value=view_json_file( + os.path.join( + 
get_configs_path(), + "default_sd_config.json", + ) + ), + ) + with gr.Row(): + with gr.Column(scale=3): + load_sd_config = gr.FileExplorer( + label="Load Config", + file_count="single", + root=( + cmd_opts.configs_path + if cmd_opts.configs_path + else get_configs_path() + ), + height=75, + ) + with gr.Column(scale=1): + save_sd_config = gr.Button( + value="Save Config", size="sm" + ) + clear_sd_config = gr.ClearButton( + value="Clear Config", + size="sm", + components=sd_json, + ) + with gr.Row(): + sd_config_name = gr.Textbox( + value="Config Name", + info="Name of the file this config will be saved to.", + interactive=True, + show_label=False, + ) + load_sd_config.change( + fn=load_sd_cfg, + inputs=[sd_json, load_sd_config], + outputs=[ + prompt, + negative_prompt, + sd_init_image, + height, + width, + steps, + strength, + guidance_scale, + seed, + batch_count, + batch_size, + scheduler, + base_model_id, + custom_weights, + custom_vae, + precision, + device, + ondemand, + repeatable_seeds, + resample_type, + cnet_config, + embeddings_config, + sd_json, + ], + ) + save_sd_config.click( + fn=save_sd_cfg, + inputs=[sd_json, sd_config_name], + outputs=[sd_config_name], ) - ) - ) - with gr.Column(scale=1): - clear_sd_config = gr.ClearButton( - value="Clear Config", size="sm", components=sd_json - ) - with gr.Row(): - save_sd_config = gr.Button(value="Save Config", size="sm") - sd_config_name = gr.Textbox( - value="Config Name", - info="Name of the file this config will be saved to.", - interactive=True, - ) - load_sd_config = gr.FileExplorer( - label="Load Config", - file_count="single", - root=( - cmd_opts.configs_path - if cmd_opts.configs_path - else get_configs_path() - ), - height=75, - ) - load_sd_config.change( - fn=load_sd_cfg, - inputs=[sd_json, load_sd_config], - outputs=[ - prompt, - negative_prompt, - sd_init_image, - height, - width, - steps, - strength, - guidance_scale, - seed, - batch_count, - batch_size, - scheduler, - base_model_id, - custom_weights, - custom_vae, - precision, - device, - ondemand, - repeatable_seeds, - resample_type, - cnet_config, - embeddings_config, - sd_json, - ], - ) - save_sd_config.click( - fn=save_sd_cfg, - inputs=[sd_json, sd_config_name], - outputs=[sd_config_name], - ) - pull_kwargs = dict( - fn=pull_sd_configs, - inputs=[ - prompt, - negative_prompt, - sd_init_image, - height, - width, - steps, - strength, - guidance_scale, - seed, - batch_count, - batch_size, - scheduler, - base_model_id, - custom_weights, - custom_vae, - precision, - device, - ondemand, - repeatable_seeds, - resample_type, - cnet_config, - embeddings_config, - ], - outputs=[ - sd_json, - ], - ) + pull_kwargs = dict( + fn=pull_sd_configs, + inputs=[ + prompt, + negative_prompt, + sd_init_image, + height, + width, + steps, + strength, + guidance_scale, + seed, + batch_count, + batch_size, + scheduler, + base_model_id, + custom_weights, + custom_vae, + precision, + device, + ondemand, + repeatable_seeds, + resample_type, + cnet_config, + embeddings_config, + ], + outputs=[ + sd_json, + ], + ) - status_kwargs = dict( - fn=lambda bc, bs: status_label("Stable Diffusion", 0, bc, bs), - inputs=[batch_count, batch_size], - outputs=sd_status, - ) + status_kwargs = dict( + fn=lambda bc, bs: status_label("Stable Diffusion", 0, bc, bs), + inputs=[batch_count, batch_size], + outputs=sd_status, + ) - gen_kwargs = dict( - fn=shark_sd_fn_dict_input, - inputs=[sd_json], - outputs=[ - sd_gallery, - sd_status, - ], - ) + gen_kwargs = dict( + fn=shark_sd_fn_dict_input, + inputs=[sd_json], + 
outputs=[ + sd_gallery, + sd_status, + ], + ) - prompt_submit = prompt.submit(**status_kwargs).then(**pull_kwargs) - neg_prompt_submit = negative_prompt.submit(**status_kwargs).then(**pull_kwargs) - generate_click = ( - stable_diffusion.click(**status_kwargs) - .then(**pull_kwargs) - .then(**gen_kwargs) - ) - stop_batch.click( - fn=cancel_sd, - cancels=[prompt_submit, neg_prompt_submit, generate_click], - ) + prompt_submit = prompt.submit(**status_kwargs).then(**pull_kwargs) + neg_prompt_submit = negative_prompt.submit(**status_kwargs).then(**pull_kwargs) + generate_click = ( + stable_diffusion.click(**status_kwargs).then(**pull_kwargs).then(**gen_kwargs) + ) + stop_batch.click( + fn=cancel_sd, + cancels=[prompt_submit, neg_prompt_submit, generate_click], + ) diff --git a/requirements.txt b/requirements.txt index 19d4521280..764c1ec093 100644 --- a/requirements.txt +++ b/requirements.txt @@ -26,7 +26,7 @@ parameterized accelerate scipy ftfy -gradio==4.8.0 +gradio==4.15.0 altair omegaconf # 0.3.2 doesn't have binaries for arm64 From 6dc39e6a66c29b55c8356d82f96249eb33d9161b Mon Sep 17 00:00:00 2001 From: Stefan Kapusniak <121311569+one-lithe-rune@users.noreply.github.com> Date: Sun, 18 Feb 2024 03:37:29 +0000 Subject: [PATCH 15/25] Studio2/SD: Fix sd pipeline up to "Windows not supported" (#2082) * Studio2/SD: Fix sd pipeline up to "Windows not supported" A number of fixes to the SD pipeline as run from the UI, up until the point that dynamo complains "Windows not yet supported for torch.compile". * Remove separate install of iree-runtime and iree-compile in setup_venv.ps1, and rely on the versions installed via the Turbine requirements.txt. Fixes #2063 for me. * Replace any "None" strings with python None when pulling the config in the UI. * Add 'hf_auth_token' param to api StableDiffusion class, defaulting to None, and then pass that in to the various Models where it is required and wasn't already being done before. * Fix clip custom_weight_params being passed to export_clip_model as "external_weight_file" rather than "external_weights" * Don't pass non-existing "custom_vae" parameter to the Turbine Vae Model, instead pass custom_vae as the "hf_model_id" if it is set. (this may be wrong in the custom vae cast, but stops the code *always* breaking). * Studio2/SD/UI: Improve UI config None handling * When populating the UI from a JSON Config set controls to "None" for null/None values. * When generating a JSON Config from the UI set props to null/None for controls set to "None". 
* Use null rather string 'None' in the default config --------- Co-authored-by: Ean Garvey <87458719+monorimet@users.noreply.github.com> --- apps/shark_studio/api/sd.py | 13 ++-- .../web/configs/default_sd_config.json | 29 +++++++- apps/shark_studio/web/ui/sd.py | 69 ++++++++++++++++++- apps/shark_studio/web/ui/utils.py | 12 ++++ setup_venv.ps1 | 7 +- 5 files changed, 116 insertions(+), 14 deletions(-) diff --git a/apps/shark_studio/api/sd.py b/apps/shark_studio/api/sd.py index c349c923b3..1124b4f5b0 100644 --- a/apps/shark_studio/api/sd.py +++ b/apps/shark_studio/api/sd.py @@ -96,6 +96,7 @@ def __init__( num_loras: int = 0, import_ir: bool = True, is_controlled: bool = False, + hf_auth_token=None, ): self.model_max_length = 77 self.batch_size = batch_size @@ -112,7 +113,7 @@ def __init__( "unet": { "hf_model_name": base_model_id, "unet_model": unet.UnetModel( - hf_model_name=base_model_id, hf_auth_token=None + hf_model_name=base_model_id, hf_auth_token=hf_auth_token ), "batch_size": batch_size, # "is_controlled": is_controlled, @@ -125,8 +126,8 @@ def __init__( "vae_encode": { "hf_model_name": base_model_id, "vae_model": vae.VaeModel( - hf_model_name=base_model_id, - custom_vae=custom_vae, + hf_model_name=custom_vae if custom_vae else base_model_id, + hf_auth_token=hf_auth_token, ), "batch_size": batch_size, "height": height, @@ -136,8 +137,8 @@ def __init__( "vae_decode": { "hf_model_name": base_model_id, "vae_model": vae.VaeModel( - hf_model_name=base_model_id, - custom_vae=custom_vae, + hf_model_name=custom_vae if custom_vae else base_model_id, + hf_auth_token=hf_auth_token, ), "batch_size": batch_size, "height": height, @@ -184,7 +185,7 @@ def prepare_pipe(self, custom_weights, adapters, embeddings, is_img2img): custom_weights_params, _ = process_custom_pipe_weights(custom_weights) if submodel not in ["clip", "clip2"]: self.static_kwargs[submodel][ - "external_weight_file" + "external_weights" ] = custom_weights_params else: self.static_kwargs[submodel]["external_weight_path"] = os.path.join( diff --git a/apps/shark_studio/web/configs/default_sd_config.json b/apps/shark_studio/web/configs/default_sd_config.json index 7a98a441df..762affe49b 100644 --- a/apps/shark_studio/web/configs/default_sd_config.json +++ b/apps/shark_studio/web/configs/default_sd_config.json @@ -1 +1,28 @@ -{"prompt": ["a photo taken of the front of a super-car drifting on a road near mountains at high speeds with smoke coming off the tires, front angle, front point of view, trees in the mountains of the background, ((sharp focus))"], "negative_prompt": ["watermark, signature, logo, text, lowres, ((monochrome, grayscale)), blurry, ugly, blur, oversaturated, cropped"], "sd_init_image": [null], "height": 512, "width": 512, "steps": 50, "strength": 0.8, "guidance_scale": 7.5, "seed": "-1", "batch_count": 1, "batch_size": 1, "scheduler": "EulerDiscrete", "base_model_id": "stabilityai/stable-diffusion-2-1-base", "custom_weights": "None", "custom_vae": "None", "precision": "fp16", "device": "AMD Radeon RX 7900 XTX => vulkan://0", "ondemand": false, "repeatable_seeds": false, "resample_type": "Nearest Neighbor", "controlnets": {}, "embeddings": {}} \ No newline at end of file +{ + "prompt": [ + "a photo taken of the front of a super-car drifting on a road near mountains at high speeds with smoke coming off the tires, front angle, front point of view, trees in the mountains of the background, ((sharp focus))" + ], + "negative_prompt": [ + "watermark, signature, logo, text, lowres, ((monochrome, grayscale)), blurry, ugly, 
blur, oversaturated, cropped" + ], + "sd_init_image": [null], + "height": 512, + "width": 512, + "steps": 50, + "strength": 0.8, + "guidance_scale": 7.5, + "seed": "-1", + "batch_count": 1, + "batch_size": 1, + "scheduler": "EulerDiscrete", + "base_model_id": "stabilityai/stable-diffusion-2-1-base", + "custom_weights": null, + "custom_vae": null, + "precision": "fp16", + "device": "AMD Radeon RX 7900 XTX => vulkan://0", + "ondemand": false, + "repeatable_seeds": false, + "resample_type": "Nearest Neighbor", + "controlnets": {}, + "embeddings": {} +} diff --git a/apps/shark_studio/web/ui/sd.py b/apps/shark_studio/web/ui/sd.py index 5b20922d36..fc8c16f154 100644 --- a/apps/shark_studio/web/ui/sd.py +++ b/apps/shark_studio/web/ui/sd.py @@ -31,6 +31,11 @@ resize_stencil, ) from apps.shark_studio.modules.shared_cmd_opts import cmd_opts +from apps.shark_studio.web.ui.utils import ( + nodlogo_loc, + none_to_str_none, + str_none_to_none, +) from apps.shark_studio.web.utils.state import ( status_label, ) @@ -119,7 +124,7 @@ def pull_sd_configs( controlnets, embeddings, ): - sd_args = locals() + sd_args = str_none_to_none(locals()) sd_cfg = {} for arg in sd_args: if arg in [ @@ -135,11 +140,12 @@ def pull_sd_configs( sd_cfg[arg] = {} else: sd_cfg[arg] = sd_args[arg] - return sd_cfg + + return json.dumps(sd_cfg) def load_sd_cfg(sd_json: dict, load_sd_config: str): - new_sd_config = json.loads(view_json_file(load_sd_config)) + new_sd_config = none_to_str_none(json.loads(view_json_file(load_sd_config))) if sd_json: for key in new_sd_config: sd_json[key] = new_sd_config[key] @@ -696,6 +702,63 @@ def base_model_changed(base_model_id): inputs=[sd_json, sd_config_name], outputs=[sd_config_name], ) + ) + ) + with gr.Column(scale=1): + clear_sd_config = gr.ClearButton( + value="Clear Config", size="sm", components=sd_json + ) + with gr.Row(): + save_sd_config = gr.Button(value="Save Config", size="sm") + sd_config_name = gr.Textbox( + value="Config Name", + info="Name of the file this config will be saved to.", + interactive=True, + ) + load_sd_config = gr.FileExplorer( + label="Load Config", + file_count="single", + root=( + cmd_opts.configs_path + if cmd_opts.configs_path + else get_configs_path() + ), + height=75, + ) + load_sd_config.change( + fn=load_sd_cfg, + inputs=[sd_json, load_sd_config], + outputs=[ + prompt, + negative_prompt, + sd_init_image, + height, + width, + steps, + strength, + guidance_scale, + seed, + batch_count, + batch_size, + scheduler, + base_model_id, + custom_weights, + custom_vae, + precision, + device, + ondemand, + repeatable_seeds, + resample_type, + cnet_config, + embeddings_config, + sd_json, + ], + ) + save_sd_config.click( + fn=save_sd_cfg, + inputs=[sd_json, sd_config_name], + outputs=[sd_config_name], + ) pull_kwargs = dict( fn=pull_sd_configs, diff --git a/apps/shark_studio/web/ui/utils.py b/apps/shark_studio/web/ui/utils.py index 34a94fa014..cee1a6d02e 100644 --- a/apps/shark_studio/web/ui/utils.py +++ b/apps/shark_studio/web/ui/utils.py @@ -29,3 +29,15 @@ def hsl_color(alpha: float, start, end): # Return a CSS HSL string return f"hsl({math.floor(result)}, 80%, 35%)" + + +def none_to_str_none(props: dict): + for key in props: + props[key] = "None" if props[key] == None else props[key] + return props + + +def str_none_to_none(props: dict): + for key in props: + props[key] = None if props[key] == "None" else props[key] + return props diff --git a/setup_venv.ps1 b/setup_venv.ps1 index 09489bf4cc..c7871836e6 100644 --- a/setup_venv.ps1 +++ b/setup_venv.ps1 @@ -7,13 +7,13 
@@ It checks the Python version installed and installs any required build dependencies into a Python virtual environment. If that environment does not exist, it creates it. - + .PARAMETER update-src git pulls latest version .PARAMETER force removes and recreates venv to force update of all dependencies - + .EXAMPLE .\setup_venv.ps1 --force @@ -39,7 +39,7 @@ if ($arguments -eq "--force"){ Write-Host "deactivating..." Deactivate } - + if (Test-Path .\shark.venv\) { Write-Host "removing and recreating venv..." Remove-Item .\shark.venv -Force -Recurse @@ -90,7 +90,6 @@ python -m pip install --upgrade pip pip install wheel pip install -r requirements.txt pip install --pre torch-mlir torchvision torch --extra-index-url https://download.pytorch.org/whl/nightly/cpu -f https://llvm.github.io/torch-mlir/package-index/ -pip install --upgrade -f https://nod-ai.github.io/SRT/pip-release-links.html iree-compiler iree-runtime Write-Host "Building SHARK..." pip install -e . -f https://llvm.github.io/torch-mlir/package-index/ -f https://nod-ai.github.io/SRT/pip-release-links.html Write-Host "Build and installation completed successfully" From c507f7d6f66719abb0912bd7782e23eea7d85cff Mon Sep 17 00:00:00 2001 From: Stefan Kapusniak <121311569+one-lithe-rune@users.noreply.github.com> Date: Mon, 19 Feb 2024 02:55:16 +0000 Subject: [PATCH 16/25] Studio2/SD/UI: Further sd ui pipeline fixes (#2091) On Windows, this gets us all the way failing in iree compile of the with SD 2.1 base. - Fix merge errors with sd right pane config UI tab. - Remove non-requirement.txt install/build of torch/mlir/iree/SRT in setup_venv.ps1, fixing "torch.compile not supported on Windows" error. - Fix gradio deprecation warning for `root=` FileExplorer kwarg. - Comment out `precision` and `max_length` kwargs being passed to unet, as not yet supported on main Turbine branch. Avoids keyword argument error. 
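Both this patch and the previous one lean on the config round-trip between the UI controls and the JSON config; the "None"-string conversion is handled by the small helpers added to apps/shark_studio/web/ui/utils.py, roughly used as below (note they mutate the dict in place as well as returning it).

from apps.shark_studio.web.ui.utils import none_to_str_none, str_none_to_none

ui_values = {"custom_weights": "None", "custom_vae": "None", "precision": "fp16"}

cfg = str_none_to_none(ui_values)          # "None" strings become real None for the pipeline
cfg_for_ui = none_to_str_none(dict(cfg))   # and back to "None" for dropdown display
print(cfg, cfg_for_ui)
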
--- apps/shark_studio/api/sd.py | 6 ++-- apps/shark_studio/web/ui/sd.py | 54 +--------------------------------- setup_venv.ps1 | 5 +--- 3 files changed, 5 insertions(+), 60 deletions(-) diff --git a/apps/shark_studio/api/sd.py b/apps/shark_studio/api/sd.py index 1124b4f5b0..afc4915a2c 100644 --- a/apps/shark_studio/api/sd.py +++ b/apps/shark_studio/api/sd.py @@ -120,8 +120,8 @@ def __init__( # "num_loras": num_loras, "height": height, "width": width, - "precision": precision, - "max_length": self.model_max_length, + # "precision": precision, + # "max_length": self.model_max_length, }, "vae_encode": { "hf_model_name": base_model_id, @@ -150,7 +150,7 @@ def __init__( pipe_id_list = [ safe_name(base_model_id), str(batch_size), - str(static_kwargs["unet"]["max_length"]), + str(self.model_max_length), f"{str(height)}x{str(width)}", precision, self.device, diff --git a/apps/shark_studio/web/ui/sd.py b/apps/shark_studio/web/ui/sd.py index fc8c16f154..795743f3ad 100644 --- a/apps/shark_studio/web/ui/sd.py +++ b/apps/shark_studio/web/ui/sd.py @@ -645,7 +645,7 @@ def base_model_changed(base_model_id): load_sd_config = gr.FileExplorer( label="Load Config", file_count="single", - root=( + root_dir=( cmd_opts.configs_path if cmd_opts.configs_path else get_configs_path() @@ -702,58 +702,6 @@ def base_model_changed(base_model_id): inputs=[sd_json, sd_config_name], outputs=[sd_config_name], ) - ) - ) - with gr.Column(scale=1): - clear_sd_config = gr.ClearButton( - value="Clear Config", size="sm", components=sd_json - ) - with gr.Row(): - save_sd_config = gr.Button(value="Save Config", size="sm") - sd_config_name = gr.Textbox( - value="Config Name", - info="Name of the file this config will be saved to.", - interactive=True, - ) - load_sd_config = gr.FileExplorer( - label="Load Config", - file_count="single", - root=( - cmd_opts.configs_path - if cmd_opts.configs_path - else get_configs_path() - ), - height=75, - ) - load_sd_config.change( - fn=load_sd_cfg, - inputs=[sd_json, load_sd_config], - outputs=[ - prompt, - negative_prompt, - sd_init_image, - height, - width, - steps, - strength, - guidance_scale, - seed, - batch_count, - batch_size, - scheduler, - base_model_id, - custom_weights, - custom_vae, - precision, - device, - ondemand, - repeatable_seeds, - resample_type, - cnet_config, - embeddings_config, - sd_json, - ], - ) save_sd_config.click( fn=save_sd_cfg, inputs=[sd_json, sd_config_name], diff --git a/setup_venv.ps1 b/setup_venv.ps1 index c7871836e6..6a937bec62 100644 --- a/setup_venv.ps1 +++ b/setup_venv.ps1 @@ -89,8 +89,5 @@ else {python -m venv .\shark.venv\} python -m pip install --upgrade pip pip install wheel pip install -r requirements.txt -pip install --pre torch-mlir torchvision torch --extra-index-url https://download.pytorch.org/whl/nightly/cpu -f https://llvm.github.io/torch-mlir/package-index/ -Write-Host "Building SHARK..." -pip install -e . -f https://llvm.github.io/torch-mlir/package-index/ -f https://nod-ai.github.io/SRT/pip-release-links.html -Write-Host "Build and installation completed successfully" + Write-Host "Source your venv with ./shark.venv/Scripts/activate" From 60c013e4f0550090aab10fee1f46602d31c9e66f Mon Sep 17 00:00:00 2001 From: Ean Garvey Date: Tue, 20 Feb 2024 08:50:53 -0600 Subject: [PATCH 17/25] Tweak compile-time flags for SD submodels. 
--- apps/shark_studio/api/sd.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/apps/shark_studio/api/sd.py b/apps/shark_studio/api/sd.py index afc4915a2c..8499b652dc 100644 --- a/apps/shark_studio/api/sd.py +++ b/apps/shark_studio/api/sd.py @@ -45,16 +45,15 @@ "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-preprocessing-pad-linalg-ops{pad-size=16}))", ], }, - # "vae_encode": { - # "initializer": vae.export_vae_model, - # "ireec_flags": [ - # "--iree-flow-collapse-reduction-dims", - # "--iree-opt-const-expr-hoisting=False", - # "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", - # "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-detach-elementwise-from-named-ops,iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-convert-conv2d-to-img2col,iree-preprocessing-pad-linalg-ops{pad-size=32},iree-linalg-ext-convert-conv2d-to-winograd))", - # "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-preprocessing-pad-linalg-ops{pad-size=16}))", - # ], - # }, + "vae_encode": { + "initializer": vae.export_vae_model, + "ireec_flags": [ + "--iree-flow-collapse-reduction-dims", + "--iree-opt-const-expr-hoisting=False", + "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", + "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-detach-elementwise-from-named-ops,iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-convert-conv2d-to-img2col,iree-preprocessing-pad-linalg-ops{pad-size=32},iree-linalg-ext-convert-conv2d-to-winograd))", + ], + }, "unet": { "initializer": unet.export_unet_model, "ireec_flags": [ @@ -71,7 +70,6 @@ "--iree-opt-const-expr-hoisting=False", "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-detach-elementwise-from-named-ops,iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-convert-conv2d-to-img2col,iree-preprocessing-pad-linalg-ops{pad-size=32},iree-linalg-ext-convert-conv2d-to-winograd))", - "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-preprocessing-pad-linalg-ops{pad-size=16}))", ], }, } From f7d1af46f4d50b1b44028f707dc69a013652365f Mon Sep 17 00:00:00 2001 From: Ean Garvey Date: Thu, 29 Feb 2024 19:16:47 -0600 Subject: [PATCH 18/25] Small fixes to sd, pin mpmath --- apps/shark_studio/api/initializers.py | 19 ++++++++-------- apps/shark_studio/api/sd.py | 31 ++++++++++++++------------- requirements.txt | 1 + 3 files changed, 27 insertions(+), 24 deletions(-) diff --git a/apps/shark_studio/api/initializers.py b/apps/shark_studio/api/initializers.py index 42622b54fa..94d50225c6 100644 --- a/apps/shark_studio/api/initializers.py +++ b/apps/shark_studio/api/initializers.py @@ -8,11 +8,12 @@ from apps.shark_studio.modules.timer import startup_timer -# from apps.shark_studio.web.utils.tmp_configs import ( -# config_tmp, -# clear_tmp_mlir, -# clear_tmp_imgs, -# ) +from apps.shark_studio.web.utils.tmp_configs import ( + config_tmp, + clear_tmp_mlir, + clear_tmp_imgs, + shark_tmp, +) def imports(): @@ -47,9 +48,9 @@ def initialize(): # existing temporary images there if they exist. Then we can import gradio. # It has to be in this order or gradio ignores what we've set up. 
- # config_tmp() + config_tmp() # clear_tmp_mlir() - # clear_tmp_imgs() + clear_tmp_imgs() from apps.shark_studio.web.utils.file_utils import ( create_checkpoint_folders, @@ -82,8 +83,8 @@ def dumpstacks(): code.append(f"""File: "{filename}", line {lineno}, in {name}""") if line: code.append(" " + line.strip()) - - print("\n".join(code)) + with open(os.path.join(shark_tmp, "stack_dump.log"), "w") as f: + f.write("\n".join(code)) def setup_middleware(app): diff --git a/apps/shark_studio/api/sd.py b/apps/shark_studio/api/sd.py index 8499b652dc..92d4b04aaa 100644 --- a/apps/shark_studio/api/sd.py +++ b/apps/shark_studio/api/sd.py @@ -42,34 +42,37 @@ "--iree-flow-collapse-reduction-dims", "--iree-opt-const-expr-hoisting=False", "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", + "--iree-flow-inline-constants-max-byte-length=0", "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-preprocessing-pad-linalg-ops{pad-size=16}))", ], }, - "vae_encode": { - "initializer": vae.export_vae_model, - "ireec_flags": [ - "--iree-flow-collapse-reduction-dims", - "--iree-opt-const-expr-hoisting=False", - "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", - "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-detach-elementwise-from-named-ops,iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-convert-conv2d-to-img2col,iree-preprocessing-pad-linalg-ops{pad-size=32},iree-linalg-ext-convert-conv2d-to-winograd))", - ], - }, + # "vae_encode": { + # "initializer": vae.export_vae_model, + # "ireec_flags": [ + # "--iree-flow-collapse-reduction-dims", + # "--iree-opt-const-expr-hoisting=False", + # "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", + # "--iree-flow-inline-constants-max-byte-length=0", + # "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-detach-elementwise-from-named-ops,iree-global-opt-convert-1x1-filter-conv2d-to-matmul))", + # ], + # }, "unet": { "initializer": unet.export_unet_model, "ireec_flags": [ "--iree-flow-collapse-reduction-dims", "--iree-opt-const-expr-hoisting=False", "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", - "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-convert-conv2d-to-img2col,iree-preprocessing-pad-linalg-ops{pad-size=32}))", + "--iree-flow-inline-constants-max-byte-length=0", + "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-pad-linalg-ops{pad-size=32}))", ], }, "vae_decode": { "initializer": vae.export_vae_model, "ireec_flags": [ - "--iree-flow-collapse-reduction-dims", "--iree-opt-const-expr-hoisting=False", "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", - "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-detach-elementwise-from-named-ops,iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-convert-conv2d-to-img2col,iree-preprocessing-pad-linalg-ops{pad-size=32},iree-linalg-ext-convert-conv2d-to-winograd))", + "--iree-flow-inline-constants-max-byte-length=0", + "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-detach-elementwise-from-named-ops,iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-pad-linalg-ops{pad-size=32}))", ], }, } @@ -111,7 +114,7 @@ def __init__( "unet": { "hf_model_name": base_model_id, "unet_model": 
unet.UnetModel( - hf_model_name=base_model_id, hf_auth_token=hf_auth_token + hf_model_name=base_model_id ), "batch_size": batch_size, # "is_controlled": is_controlled, @@ -125,7 +128,6 @@ def __init__( "hf_model_name": base_model_id, "vae_model": vae.VaeModel( hf_model_name=custom_vae if custom_vae else base_model_id, - hf_auth_token=hf_auth_token, ), "batch_size": batch_size, "height": height, @@ -136,7 +138,6 @@ def __init__( "hf_model_name": base_model_id, "vae_model": vae.VaeModel( hf_model_name=custom_vae if custom_vae else base_model_id, - hf_auth_token=hf_auth_token, ), "batch_size": batch_size, "height": height, diff --git a/requirements.txt b/requirements.txt index 764c1ec093..8f5846a633 100644 --- a/requirements.txt +++ b/requirements.txt @@ -44,6 +44,7 @@ timm # for MiniGPT4 langchain einops # for zoedepth pydantic==2.4.1 # pin until pyinstaller-hooks-contrib works with beta versions +mpmath==1.3.0 # Keep PyInstaller at the end. Sometimes Windows Defender flags it but most folks can continue even if it errors pefile From ca69fd5383446b545a853e1631d77e6aa2a16527 Mon Sep 17 00:00:00 2001 From: Ean Garvey Date: Thu, 29 Feb 2024 19:19:18 -0600 Subject: [PATCH 19/25] Add pyinstaller spec and imports script. --- apps/shark_studio/shark_studio.spec | 48 +++++++++++++++++++ apps/shark_studio/studio_imports.py | 74 +++++++++++++++++++++++++++++ 2 files changed, 122 insertions(+) create mode 100644 apps/shark_studio/shark_studio.spec create mode 100644 apps/shark_studio/studio_imports.py diff --git a/apps/shark_studio/shark_studio.spec b/apps/shark_studio/shark_studio.spec new file mode 100644 index 0000000000..1c87c953db --- /dev/null +++ b/apps/shark_studio/shark_studio.spec @@ -0,0 +1,48 @@ +# -*- mode: python ; coding: utf-8 -*- +from apps.shark_studio.studio_imports import pathex, datas, hiddenimports + +binaries = [] + +block_cipher = None + +a = Analysis( + ['web/index.py'], + pathex=pathex, + binaries=binaries, + datas=datas, + hiddenimports=hiddenimports, + hookspath=[], + hooksconfig={}, + runtime_hooks=[], + excludes=[], + win_no_prefer_redirects=False, + win_private_assemblies=False, + cipher=block_cipher, + noarchive=False, + module_collection_mode={ + 'gradio': 'py', # Collect gradio package as source .py files + }, +) +pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) + +exe = EXE( + pyz, + a.scripts, + a.binaries, + a.zipfiles, + a.datas, + [], + name='nodai_shark_studio', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=False, + upx_exclude=[], + runtime_tmpdir=None, + console=True, + disable_windowed_traceback=False, + argv_emulation=False, + target_arch=None, + codesign_identity=None, + entitlements_file=None, +) diff --git a/apps/shark_studio/studio_imports.py b/apps/shark_studio/studio_imports.py new file mode 100644 index 0000000000..0ed03e59a4 --- /dev/null +++ b/apps/shark_studio/studio_imports.py @@ -0,0 +1,74 @@ +from PyInstaller.utils.hooks import collect_data_files +from PyInstaller.utils.hooks import copy_metadata +from PyInstaller.utils.hooks import collect_submodules + +import sys + +sys.setrecursionlimit(sys.getrecursionlimit() * 5) + +# python path for pyinstaller +pathex = [ + ".", +] + +# datafiles for pyinstaller +datas = [] +datas += copy_metadata("torch") +datas += copy_metadata("tokenizers") +datas += copy_metadata("tqdm") +datas += copy_metadata("regex") +datas += copy_metadata("requests") +datas += copy_metadata("packaging") +datas += copy_metadata("filelock") +datas += copy_metadata("numpy") +datas += 
copy_metadata("importlib_metadata") +datas += copy_metadata("omegaconf") +datas += copy_metadata("safetensors") +datas += copy_metadata("Pillow") +datas += copy_metadata("sentencepiece") +datas += copy_metadata("pyyaml") +datas += copy_metadata("huggingface-hub") +datas += copy_metadata("gradio") +datas += collect_data_files("torch") +datas += collect_data_files("tokenizers") +datas += collect_data_files("tiktoken") +datas += collect_data_files("accelerate") +datas += collect_data_files("diffusers") +datas += collect_data_files("transformers") +datas += collect_data_files("pytorch_lightning") +datas += collect_data_files("skimage") +datas += collect_data_files("gradio") +datas += collect_data_files("gradio_client") +datas += collect_data_files("iree") +datas += collect_data_files("shark", include_py_files=True) +datas += collect_data_files("timm", include_py_files=True) +datas += collect_data_files("tqdm") +datas += collect_data_files("tkinter") +datas += collect_data_files("webview") +datas += collect_data_files("sentencepiece") +datas += collect_data_files("jsonschema") +datas += collect_data_files("jsonschema_specifications") +datas += collect_data_files("cpuinfo") +datas += collect_data_files("cv2") +datas += [ + ("web/ui/css/*", "ui/css"), + ("web/ui/js/*", "ui/js"), + ("web/ui/logos/*", "logos"), +] + + +# hidden imports for pyinstaller +hiddenimports = ["shark", "apps"] +#hiddenimports += [x for x in collect_submodules("skimage") if "tests" not in x] +hiddenimports += [x for x in collect_submodules("gradio") if "tests" not in x] +hiddenimports += [ + x for x in collect_submodules("diffusers") if "tests" not in x +] +blacklist = ["tests", "convert"] +hiddenimports += [ + x + for x in collect_submodules("transformers") + if not any(kw in x for kw in blacklist) +] +hiddenimports += [x for x in collect_submodules("iree") if "tests" not in x] +hiddenimports += ["iree._runtime"] \ No newline at end of file From 44ef35f4db3187392f1145d188e2f8e28ebb99df Mon Sep 17 00:00:00 2001 From: gpetters-amd <159576198+gpetters-amd@users.noreply.github.com> Date: Thu, 21 Mar 2024 20:55:13 -0400 Subject: [PATCH 20/25] Fix the .exe (#2101) --- apps/shark_studio/api/sd.py | 4 +- apps/shark_studio/studio_imports.py | 14 ++-- .../web/configs/default_sd_config.json | 2 +- apps/shark_studio/web/index.py | 1 + apps/shark_studio/web/ui/sd.py | 14 ++-- apps/shark_studio/web/utils/file_utils.py | 32 +++++++++ process_skipfiles.py | 56 +++------------ requirements-importer-macos.txt | 34 --------- requirements-importer.txt | 41 ----------- requirements.txt | 20 +----- setup_venv.sh | 69 +++---------------- 11 files changed, 72 insertions(+), 215 deletions(-) delete mode 100644 requirements-importer-macos.txt delete mode 100644 requirements-importer.txt diff --git a/apps/shark_studio/api/sd.py b/apps/shark_studio/api/sd.py index 92d4b04aaa..224b915459 100644 --- a/apps/shark_studio/api/sd.py +++ b/apps/shark_studio/api/sd.py @@ -121,8 +121,8 @@ def __init__( # "num_loras": num_loras, "height": height, "width": width, - # "precision": precision, - # "max_length": self.model_max_length, + "precision": precision, + "max_length": self.model_max_length, }, "vae_encode": { "hf_model_name": base_model_id, diff --git a/apps/shark_studio/studio_imports.py b/apps/shark_studio/studio_imports.py index 0ed03e59a4..ee4eb278d2 100644 --- a/apps/shark_studio/studio_imports.py +++ b/apps/shark_studio/studio_imports.py @@ -29,27 +29,23 @@ datas += copy_metadata("pyyaml") datas += copy_metadata("huggingface-hub") datas += 
copy_metadata("gradio") +datas += copy_metadata("scipy") datas += collect_data_files("torch") datas += collect_data_files("tokenizers") -datas += collect_data_files("tiktoken") datas += collect_data_files("accelerate") datas += collect_data_files("diffusers") datas += collect_data_files("transformers") -datas += collect_data_files("pytorch_lightning") -datas += collect_data_files("skimage") datas += collect_data_files("gradio") datas += collect_data_files("gradio_client") -datas += collect_data_files("iree") +datas += collect_data_files("iree", include_py_files=True) datas += collect_data_files("shark", include_py_files=True) -datas += collect_data_files("timm", include_py_files=True) datas += collect_data_files("tqdm") datas += collect_data_files("tkinter") -datas += collect_data_files("webview") datas += collect_data_files("sentencepiece") datas += collect_data_files("jsonschema") datas += collect_data_files("jsonschema_specifications") datas += collect_data_files("cpuinfo") -datas += collect_data_files("cv2") +datas += collect_data_files("scipy", include_py_files=True) datas += [ ("web/ui/css/*", "ui/css"), ("web/ui/js/*", "ui/js"), @@ -59,7 +55,6 @@ # hidden imports for pyinstaller hiddenimports = ["shark", "apps"] -#hiddenimports += [x for x in collect_submodules("skimage") if "tests" not in x] hiddenimports += [x for x in collect_submodules("gradio") if "tests" not in x] hiddenimports += [ x for x in collect_submodules("diffusers") if "tests" not in x @@ -71,4 +66,5 @@ if not any(kw in x for kw in blacklist) ] hiddenimports += [x for x in collect_submodules("iree") if "tests" not in x] -hiddenimports += ["iree._runtime"] \ No newline at end of file +hiddenimports += ["iree._runtime"] +hiddenimports += collect_submodules('scipy') \ No newline at end of file diff --git a/apps/shark_studio/web/configs/default_sd_config.json b/apps/shark_studio/web/configs/default_sd_config.json index 762affe49b..323a6a329c 100644 --- a/apps/shark_studio/web/configs/default_sd_config.json +++ b/apps/shark_studio/web/configs/default_sd_config.json @@ -25,4 +25,4 @@ "resample_type": "Nearest Neighbor", "controlnets": {}, "embeddings": {} -} +} \ No newline at end of file diff --git a/apps/shark_studio/web/index.py b/apps/shark_studio/web/index.py index 455ea9358c..a2f36332b1 100644 --- a/apps/shark_studio/web/index.py +++ b/apps/shark_studio/web/index.py @@ -1,4 +1,5 @@ from multiprocessing import Process, freeze_support +freeze_support() from PIL import Image import os diff --git a/apps/shark_studio/web/ui/sd.py b/apps/shark_studio/web/ui/sd.py index 795743f3ad..ef1c1131bb 100644 --- a/apps/shark_studio/web/ui/sd.py +++ b/apps/shark_studio/web/ui/sd.py @@ -14,6 +14,7 @@ get_checkpoints_path, get_checkpoints, get_configs_path, + write_default_sd_config, ) from apps.shark_studio.api.sd import ( sd_model_map, @@ -631,14 +632,15 @@ def base_model_changed(base_model_id): with gr.Tab(label="Config", id=102) as sd_tab_config: with gr.Column(elem_classes=["sd-right-panel"]): with gr.Row(elem_classes=["fill"]): + Path(get_configs_path()).mkdir(parents=True, exist_ok=True) + default_config_file = os.path.join( + get_configs_path(), + "default_sd_config.json", + ) + write_default_sd_config(default_config_file) sd_json = gr.JSON( elem_classes=["fill"], - value=view_json_file( - os.path.join( - get_configs_path(), - "default_sd_config.json", - ) - ), + value=view_json_file(default_config_file), ) with gr.Row(): with gr.Column(scale=3): diff --git a/apps/shark_studio/web/utils/file_utils.py 
b/apps/shark_studio/web/utils/file_utils.py index e6844fe17f..242bc9ee2e 100644 --- a/apps/shark_studio/web/utils/file_utils.py +++ b/apps/shark_studio/web/utils/file_utils.py @@ -11,6 +11,38 @@ "*.safetensors", ) +default_sd_config = r"""{ + "prompt": [ + "a photo taken of the front of a super-car drifting on a road near mountains at high speeds with smoke coming off the tires, front angle, front point of view, trees in the mountains of the background, ((sharp focus))" + ], + "negative_prompt": [ + "watermark, signature, logo, text, lowres, ((monochrome, grayscale)), blurry, ugly, blur, oversaturated, cropped" + ], + "sd_init_image": [null], + "height": 512, + "width": 512, + "steps": 50, + "strength": 0.8, + "guidance_scale": 7.5, + "seed": "-1", + "batch_count": 1, + "batch_size": 1, + "scheduler": "EulerDiscrete", + "base_model_id": "stabilityai/stable-diffusion-2-1-base", + "custom_weights": null, + "custom_vae": null, + "precision": "fp16", + "device": "AMD Radeon RX 7900 XTX => vulkan://0", + "ondemand": false, + "repeatable_seeds": false, + "resample_type": "Nearest Neighbor", + "controlnets": {}, + "embeddings": {} +}""" + +def write_default_sd_config(path): + with open(path, "w") as f: + f.write(default_sd_config) def safe_name(name): return name.replace("/", "_").replace("-", "_") diff --git a/process_skipfiles.py b/process_skipfiles.py index 339c7ebec6..9086ce59bf 100644 --- a/process_skipfiles.py +++ b/process_skipfiles.py @@ -5,6 +5,7 @@ from distutils.sysconfig import get_python_lib import fileinput from pathlib import Path +import os # Temporary workaround for transformers/__init__.py. path_to_transformers_hook = Path( @@ -16,51 +17,16 @@ with open(path_to_transformers_hook, "w") as f: f.write("module_collection_mode = 'pyz+py'") -path_to_skipfiles = Path(get_python_lib() + "/torch/_dynamo/skipfiles.py") +paths_to_skipfiles = [Path(get_python_lib() + "/torch/_dynamo/skipfiles.py"), Path(get_python_lib() + "/torch/_dynamo/trace_rules.py")] -modules_to_comment = ["abc,", "os,", "posixpath,", "_collections_abc,"] -startMonitoring = 0 -for line in fileinput.input(path_to_skipfiles, inplace=True): - if "SKIP_DIRS = " in line: - startMonitoring = 1 - print(line, end="") - elif startMonitoring in [1, 2]: - if "]" in line: - startMonitoring += 1 +for path in paths_to_skipfiles: + if not os.path.isfile(path): + continue + for line in fileinput.input(path, inplace=True): + if "[_module_dir(m) for m in BUILTIN_SKIPLIST]" in line and "x.__name__ for x in BUILTIN_SKIPLIST" not in line: + print(f"{line.rstrip()} + [x.__name__ for x in BUILTIN_SKIPLIST]") + elif "(_module_dir(m) for m in BUILTIN_SKIPLIST)" in line and "x.__name__ for x in BUILTIN_SKIPLIST" not in line: print(line, end="") + print(f"SKIP_DIRS.extend(filter(None, (x.__name__ for x in BUILTIN_SKIPLIST)))") else: - flag = True - for module in modules_to_comment: - if module in line: - if not line.startswith("#"): - print(f"#{line}", end="") - else: - print(f"{line[1:]}", end="") - flag = False - break - if flag: - print(line, end="") - else: - print(line, end="") - -# For getting around scikit-image's packaging, laze_loader has had a patch merged but yet to be released. 
-# Refer: https://github.com/scientific-python/lazy_loader -path_to_lazy_loader = Path(get_python_lib() + "/lazy_loader/__init__.py") - -for line in fileinput.input(path_to_lazy_loader, inplace=True): - if 'stubfile = filename if filename.endswith("i")' in line: - print( - ' stubfile = (filename if filename.endswith("i") else f"{os.path.splitext(filename)[0]}.pyi")', - end="", - ) - else: - print(line, end="") - -# For getting around timm's packaging. -# Refer: https://github.com/pyinstaller/pyinstaller/issues/5673#issuecomment-808731505 -path_to_timm_activations = Path(get_python_lib() + "/timm/layers/activations_jit.py") -for line in fileinput.input(path_to_timm_activations, inplace=True): - if "@torch.jit.script" in line: - print("@torch.jit._script_if_tracing", end="\n") - else: - print(line, end="") + print(line, end="") diff --git a/requirements-importer-macos.txt b/requirements-importer-macos.txt deleted file mode 100644 index 36e837b320..0000000000 --- a/requirements-importer-macos.txt +++ /dev/null @@ -1,34 +0,0 @@ --f https://download.pytorch.org/whl/nightly/cpu/ ---pre - -numpy -torch -torchvision - -tqdm - -#iree-compiler | iree-runtime should already be installed - -transformers -#jax[cpu] - -# tflitehub dependencies. -Pillow - -# web dependecies. -gradio -altair - -# Testing and support. -#lit -#pyyaml - -#ONNX and ORT for benchmarking -#--extra-index-url https://test.pypi.org/simple/ -#protobuf -#coloredlogs -#flatbuffers -#sympy -#psutil -#onnx-weekly -#ort-nightly diff --git a/requirements-importer.txt b/requirements-importer.txt deleted file mode 100644 index 3fe3a64659..0000000000 --- a/requirements-importer.txt +++ /dev/null @@ -1,41 +0,0 @@ --f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html ---pre - -numpy>1.22.4 -pytorch-triton -torchvision -tabulate - -tqdm - -#iree-compiler | iree-runtime should already be installed -iree-tools-xla - -# Modelling and JAX. -gin-config -transformers -diffusers -#jax[cpu] -Pillow - -# Testing and support. -lit -pyyaml -python-dateutil -sacremoses -sentencepiece - -# web dependecies. -gradio==3.44.3 -altair -scipy - -#ONNX and ORT for benchmarking -#--extra-index-url https://test.pypi.org/simple/ -#protobuf -#coloredlogs -#flatbuffers -#sympy -#psutil -#onnx-weekly -#ort-nightly diff --git a/requirements.txt b/requirements.txt index 8f5846a633..031fbe4b7c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,8 +5,9 @@ setuptools wheel -shark-turbine @ git+https://github.com/nod-ai/SHARK-Turbine#egg=shark-turbine&subdirectory=core -turbine-models @ git+https://github.com/nod-ai/SHARK-Turbine#egg=turbine-models&subdirectory=models +torch==2.3.0.dev20240305 +shark-turbine @ git+https://github.com/nod-ai/SHARK-Turbine.git@ean-sd-fp16#subdirectory=core +turbine-models @ git+https://github.com/nod-ai/SHARK-Turbine.git@ean-sd-fp16#subdirectory=models # SHARK Runner tqdm @@ -31,25 +32,10 @@ altair omegaconf # 0.3.2 doesn't have binaries for arm64 safetensors==0.3.1 -opencv-python -scikit-image -pytorch_lightning # for runwayml models -tk -pywebview -sentencepiece py-cpuinfo -tiktoken # for codegen -joblib # for langchain -timm # for MiniGPT4 -langchain -einops # for zoedepth pydantic==2.4.1 # pin until pyinstaller-hooks-contrib works with beta versions mpmath==1.3.0 # Keep PyInstaller at the end. 
Sometimes Windows Defender flags it but most folks can continue even if it errors pefile pyinstaller - -# For quantized GPTQ models -optimum -auto_gptq diff --git a/setup_venv.sh b/setup_venv.sh index 62c6513a85..64f769d794 100755 --- a/setup_venv.sh +++ b/setup_venv.sh @@ -49,58 +49,20 @@ Red=`tput setaf 1` Green=`tput setaf 2` Yellow=`tput setaf 3` -# Assume no binary torch-mlir. -# Currently available for macOS m1&intel (3.11) and Linux(3.8,3.10,3.11) -torch_mlir_bin=false -if [[ $(uname -s) = 'Darwin' ]]; then - echo "${Yellow}Apple macOS detected" - if [[ $(uname -m) == 'arm64' ]]; then - echo "${Yellow}Apple M1 Detected" - hash rustc 2>/dev/null - if [ $? -eq 0 ];then - echo "${Green}rustc found to compile HF tokenizers" - else - echo "${Red}Could not find rustc" >&2 - echo "${Red}Please run:" - echo "${Red}curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh" - exit 1 - fi - fi - echo "${Yellow}Run the following commands to setup your SSL certs for your Python version if you see SSL errors with tests" - echo "${Yellow}/Applications/Python\ 3.XX/Install\ Certificates.command" - if [ "$PYTHON_VERSION_X_Y" == "3.11" ]; then - torch_mlir_bin=true - fi -elif [[ $(uname -s) = 'Linux' ]]; then - echo "${Yellow}Linux detected" - if [ "$PYTHON_VERSION_X_Y" == "3.8" ] || [ "$PYTHON_VERSION_X_Y" == "3.10" ] || [ "$PYTHON_VERSION_X_Y" == "3.11" ] ; then - torch_mlir_bin=true - fi -else - echo "${Red}OS not detected. Pray and Play" -fi - # Upgrade pip and install requirements. $PYTHON -m pip install --upgrade pip || die "Could not upgrade pip" $PYTHON -m pip install --upgrade -r "$TD/requirements.txt" -if [ "$torch_mlir_bin" = true ]; then - if [[ $(uname -s) = 'Darwin' ]]; then - echo "MacOS detected. Installing torch-mlir from .whl, to avoid dependency problems with torch." - $PYTHON -m pip uninstall -y timm #TEMP FIX FOR MAC - $PYTHON -m pip install --pre --no-cache-dir torch-mlir -f https://llvm.github.io/torch-mlir/package-index/ -f https://download.pytorch.org/whl/nightly/torch/ +if [[ $(uname -s) = 'Darwin' ]]; then + echo "MacOS detected. Installing torch-mlir from .whl, to avoid dependency problems with torch." + $PYTHON -m pip uninstall -y timm #TEMP FIX FOR MAC + $PYTHON -m pip install --pre --no-cache-dir torch-mlir -f https://llvm.github.io/torch-mlir/package-index/ -f https://download.pytorch.org/whl/nightly/torch/ +else + $PYTHON -m pip install --pre torch-mlir -f https://llvm.github.io/torch-mlir/package-index/ + if [ $? -eq 0 ];then + echo "Successfully Installed torch-mlir" else - $PYTHON -m pip install --pre torch-mlir -f https://llvm.github.io/torch-mlir/package-index/ - if [ $? -eq 0 ];then - echo "Successfully Installed torch-mlir" - else - echo "Could not install torch-mlir" >&2 - fi + echo "Could not install torch-mlir" >&2 fi -else - echo "${Red}No binaries found for Python $PYTHON_VERSION_X_Y on $(uname -s)" - echo "${Yello}Python 3.11 supported on macOS and 3.8,3.10 and 3.11 on Linux" - echo "${Red}Please build torch-mlir from source in your environment" - exit 1 fi if [[ -z "${USE_IREE}" ]]; then rm .use-iree @@ -116,19 +78,6 @@ else echo "Not installing a backend, please make sure to add your backend to PYTHONPATH" fi -if [[ ! -z "${IMPORTER}" ]]; then - echo "${Yellow}Installing importer tools.." - if [[ $(uname -s) = 'Linux' ]]; then - echo "${Yellow}Linux detected.. 
installing Linux importer tools" - #Always get the importer tools from upstream IREE - $PYTHON -m pip install --no-warn-conflicts --upgrade -r "$TD/requirements-importer.txt" -f https://openxla.github.io/iree/pip-release-links.html --extra-index-url https://download.pytorch.org/whl/nightly/cpu - elif [[ $(uname -s) = 'Darwin' ]]; then - echo "${Yellow}macOS detected.. installing macOS importer tools" - #Conda seems to have some problems installing these packages and hope they get resolved upstream. - $PYTHON -m pip install --no-warn-conflicts --upgrade -r "$TD/requirements-importer-macos.txt" -f ${RUNTIME} --extra-index-url https://download.pytorch.org/whl/nightly/cpu - fi -fi - if [[ $(uname -s) = 'Darwin' ]]; then PYTORCH_URL=https://download.pytorch.org/whl/nightly/torch/ else From 81fba10300dc9fee146a082587bf17c3b1ebe0fb Mon Sep 17 00:00:00 2001 From: gpetters-amd <159576198+gpetters-amd@users.noreply.github.com> Date: Mon, 25 Mar 2024 02:38:28 -0400 Subject: [PATCH 21/25] Fix _IREE_TARGET_MAP (#2103) (#2108) - Change target passed to iree for vulkan from 'vulkan' to 'vulkan-spirv', as 'vulkan' is not a valid value for --iree-hal-target-backends with the current iree compiler. Co-authored-by: Stefan Kapusniak <121311569+one-lithe-rune@users.noreply.github.com> --- shark/iree_utils/_common.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/shark/iree_utils/_common.py b/shark/iree_utils/_common.py index 26dcf7c5b5..c58405b46e 100644 --- a/shark/iree_utils/_common.py +++ b/shark/iree_utils/_common.py @@ -91,7 +91,7 @@ def iree_target_map(device): "cpu-task": "llvm-cpu", "cpu-sync": "llvm-cpu", "cuda": "cuda", - "vulkan": "vulkan", + "vulkan": "vulkan-spirv", "metal": "metal", "rocm": "rocm", "intel-gpu": "opencl-spirv", @@ -122,9 +122,7 @@ def check_device_drivers(device): ) return True except RuntimeError as re: - print( - f"[ERR] Failed to get driver for {device} with error:\n{repr(re)}" - ) + print(f"[ERR] Failed to get driver for {device} with error:\n{repr(re)}") return True # Unknown device. We assume drivers are installed. From 0ade2ec00d7e904e4e20fd63b62b6d9a874f7d31 Mon Sep 17 00:00:00 2001 From: Ean Garvey Date: Thu, 28 Mar 2024 09:59:14 -0500 Subject: [PATCH 22/25] Cleanup sd model map. 
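A self-contained sketch of the shape the map reduces to after this cleanup: each submodel entry keeps only its export entry point, and the per-submodel ireec_flags move out of the table entirely. The lambdas stand in for the real Turbine export functions and are not part of the codebase.

```python
# Rough illustration (not the real module) of the slimmed-down sd_model_map.
sd_model_map = {
    "clip": {"initializer": lambda **kw: f"export clip with {kw}"},
    "unet": {"initializer": lambda **kw: f"export unet with {kw}"},
    "vae_decode": {"initializer": lambda **kw: f"export vae_decode with {kw}"},
}

for name, entry in sd_model_map.items():
    # Consumers now look up only the export function; compile flags are
    # decided elsewhere in the pipeline.
    print(name, "->", entry["initializer"](batch_size=1))
```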
--- apps/shark_studio/api/sd.py | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/apps/shark_studio/api/sd.py b/apps/shark_studio/api/sd.py index 224b915459..a0890b96d7 100644 --- a/apps/shark_studio/api/sd.py +++ b/apps/shark_studio/api/sd.py @@ -38,42 +38,12 @@ sd_model_map = { "clip": { "initializer": clip.export_clip_model, - "ireec_flags": [ - "--iree-flow-collapse-reduction-dims", - "--iree-opt-const-expr-hoisting=False", - "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", - "--iree-flow-inline-constants-max-byte-length=0", - "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-preprocessing-pad-linalg-ops{pad-size=16}))", - ], }, - # "vae_encode": { - # "initializer": vae.export_vae_model, - # "ireec_flags": [ - # "--iree-flow-collapse-reduction-dims", - # "--iree-opt-const-expr-hoisting=False", - # "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", - # "--iree-flow-inline-constants-max-byte-length=0", - # "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-detach-elementwise-from-named-ops,iree-global-opt-convert-1x1-filter-conv2d-to-matmul))", - # ], - # }, "unet": { "initializer": unet.export_unet_model, - "ireec_flags": [ - "--iree-flow-collapse-reduction-dims", - "--iree-opt-const-expr-hoisting=False", - "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", - "--iree-flow-inline-constants-max-byte-length=0", - "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-pad-linalg-ops{pad-size=32}))", - ], }, "vae_decode": { "initializer": vae.export_vae_model, - "ireec_flags": [ - "--iree-opt-const-expr-hoisting=False", - "--iree-codegen-linalg-max-constant-fold-elements=9223372036854775807", - "--iree-flow-inline-constants-max-byte-length=0", - "--iree-preprocessing-pass-pipeline=builtin.module(func.func(iree-global-opt-detach-elementwise-from-named-ops,iree-global-opt-convert-1x1-filter-conv2d-to-matmul,iree-preprocessing-pad-linalg-ops{pad-size=32}))", - ], }, } From f0ebfb08983644027aa96ecb911385091f51e619 Mon Sep 17 00:00:00 2001 From: Ean Garvey Date: Thu, 28 Mar 2024 11:16:29 -0500 Subject: [PATCH 23/25] Update dependencies. 
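One pattern this dependency update refines in studio_imports.py is filtering test modules out of PyInstaller's submodule collection so they don't get frozen into the executable. A small sketch of that pattern, assuming PyInstaller and the example packages are installed:

```python
from PyInstaller.utils.hooks import collect_submodules

# Hedged sketch of the hidden-import filtering used in studio_imports.py:
# collect every submodule of a package, then drop dotted names containing
# the keyword (here "test", matching the diff below).
def filtered_submodules(package: str, keyword: str = "test") -> list[str]:
    return [m for m in collect_submodules(package) if keyword not in m]

hiddenimports = filtered_submodules("scipy") + filtered_submodules("iree")
```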
--- apps/shark_studio/studio_imports.py | 4 ++-- setup.py | 2 +- setup_venv.ps1 | 2 ++ 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/apps/shark_studio/studio_imports.py b/apps/shark_studio/studio_imports.py index ee4eb278d2..de008ad0db 100644 --- a/apps/shark_studio/studio_imports.py +++ b/apps/shark_studio/studio_imports.py @@ -65,6 +65,6 @@ for x in collect_submodules("transformers") if not any(kw in x for kw in blacklist) ] -hiddenimports += [x for x in collect_submodules("iree") if "tests" not in x] +hiddenimports += [x for x in collect_submodules("iree") if "test" not in x] hiddenimports += ["iree._runtime"] -hiddenimports += collect_submodules('scipy') \ No newline at end of file +hiddenimports += [x for x in collect_submodules("scipy") if "test" not in x] \ No newline at end of file diff --git a/setup.py b/setup.py index 061873e7a8..d1aea1687a 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ with open("README.md", "r", encoding="utf-8") as fh: long_description = fh.read() -PACKAGE_VERSION = os.environ.get("SHARK_PACKAGE_VERSION") or "0.0.5" +PACKAGE_VERSION = os.environ.get("SHARK_PACKAGE_VERSION") or "2.0.0" backend_deps = [] setup( diff --git a/setup_venv.ps1 b/setup_venv.ps1 index 6a937bec62..c67b8fc83b 100644 --- a/setup_venv.ps1 +++ b/setup_venv.ps1 @@ -89,5 +89,7 @@ else {python -m venv .\shark.venv\} python -m pip install --upgrade pip pip install wheel pip install -r requirements.txt +# remove this when windows DLL issues are fixed from LLVM changes +pip install --force-reinstall https://github.com/openxla/iree/releases/download/candidate-20240326.843/iree_compiler-20240326.843-cp311-cp311-win_amd64.whl https://github.com/openxla/iree/releases/download/candidate-20240326.843/iree_runtime-20240326.843-cp311-cp311-win_amd64.whl Write-Host "Source your venv with ./shark.venv/Scripts/activate" From 2996df7ee4858f571b4c70094c4a4bfcdd01a88b Mon Sep 17 00:00:00 2001 From: Stefan Kapusniak <121311569+one-lithe-rune@users.noreply.github.com> Date: Thu, 28 Mar 2024 16:17:16 +0000 Subject: [PATCH 24/25] Studio2/SD/UI: Update gradio to 4.19.2 (sd-studio2) (#2097) - Move pin for gradio from 4.15 -> 4.19.2 on the sd-studio2 branch --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 031fbe4b7c..eb5ee5c505 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,7 +27,7 @@ parameterized accelerate scipy ftfy -gradio==4.15.0 +gradio==4.19.2 altair omegaconf # 0.3.2 doesn't have binaries for arm64 From 9f59a16596b07f6f2f01f70f8d38b6d965923f08 Mon Sep 17 00:00:00 2001 From: Ean Garvey Date: Thu, 28 Mar 2024 23:39:52 -0500 Subject: [PATCH 25/25] fix formatting and disable explicit vulkan env settings. 
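Part of this patch reworks the vulkan flag assembly in shark/iree_utils/vulkan_utils.py, so here is a simplified, hedged stand-in for the resulting behavior: the inline-constants limit is always added and the target-triple flag now uses the double-dash form. The triple below is an example value, not one detected from a real device.

```python
# Simplified stand-in for get_iree_vulkan_args after this patch; the real code
# derives the triple from the detected vulkan device instead of a default.
def get_iree_vulkan_args_sketch(triple: str = "rdna3-unknown-windows") -> list[str]:
    flags = [
        "--iree-stream-resource-max-allocation-size=3221225472",
        "--iree-flow-inline-constants-max-byte-length=0",
    ]
    flags.append(f"--iree-vulkan-target-triple={triple}")
    return flags


print(get_iree_vulkan_args_sketch())
```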
--- apps/shark_studio/api/initializers.py | 2 +- apps/shark_studio/api/llm.py | 3 +- apps/shark_studio/api/sd.py | 4 +- apps/shark_studio/api/utils.py | 4 + apps/shark_studio/modules/pipeline.py | 2 + apps/shark_studio/studio_imports.py | 6 +- apps/shark_studio/tests/rest_api_test.py | 1 - apps/shark_studio/web/api/compat.py | 1 - apps/shark_studio/web/index.py | 1 + apps/shark_studio/web/ui/sd.py | 4 +- apps/shark_studio/web/utils/file_utils.py | 2 + shark/iree_utils/compile_utils.py | 2 +- shark/iree_utils/vulkan_target_env_utils.py | 171 ++++++++++---------- shark/iree_utils/vulkan_utils.py | 8 +- 14 files changed, 111 insertions(+), 100 deletions(-) diff --git a/apps/shark_studio/api/initializers.py b/apps/shark_studio/api/initializers.py index 94d50225c6..48e7246df6 100644 --- a/apps/shark_studio/api/initializers.py +++ b/apps/shark_studio/api/initializers.py @@ -84,7 +84,7 @@ def dumpstacks(): if line: code.append(" " + line.strip()) with open(os.path.join(shark_tmp, "stack_dump.log"), "w") as f: - f.write("\n".join(code)) + f.write("\n".join(code)) def setup_middleware(app): diff --git a/apps/shark_studio/api/llm.py b/apps/shark_studio/api/llm.py index 0578c33b70..a88aaa9b02 100644 --- a/apps/shark_studio/api/llm.py +++ b/apps/shark_studio/api/llm.py @@ -326,6 +326,7 @@ def chat_hf(self, prompt): self.global_iter += 1 return result_output, total_time + def llm_chat_api(InputData: dict): from datetime import datetime as dt @@ -392,7 +393,6 @@ def llm_chat_api(InputData: dict): print("prompt = ", prompt) for res_op, _ in llm_model.chat(prompt): - if is_chat_completion_api: choices = [ { @@ -421,6 +421,7 @@ def llm_chat_api(InputData: dict): "choices": choices, } + if __name__ == "__main__": lm = LanguageModel( "Trelis/Llama-2-7b-chat-hf-function-calling-v2", diff --git a/apps/shark_studio/api/sd.py b/apps/shark_studio/api/sd.py index a0890b96d7..1b37384725 100644 --- a/apps/shark_studio/api/sd.py +++ b/apps/shark_studio/api/sd.py @@ -83,9 +83,7 @@ def __init__( "clip": {"hf_model_name": base_model_id}, "unet": { "hf_model_name": base_model_id, - "unet_model": unet.UnetModel( - hf_model_name=base_model_id - ), + "unet_model": unet.UnetModel(hf_model_name=base_model_id), "batch_size": batch_size, # "is_controlled": is_controlled, # "num_loras": num_loras, diff --git a/apps/shark_studio/api/utils.py b/apps/shark_studio/api/utils.py index d4882f2241..e9268aa83b 100644 --- a/apps/shark_studio/api/utils.py +++ b/apps/shark_studio/api/utils.py @@ -77,6 +77,7 @@ def get_devices_by_name(driver_name): available_devices.extend(cpu_device) return available_devices + def set_init_device_flags(): if "vulkan" in cmd_opts.device: # set runtime flags for vulkan. @@ -109,6 +110,7 @@ def set_init_device_flags(): elif "cpu" in cmd_opts.device: cmd_opts.device = "cpu" + def set_iree_runtime_flags(): # TODO: This function should be device-agnostic and piped properly # to general runtime driver init. 
@@ -177,6 +179,7 @@ def get_output_value(dev_dict): device_map[f"{driver}://{device['path']}"] = get_output_value(device) return device_map + def get_opt_flags(model, precision="fp16"): iree_flags = [] if len(cmd_opts.iree_vulkan_target_triple) > 0: @@ -202,6 +205,7 @@ def get_opt_flags(model, precision="fp16"): iree_flags += ["--iree-flow-collapse-reduction-dims"] return iree_flags + def map_device_to_name_path(device, key_combination=3): """Gives the appropriate device data (supported name/path) for user selected execution device diff --git a/apps/shark_studio/modules/pipeline.py b/apps/shark_studio/modules/pipeline.py index 7400ce212a..053858c5df 100644 --- a/apps/shark_studio/modules/pipeline.py +++ b/apps/shark_studio/modules/pipeline.py @@ -90,6 +90,8 @@ def get_compiled_map(self, pipe_id, submodel="None", init_kwargs={}) -> None: ) weights_path = self.get_io_params(submodel) + if weights_path: + ireec_flags.append("--iree-opt-const-eval=False") self.iree_module_dict[submodel] = get_iree_compiled_module( self.tempfiles[submodel], diff --git a/apps/shark_studio/studio_imports.py b/apps/shark_studio/studio_imports.py index de008ad0db..3f7aa319ba 100644 --- a/apps/shark_studio/studio_imports.py +++ b/apps/shark_studio/studio_imports.py @@ -56,9 +56,7 @@ # hidden imports for pyinstaller hiddenimports = ["shark", "apps"] hiddenimports += [x for x in collect_submodules("gradio") if "tests" not in x] -hiddenimports += [ - x for x in collect_submodules("diffusers") if "tests" not in x -] +hiddenimports += [x for x in collect_submodules("diffusers") if "tests" not in x] blacklist = ["tests", "convert"] hiddenimports += [ x @@ -67,4 +65,4 @@ ] hiddenimports += [x for x in collect_submodules("iree") if "test" not in x] hiddenimports += ["iree._runtime"] -hiddenimports += [x for x in collect_submodules("scipy") if "test" not in x] \ No newline at end of file +hiddenimports += [x for x in collect_submodules("scipy") if "test" not in x] diff --git a/apps/shark_studio/tests/rest_api_test.py b/apps/shark_studio/tests/rest_api_test.py index 01b0901019..741fa523cc 100644 --- a/apps/shark_studio/tests/rest_api_test.py +++ b/apps/shark_studio/tests/rest_api_test.py @@ -38,7 +38,6 @@ def llm_chat_test(verbose=False): if __name__ == "__main__": - # "Exercises the chatbot REST API of Shark. Make sure " # "Shark is running in API mode on 127.0.0.1:8080 before running" # "this script." 
diff --git a/apps/shark_studio/web/api/compat.py b/apps/shark_studio/web/api/compat.py index 147262d5c9..b5e81f2e9a 100644 --- a/apps/shark_studio/web/api/compat.py +++ b/apps/shark_studio/web/api/compat.py @@ -26,7 +26,6 @@ def decode_base64_to_image(encoding): if encoding.startswith("http://") or encoding.startswith("https://"): - headers = {} response = requests.get(encoding, timeout=30, headers=headers) try: diff --git a/apps/shark_studio/web/index.py b/apps/shark_studio/web/index.py index a2f36332b1..d1b97c2f78 100644 --- a/apps/shark_studio/web/index.py +++ b/apps/shark_studio/web/index.py @@ -1,4 +1,5 @@ from multiprocessing import Process, freeze_support + freeze_support() from PIL import Image diff --git a/apps/shark_studio/web/ui/sd.py b/apps/shark_studio/web/ui/sd.py index ef1c1131bb..799504cb75 100644 --- a/apps/shark_studio/web/ui/sd.py +++ b/apps/shark_studio/web/ui/sd.py @@ -632,7 +632,9 @@ def base_model_changed(base_model_id): with gr.Tab(label="Config", id=102) as sd_tab_config: with gr.Column(elem_classes=["sd-right-panel"]): with gr.Row(elem_classes=["fill"]): - Path(get_configs_path()).mkdir(parents=True, exist_ok=True) + Path(get_configs_path()).mkdir( + parents=True, exist_ok=True + ) default_config_file = os.path.join( get_configs_path(), "default_sd_config.json", diff --git a/apps/shark_studio/web/utils/file_utils.py b/apps/shark_studio/web/utils/file_utils.py index 242bc9ee2e..0f1953f5ac 100644 --- a/apps/shark_studio/web/utils/file_utils.py +++ b/apps/shark_studio/web/utils/file_utils.py @@ -40,10 +40,12 @@ "embeddings": {} }""" + def write_default_sd_config(path): with open(path, "w") as f: f.write(default_sd_config) + def safe_name(name): return name.replace("/", "_").replace("-", "_") diff --git a/shark/iree_utils/compile_utils.py b/shark/iree_utils/compile_utils.py index f5f9557744..5fd1d4006a 100644 --- a/shark/iree_utils/compile_utils.py +++ b/shark/iree_utils/compile_utils.py @@ -113,8 +113,8 @@ def get_iree_frontend_args(frontend): # Common args to be used given any frontend or device. 
def get_iree_common_args(debug=False): common_args = [ - "--iree-vm-bytecode-module-strip-source-map=true", "--iree-util-zero-fill-elided-attrs", + "--mlir-elide-elementsattrs-if-larger=10", ] if debug == True: common_args.extend( diff --git a/shark/iree_utils/vulkan_target_env_utils.py b/shark/iree_utils/vulkan_target_env_utils.py index 92d2f53442..7cd1b05241 100644 --- a/shark/iree_utils/vulkan_target_env_utils.py +++ b/shark/iree_utils/vulkan_target_env_utils.py @@ -33,7 +33,7 @@ def get_vulkan_target_env(vulkan_target_triple): device_type = get_device_type(triple) # get capabilities capabilities = get_vulkan_target_capabilities(triple) - target_env = f"#vk.target_env<{version}, r({revision}), {extensions}, {vendor}:{device_type}, #vk.caps< {capabilities} >>" + target_env = f"<#spirv.vce<{version}, r({revision}), {extensions}>, {vendor}:{device_type}, #spirv.resource_limits< {capabilities} >>" return target_env @@ -63,62 +63,62 @@ def make_ext_list(ext_list): arch, product, os = triple if arch == "m1": ext = [ - "VK_KHR_16bit_storage", - "VK_KHR_8bit_storage", - "VK_KHR_shader_float16_int8", - "VK_KHR_storage_buffer_storage_class", - "VK_KHR_variable_pointers", + "SPV_KHR_16bit_storage", + "SPV_KHR_8bit_storage", + "SPV_KHR_shader_float16_int8", + "SPV_KHR_storage_buffer_storage_class", + "SPV_KHR_variable_pointers", ] return make_ext_list(ext_list=ext) if arch == "valhall": ext = [ - "VK_KHR_16bit_storage", - "VK_KHR_8bit_storage", - "VK_KHR_shader_float16_int8", - "VK_KHR_spirv_1_4", - "VK_KHR_storage_buffer_storage_class", - "VK_KHR_variable_pointers", + "SPV_KHR_16bit_storage", + "SPV_KHR_8bit_storage", + "SPV_KHR_shader_float16_int8", + "SPV_KHR_spirv_1_4", + "SPV_KHR_storage_buffer_storage_class", + "SPV_KHR_variable_pointers", ] return make_ext_list(ext_list=ext) if arch == "adreno": ext = [ - "VK_KHR_16bit_storage", - "VK_KHR_shader_float16_int8", - "VK_KHR_spirv_1_4", - "VK_KHR_storage_buffer_storage_class", - "VK_KHR_variable_pointers", + "SPV_KHR_16bit_storage", + "SPV_KHR_shader_float16_int8", + "SPV_KHR_spirv_1_4", + "SPV_KHR_storage_buffer_storage_class", + "SPV_KHR_variable_pointers", ] if os == "android31": - ext.append("VK_KHR_8bit_storage") + ext.append("SPV_KHR_8bit_storage") return make_ext_list(ext_list=ext) if get_vendor(triple) == "SwiftShader": - ext = ["VK_KHR_storage_buffer_storage_class"] + ext = ["SPV_KHR_storage_buffer_storage_class"] return make_ext_list(ext_list=ext) if arch == "unknown": ext = [ - "VK_KHR_storage_buffer_storage_class", - "VK_KHR_variable_pointers", + "SPV_KHR_storage_buffer_storage_class", + "SPV_KHR_variable_pointers", ] return make_ext_list(ext_list=ext) ext = [ - "VK_KHR_16bit_storage", - "VK_KHR_8bit_storage", - "VK_KHR_shader_float16_int8", - "VK_KHR_spirv_1_4", - "VK_KHR_storage_buffer_storage_class", - "VK_KHR_variable_pointers", + "SPV_KHR_16bit_storage", + "SPV_KHR_8bit_storage", + "SPV_KHR_shader_float16_int8", + "SPV_KHR_spirv_1_4", + "SPV_KHR_storage_buffer_storage_class", + "SPV_KHR_variable_pointers", "VK_EXT_subgroup_size_control", ] if get_vendor(triple) == "NVIDIA" or arch == "rdna3": - ext.append("VK_KHR_cooperative_matrix") + ext.append("SPV_KHR_cooperative_matrix") if get_vendor(triple) == ["NVIDIA", "AMD", "Intel"]: - ext.append("VK_KHR_shader_integer_dot_product") + ext.append("SPV_KHR_shader_integer_dot_product") return make_ext_list(ext_list=ext) @@ -186,13 +186,13 @@ def get_subgroup_val(l): "Quad": 128, "PartitionedNV": 256, } - cap["maxComputeSharedMemorySize"] = 16384 - cap["maxComputeWorkGroupInvocations"] = 
128 - cap["maxComputeWorkGroupSize"] = [128, 128, 64] - cap["subgroupSize"] = 32 + cap["max_compute_shared_memory_size"] = 16384 + cap["max_compute_workgroup_invocations"] = 128 + cap["max_compute_workgroup_size"] = [128, 128, 64] + cap["subgroup_size"] = 32 cap["subgroupFeatures"] = ["Basic"] - cap["minSubgroupSize"] = None - cap["maxSubgroupSize"] = None + cap["min_subgroup_size"] = None + cap["max_subgroup_size"] = None cap["shaderFloat16"] = False cap["shaderFloat64"] = False cap["shaderInt8"] = False @@ -209,13 +209,13 @@ def get_subgroup_val(l): cap["coopmatCases"] = None if arch in ["rdna1", "rdna2", "rdna3"]: - cap["maxComputeSharedMemorySize"] = 65536 - cap["maxComputeWorkGroupInvocations"] = 1024 - cap["maxComputeWorkGroupSize"] = [1024, 1024, 1024] + cap["max_compute_shared_memory_size"] = 65536 + cap["max_compute_workgroup_invocations"] = 1024 + cap["max_compute_workgroup_size"] = [1024, 1024, 1024] - cap["subgroupSize"] = 64 - cap["minSubgroupSize"] = 32 - cap["maxSubgroupSize"] = 64 + cap["subgroup_size"] = 64 + cap["min_subgroup_size"] = 32 + cap["max_subgroup_size"] = 64 cap["subgroupFeatures"] = [ "Basic", "Vote", @@ -244,7 +244,8 @@ def get_subgroup_val(l): if arch == "rdna3": # TODO: Get scope value cap["coopmatCases"] = [ - "mSize = 16, nSize = 16, kSize = 16, aType = f16, bType = f16, cType = f16, resultType = f16, accSat = false, scope = #vk.scope" + "m_size = 16, n_size = 16, k_size = 16, a_type = f16, b_type = f16, c_type = f16, result_type = f16, acc_sat = false, scope = ", + "m_size = 16, n_size = 16, k_size = 16, a_type = f16, b_type = f16, c_type = f32, result_type = f32, acc_sat = false, scope = " ] if product == "rx5700xt": @@ -252,11 +253,11 @@ def get_subgroup_val(l): cap["storagePushConstant8"] = False elif arch in ["rgcn5", "rgcn4", "rgcn3"]: - cap["maxComputeSharedMemorySize"] = 65536 - cap["maxComputeWorkGroupInvocations"] = 1024 - cap["maxComputeWorkGroupSize"] = [1024, 1024, 1024] + cap["max_compute_shared_memory_size"] = 65536 + cap["max_compute_workgroup_invocations"] = 1024 + cap["max_compute_workgroup_size"] = [1024, 1024, 1024] - cap["subgroupSize"] = 64 + cap["subgroup_size"] = 64 cap["subgroupFeatures"] = [ "Basic", "Vote", @@ -267,8 +268,8 @@ def get_subgroup_val(l): "Clustered", "Quad", ] - cap["minSubgroupSize"] = 64 - cap["maxSubgroupSize"] = 64 + cap["min_subgroup_size"] = 64 + cap["max_subgroup_size"] = 64 if arch == "rgcn5": cap["shaderFloat16"] = True @@ -290,11 +291,11 @@ def get_subgroup_val(l): cap["variablePointersStorageBuffer"] = True elif arch == "m1": - cap["maxComputeSharedMemorySize"] = 32768 - cap["maxComputeWorkGroupInvocations"] = 1024 - cap["maxComputeWorkGroupSize"] = [1024, 1024, 1024] + cap["max_compute_shared_memory_size"] = 32768 + cap["max_compute_workgroup_invocations"] = 1024 + cap["max_compute_workgroup_size"] = [1024, 1024, 1024] - cap["subgroupSize"] = 32 + cap["subgroup_size"] = 32 cap["subgroupFeatures"] = [ "Basic", "Vote", @@ -321,11 +322,11 @@ def get_subgroup_val(l): cap["variablePointersStorageBuffer"] = True elif arch == "valhall": - cap["maxComputeSharedMemorySize"] = 32768 - cap["maxComputeWorkGroupInvocations"] = 512 - cap["maxComputeWorkGroupSize"] = [512, 512, 512] + cap["max_compute_shared_memory_size"] = 32768 + cap["max_compute_workgroup_invocations"] = 512 + cap["max_compute_workgroup_size"] = [512, 512, 512] - cap["subgroupSize"] = 16 + cap["subgroup_size"] = 16 cap["subgroupFeatures"] = [ "Basic", "Vote", @@ -352,11 +353,11 @@ def get_subgroup_val(l): cap["variablePointersStorageBuffer"] = True 
elif arch == "arc": - cap["maxComputeSharedMemorySize"] = 32768 - cap["maxComputeWorkGroupInvocations"] = 1024 - cap["maxComputeWorkGroupSize"] = [1024, 1024, 64] + cap["max_compute_shared_memory_size"] = 32768 + cap["max_compute_workgroup_invocations"] = 1024 + cap["max_compute_workgroup_size"] = [1024, 1024, 64] - cap["subgroupSize"] = 32 + cap["subgroup_size"] = 32 cap["subgroupFeatures"] = [ "Basic", "Vote", @@ -385,8 +386,8 @@ def get_subgroup_val(l): elif arch == "cpu": if product == "swiftshader": - cap["maxComputeSharedMemorySize"] = 16384 - cap["subgroupSize"] = 4 + cap["max_compute_shared_memory_size"] = 16384 + cap["subgroup_size"] = 4 cap["subgroupFeatures"] = [ "Basic", "Vote", @@ -397,13 +398,13 @@ def get_subgroup_val(l): ] elif arch in ["pascal"]: - cap["maxComputeSharedMemorySize"] = 49152 - cap["maxComputeWorkGroupInvocations"] = 1536 - cap["maxComputeWorkGroupSize"] = [1536, 1024, 64] + cap["max_compute_shared_memory_size"] = 49152 + cap["max_compute_workgroup_invocations"] = 1536 + cap["max_compute_workgroup_size"] = [1536, 1024, 64] - cap["subgroupSize"] = 32 - cap["minSubgroupSize"] = 32 - cap["maxSubgroupSize"] = 32 + cap["subgroup_size"] = 32 + cap["min_subgroup_size"] = 32 + cap["max_subgroup_size"] = 32 cap["subgroupFeatures"] = [ "Basic", "Vote", @@ -431,13 +432,13 @@ def get_subgroup_val(l): cap["variablePointersStorageBuffer"] = True elif arch in ["ampere", "turing"]: - cap["maxComputeSharedMemorySize"] = 49152 - cap["maxComputeWorkGroupInvocations"] = 1024 - cap["maxComputeWorkGroupSize"] = [1024, 1024, 1024] + cap["max_compute_shared_memory_size"] = 49152 + cap["max_compute_workgroup_invocations"] = 1024 + cap["max_compute_workgroup_size"] = [1024, 1024, 1024] - cap["subgroupSize"] = 32 - cap["minSubgroupSize"] = 32 - cap["maxSubgroupSize"] = 32 + cap["subgroup_size"] = 32 + cap["min_subgroup_size"] = 32 + cap["max_subgroup_size"] = 32 cap["subgroupFeatures"] = [ "Basic", "Vote", @@ -471,11 +472,11 @@ def get_subgroup_val(l): ] elif arch == "adreno": - cap["maxComputeSharedMemorySize"] = 32768 - cap["maxComputeWorkGroupInvocations"] = 1024 - cap["maxComputeWorkGroupSize"] = [1024, 1024, 64] + cap["max_compute_shared_memory_size"] = 32768 + cap["max_compute_workgroup_invocations"] = 1024 + cap["max_compute_workgroup_size"] = [1024, 1024, 64] - cap["subgroupSize"] = 64 + cap["subgroup_size"] = 64 cap["subgroupFeatures"] = [ "Basic", "Vote", @@ -491,14 +492,14 @@ def get_subgroup_val(l): cap["shaderInt16"] = True cap["storageBuffer16BitAccess"] = True - if os == "andorid31": + if os == "android31": cap["uniformAndStorageBuffer8BitAccess"] = True cap["variablePointers"] = True cap["variablePointersStorageBuffer"] = True elif arch == "unknown": - cap["subgroupSize"] = 64 + cap["subgroup_size"] = 64 cap["variablePointers"] = False cap["variablePointersStorageBuffer"] = False else: @@ -521,14 +522,14 @@ def get_comma_sep_str(ele_list): res += f"{k} = {'unit' if v == True else None}, " elif isinstance(v, list): if k == "subgroupFeatures": - res += f"subgroupFeatures = {get_subgroup_val(v)}: i32, " - elif k == "maxComputeWorkGroupSize": - res += f"maxComputeWorkGroupSize = dense<{get_comma_sep_str(v)}>: vector<{len(v)}xi32>, " + res += f"subgroup_features = {get_subgroup_val(v)}: i32, " + elif k == "max_compute_workgroup_size": + res += f"max_compute_workgroup_size = dense<{get_comma_sep_str(v)}>: vector<{len(v)}xi32>, " elif k == "coopmatCases": cmc = "" for case in v: - cmc += f"#vk.coop_matrix_props<{case}>, " - res += f"cooperativeMatrixPropertiesKHR = 
[{cmc[:-2]}], " + cmc += f"#spirv.coop_matrix_props_khr<{case}>, " + res += f"cooperative_matrix_properties_khr = [{cmc[:-2]}], " else: res += f"{k} = {get_comma_sep_str(v)}, " else: diff --git a/shark/iree_utils/vulkan_utils.py b/shark/iree_utils/vulkan_utils.py index a08fb6f5aa..ff394ea349 100644 --- a/shark/iree_utils/vulkan_utils.py +++ b/shark/iree_utils/vulkan_utils.py @@ -144,6 +144,8 @@ def get_vulkan_target_triple(device_name): # Intel Targets elif any(x in device_name for x in ("A770", "A750")): triple = f"arc-770-{system_os}" + elif "v620" in device_name: + triple = f"rdna2-v620-{system_os}" # Adreno Targets elif all(x in device_name for x in ("Adreno", "740")): @@ -169,7 +171,7 @@ def get_vulkan_triple_flag(device_name="", device_num=0, extra_args=[]): print( f"Found vulkan device {vulkan_device}. Using target triple {triple}" ) - return f"-iree-vulkan-target-triple={triple}" + return f"--iree-vulkan-target-triple={triple}" print( """Optimized kernel for your target device is not added yet. Contact SHARK Admin on discord[https://discord.com/invite/RUqY2h2s9u] @@ -184,7 +186,8 @@ def get_iree_vulkan_args(device_num=0, extra_args=[]): res_vulkan_flag = [] res_vulkan_flag += [ - "--iree-stream-resource-max-allocation-size=3221225472" + "--iree-stream-resource-max-allocation-size=3221225472", + "--iree-flow-inline-constants-max-byte-length=0" ] vulkan_triple_flag = None for arg in extra_args: @@ -197,6 +200,7 @@ def get_iree_vulkan_args(device_num=0, extra_args=[]): vulkan_triple_flag = get_vulkan_triple_flag( device_num=device_num, extra_args=extra_args ) + res_vulkan_flag += [vulkan_triple_flag] if vulkan_triple_flag is not None: vulkan_target_env = get_vulkan_target_env_flag(vulkan_triple_flag)