From fc4bdc03ea3bc291f3f84577b8f8a9004ef6c85d Mon Sep 17 00:00:00 2001 From: Michael Schmidt Date: Mon, 6 May 2024 16:37:11 +0200 Subject: [PATCH] Add support for DRCT (#248) --- README.md | 1 + .../spandrel/__helpers/main_registry.py | 2 + .../spandrel/architectures/DRCT/__init__.py | 174 ++++ .../spandrel/architectures/DRCT/arch/LICENSE | 21 + .../architectures/DRCT/arch/drct_arch.py | 862 ++++++++++++++++++ tests/__snapshots__/test_DRCT.ambr | 23 + .../16x16/4xRealWebPhoto_v4_drct-l.png | Bin 0 -> 8931 bytes .../32x32/4xRealWebPhoto_v4_drct-l.png | Bin 0 -> 29505 bytes tests/test_DRCT.py | 53 ++ 9 files changed, 1136 insertions(+) create mode 100644 libs/spandrel/spandrel/architectures/DRCT/__init__.py create mode 100644 libs/spandrel/spandrel/architectures/DRCT/arch/LICENSE create mode 100644 libs/spandrel/spandrel/architectures/DRCT/arch/drct_arch.py create mode 100644 tests/__snapshots__/test_DRCT.ambr create mode 100644 tests/images/outputs/16x16/4xRealWebPhoto_v4_drct-l.png create mode 100644 tests/images/outputs/32x32/4xRealWebPhoto_v4_drct-l.png create mode 100644 tests/test_DRCT.py diff --git a/README.md b/README.md index 45ee15ca..d9850bdb 100644 --- a/README.md +++ b/README.md @@ -104,6 +104,7 @@ Spandrel currently supports a limited amount of network architectures. If the ar - [DCTLSA](https://github.com/zengkun301/DCTLSA) | [Models](https://github.com/zengkun301/DCTLSA/tree/main/pretrained) - [ATD](https://github.com/LabShuHangGU/Adaptive-Token-Dictionary) | [Models](https://drive.google.com/drive/folders/1D3BvTS1xBcaU1mp50k3pBzUWb7qjRvmB?usp=sharing) - [AdaCode](https://github.com/kechunl/AdaCode) | [Models](https://github.com/kechunl/AdaCode/releases/tag/v0-pretrain_models) +- [DRCT](https://github.com/ming053l/DRCT) #### Face Restoration diff --git a/libs/spandrel/spandrel/__helpers/main_registry.py b/libs/spandrel/spandrel/__helpers/main_registry.py index 4d1e5384..2866ceb6 100644 --- a/libs/spandrel/spandrel/__helpers/main_registry.py +++ b/libs/spandrel/spandrel/__helpers/main_registry.py @@ -6,6 +6,7 @@ DAT, DCTLSA, DITN, + DRCT, ESRGAN, FBCNN, GFPGAN, @@ -75,5 +76,6 @@ ArchSupport.from_architecture(DRUNet.DRUNetArch()), ArchSupport.from_architecture(DnCNN.DnCNNArch()), ArchSupport.from_architecture(IPT.IPTArch()), + ArchSupport.from_architecture(DRCT.DRCTArch()), ArchSupport.from_architecture(ESRGAN.ESRGANArch()), ) diff --git a/libs/spandrel/spandrel/architectures/DRCT/__init__.py b/libs/spandrel/spandrel/architectures/DRCT/__init__.py new file mode 100644 index 00000000..c03cb217 --- /dev/null +++ b/libs/spandrel/spandrel/architectures/DRCT/__init__.py @@ -0,0 +1,174 @@ +import math + +from typing_extensions import override + +from spandrel.util import KeyCondition, get_seq_len + +from ...__helpers.model_descriptor import ( + Architecture, + ImageModelDescriptor, + SizeRequirements, + StateDict, +) +from .arch.drct_arch import DRCT + + +def _get_upscale_pixelshuffle( + state_dict: StateDict, key_prefix: str = "upsample" +) -> int: + upscale = 1 + + for i in range(0, 10, 2): + key = f"{key_prefix}.{i}.weight" + if key not in state_dict: + break + + shape = state_dict[key].shape + num_feat = shape[1] + upscale *= math.isqrt(shape[0] // num_feat) + + return upscale + + +class DRCTArch(Architecture[DRCT]): + def __init__(self) -> None: + super().__init__( + id="DRCT", + detect=KeyCondition.has_all( + "conv_first.weight", + "conv_first.bias", + "layers.0.swin1.norm1.weight", + "layers.0.swin1.norm1.bias", + "layers.0.swin1.attn.relative_position_bias_table", + 
"layers.0.swin1.attn.relative_position_index", + "layers.0.swin1.attn.qkv.weight", + "layers.0.swin1.attn.proj.weight", + "layers.0.swin1.attn.proj.bias", + "layers.0.swin1.norm2.weight", + "layers.0.swin1.mlp.fc1.weight", + "layers.0.swin1.mlp.fc1.bias", + "layers.0.swin1.mlp.fc2.weight", + "layers.0.adjust1.weight", + "layers.0.swin2.norm1.weight", + "layers.0.adjust2.weight", + "layers.0.swin3.norm1.weight", + "layers.0.adjust3.weight", + "layers.0.swin4.norm1.weight", + "layers.0.adjust4.weight", + "layers.0.swin5.norm1.weight", + "layers.0.adjust5.weight", + "norm.weight", + "norm.bias", + ), + ) + + @override + def load(self, state_dict: StateDict) -> ImageModelDescriptor[DRCT]: + # Defaults + img_size = 64 + patch_size = 1 # cannot be detected + in_chans = 3 + embed_dim = 180 + depths = (6, 6, 6, 6, 6, 6) + num_heads = (6, 6, 6, 6, 6, 6) + window_size = 16 + mlp_ratio = 2.0 + qkv_bias = True + ape = False + patch_norm = True + upscale = 2 + img_range = 1.0 # cannot be deduced from state_dict + upsampler = "" + resi_connection = "1conv" + gc = 32 + + # detect + in_chans = state_dict["conv_first.weight"].shape[1] + embed_dim = state_dict["conv_first.weight"].shape[0] + + num_layers = get_seq_len(state_dict, "layers") + depths = (6,) * num_layers + num_heads = [] + for i in range(num_layers): + num_heads.append( + state_dict[f"layers.{i}.swin1.attn.relative_position_bias_table"].shape[ + 1 + ] + ) + + mlp_ratio = state_dict["layers.0.swin1.mlp.fc1.weight"].shape[0] / embed_dim + + window_square = state_dict[ + "layers.0.swin1.attn.relative_position_bias_table" + ].shape[0] + window_size = (math.isqrt(window_square) + 1) // 2 + + if "conv_last.weight" in state_dict: + upsampler = "pixelshuffle" + upscale = _get_upscale_pixelshuffle(state_dict, "upsample") + else: + upsampler = "" + upscale = 1 + + if "conv_after_body.weight" in state_dict: + resi_connection = "1conv" + else: + resi_connection = "identity" + + qkv_bias = "layers.0.swin1.attn.qkv.bias" in state_dict + gc = state_dict["layers.0.adjust1.weight"].shape[0] + + patch_norm = "patch_embed.norm.weight" in state_dict + ape = "absolute_pos_embed" in state_dict + + if "layers.0.swin2.attn_mask" in state_dict: + img_size = ( + math.isqrt(state_dict["layers.0.swin2.attn_mask"].shape[0]) + * window_size + * patch_size + ) + else: + # we only know that the input size is <= window_size, + # so we just assume that the input size is window_size + img_size = window_size * patch_size + + model = DRCT( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + depths=depths, + num_heads=num_heads, + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + ape=ape, + patch_norm=patch_norm, + upscale=upscale, + img_range=img_range, + upsampler=upsampler, + resi_connection=resi_connection, + gc=gc, + ) + + size_tag = ["large"] if len(depths) >= 10 else [] + tags = [ + *size_tag, + f"s{img_size}w{window_size}", + f"{embed_dim}dim", + f"{resi_connection}", + ] + + return ImageModelDescriptor( + model, + state_dict, + architecture=self, + purpose="Restoration" if upscale == 1 else "SR", + tags=tags, + supports_half=False, # Too much weirdness to support this at the moment + supports_bfloat16=True, + scale=upscale, + input_channels=in_chans, + output_channels=in_chans, + size_requirements=SizeRequirements(multiple_of=16), + ) diff --git a/libs/spandrel/spandrel/architectures/DRCT/arch/LICENSE b/libs/spandrel/spandrel/architectures/DRCT/arch/LICENSE new file mode 100644 index 00000000..7397ede8 --- 
diff --git a/libs/spandrel/spandrel/architectures/DRCT/arch/LICENSE b/libs/spandrel/spandrel/architectures/DRCT/arch/LICENSE
new file mode 100644
index 00000000..7397ede8
--- /dev/null
+++ b/libs/spandrel/spandrel/architectures/DRCT/arch/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Chia-Ming Lee
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/libs/spandrel/spandrel/architectures/DRCT/arch/drct_arch.py b/libs/spandrel/spandrel/architectures/DRCT/arch/drct_arch.py
new file mode 100644
index 00000000..151a818a
--- /dev/null
+++ b/libs/spandrel/spandrel/architectures/DRCT/arch/drct_arch.py
@@ -0,0 +1,862 @@
+import math
+
+import torch
+import torch.nn as nn
+
+from spandrel.util import store_hyperparameters
+from spandrel.util.timm import DropPath, to_2tuple, trunc_normal_
+
+
+class ChannelAttention(nn.Module):
+    """Channel attention used in RCAN.
+    Args:
+        num_feat (int): Channel number of intermediate features.
+        squeeze_factor (int): Channel squeeze factor. Default: 16.
+ """ + + def __init__(self, num_feat, squeeze_factor=16): + super().__init__() + self.attention = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + nn.Conv2d(num_feat, num_feat // squeeze_factor, 1, padding=0), + nn.ReLU(inplace=True), + nn.Conv2d(num_feat // squeeze_factor, num_feat, 1, padding=0), + nn.Sigmoid(), + ) + + def forward(self, x): + y = self.attention(x) + return x * y + + +class CAB(nn.Module): + def __init__(self, num_feat, compress_ratio=3, squeeze_factor=30): + super().__init__() + + self.cab = nn.Sequential( + nn.Conv2d(num_feat, num_feat // compress_ratio, 3, 1, 1), + nn.GELU(), + nn.Conv2d(num_feat // compress_ratio, num_feat, 3, 1, 1), + ChannelAttention(num_feat, squeeze_factor), + ) + + def forward(self, x): + return self.cab(x) + + +class Mlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = ( + x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + ) + return windows + + +def window_reverse(windows, window_size, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view( + B, H // window_size, W // window_size, window_size, window_size, -1 + ) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + r"""Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 + """ + + def __init__( + self, + dim, + window_size, + num_heads, + qkv_bias=True, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + ): + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( # type: ignore + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads) + ) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack( + torch.meshgrid([coords_h, coords_w], indexing="ij") + ) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = ( + coords_flatten[:, :, None] - coords_flatten[:, None, :] + ) # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute( + 1, 2, 0 + ).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=0.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = ( + self.qkv(x) + .reshape(B_, N, 3, self.num_heads, C // self.num_heads) + .permute(2, 0, 3, 1, 4) + ) + q, k, v = ( + qkv[0], + qkv[1], + qkv[2], + ) # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = q @ k.transpose(-2, -1) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1) # type: ignore + ].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1, + ) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1 + ).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze( + 1 + ).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}" + + +class RDG(nn.Module): + def __init__( + self, + dim, + input_resolution, + num_heads, + window_size, + mlp_ratio, + qkv_bias, + qk_scale, + drop, + attn_drop, + drop_path, + norm_layer, + gc, + patch_size, + img_size, + ): + super().__init__() + + self.swin1 = SwinTransformerBlock( + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + shift_size=0, # For first block + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + 
+            drop=drop,
+            attn_drop=attn_drop,
+            drop_path=drop_path[0] if isinstance(drop_path, list) else drop_path,
+            norm_layer=norm_layer,  # type: ignore
+        )
+        self.adjust1 = nn.Conv2d(dim, gc, 1)
+
+        self.swin2 = SwinTransformerBlock(
+            dim + gc,
+            input_resolution=input_resolution,
+            num_heads=num_heads - ((dim + gc) % num_heads),
+            window_size=window_size,
+            shift_size=window_size // 2,  # shifted window
+            mlp_ratio=mlp_ratio,
+            qkv_bias=qkv_bias,
+            qk_scale=qk_scale,
+            drop=drop,
+            attn_drop=attn_drop,
+            drop_path=drop_path[0] if isinstance(drop_path, list) else drop_path,
+            norm_layer=norm_layer,  # type: ignore
+        )
+        self.adjust2 = nn.Conv2d(dim + gc, gc, 1)
+
+        self.swin3 = SwinTransformerBlock(
+            dim + 2 * gc,
+            input_resolution=input_resolution,
+            num_heads=num_heads - ((dim + 2 * gc) % num_heads),
+            window_size=window_size,
+            shift_size=0,  # no shift
+            mlp_ratio=mlp_ratio,
+            qkv_bias=qkv_bias,
+            qk_scale=qk_scale,
+            drop=drop,
+            attn_drop=attn_drop,
+            drop_path=drop_path[0] if isinstance(drop_path, list) else drop_path,
+            norm_layer=norm_layer,  # type: ignore
+        )
+        self.adjust3 = nn.Conv2d(dim + gc * 2, gc, 1)
+
+        self.swin4 = SwinTransformerBlock(
+            dim + 3 * gc,
+            input_resolution=input_resolution,
+            num_heads=num_heads - ((dim + 3 * gc) % num_heads),
+            window_size=window_size,
+            shift_size=window_size // 2,  # shifted window
+            mlp_ratio=1,
+            qkv_bias=qkv_bias,
+            qk_scale=qk_scale,
+            drop=drop,
+            attn_drop=attn_drop,
+            drop_path=drop_path[0] if isinstance(drop_path, list) else drop_path,
+            norm_layer=norm_layer,  # type: ignore
+        )
+        self.adjust4 = nn.Conv2d(dim + gc * 3, gc, 1)
+
+        self.swin5 = SwinTransformerBlock(
+            dim + 4 * gc,
+            input_resolution=input_resolution,
+            num_heads=num_heads - ((dim + 4 * gc) % num_heads),
+            window_size=window_size,
+            shift_size=0,  # no shift
+            mlp_ratio=1,
+            qkv_bias=qkv_bias,
+            qk_scale=qk_scale,
+            drop=drop,
+            attn_drop=attn_drop,
+            drop_path=drop_path[0] if isinstance(drop_path, list) else drop_path,
+            norm_layer=norm_layer,  # type: ignore
+        )
+        self.adjust5 = nn.Conv2d(dim + gc * 4, dim, 1)
+
+        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
+
+        self.pe = PatchEmbed(
+            img_size=img_size,
+            patch_size=patch_size,
+            in_chans=0,
+            embed_dim=dim,
+            norm_layer=None,
+        )
+
+        self.pue = PatchUnEmbed(
+            img_size=img_size,
+            patch_size=patch_size,
+            in_chans=0,
+            embed_dim=dim,
+            norm_layer=None,
+        )
+
+    def forward(self, x, xsize):
+        x1 = self.pe(self.lrelu(self.adjust1(self.pue(self.swin1(x, xsize), xsize))))
+        x2 = self.pe(
+            self.lrelu(
+                self.adjust2(self.pue(self.swin2(torch.cat((x, x1), -1), xsize), xsize))
+            )
+        )
+        x3 = self.pe(
+            self.lrelu(
+                self.adjust3(
+                    self.pue(self.swin3(torch.cat((x, x1, x2), -1), xsize), xsize)
+                )
+            )
+        )
+        x4 = self.pe(
+            self.lrelu(
+                self.adjust4(
+                    self.pue(self.swin4(torch.cat((x, x1, x2, x3), -1), xsize), xsize)
+                )
+            )
+        )
+        x5 = self.pe(
+            self.adjust5(
+                self.pue(self.swin5(torch.cat((x, x1, x2, x3, x4), -1), xsize), xsize)
+            )
+        )
+
+        return x5 * 0.2 + x
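RDG chains its five Swin blocks RRDB-style: each block receives the group input concatenated with every previous adjusted output, so the working width grows by gc per step before adjust5 projects back down and the 0.2-scaled residual is added. A sketch of the resulting widths and head counts, assuming the defaults the loader detects (embed_dim = 180, gc = 32, num_heads = 6; illustrative values only):

    dim, gc, num_heads = 180, 32, 6
    widths = [dim + i * gc for i in range(5)]
    assert widths == [180, 212, 244, 276, 308]
    # swin2..swin5 shrink the head count by (width % num_heads), which for
    # these defaults leaves every width evenly divisible by its head count:
    heads = [num_heads - (w % num_heads) for w in widths]
    assert heads == [6, 4, 2, 6, 4]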
+
+
+class SwinTransformerBlock(nn.Module):
+    r"""Swin Transformer Block.
+    Args:
+        dim (int): Number of input channels.
+        input_resolution (tuple[int]): Input resolution.
+        num_heads (int): Number of attention heads.
+        window_size (int): Window size.
+        shift_size (int): Shift size for SW-MSA.
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+        drop (float, optional): Dropout rate. Default: 0.0
+        attn_drop (float, optional): Attention dropout rate. Default: 0.0
+        drop_path (float, optional): Stochastic depth rate. Default: 0.0
+        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
+        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+    """
+
+    def __init__(
+        self,
+        dim,
+        input_resolution,
+        num_heads,
+        window_size=7,
+        shift_size=0,
+        mlp_ratio=4.0,
+        qkv_bias=True,
+        qk_scale=None,
+        drop=0.0,
+        attn_drop=0.0,
+        drop_path=0.0,
+        act_layer=nn.GELU,
+        norm_layer=nn.LayerNorm,
+    ):
+        super().__init__()
+        self.dim = dim
+        self.input_resolution = input_resolution
+        self.num_heads = num_heads
+        self.window_size = window_size
+        self.shift_size = shift_size
+        self.mlp_ratio = mlp_ratio
+        if min(self.input_resolution) <= self.window_size:
+            # if window size is larger than input resolution, we don't partition windows
+            self.shift_size = 0
+            self.window_size = min(self.input_resolution)
+        assert (
+            0 <= self.shift_size < self.window_size
+        ), "shift_size must be in [0, window_size)"
+
+        self.norm1 = norm_layer(dim)
+        self.attn = WindowAttention(
+            dim,
+            window_size=to_2tuple(self.window_size),
+            num_heads=num_heads,
+            qkv_bias=qkv_bias,
+            qk_scale=qk_scale,
+            attn_drop=attn_drop,
+            proj_drop=drop,
+        )
+
+        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(
+            in_features=dim,
+            hidden_features=mlp_hidden_dim,
+            act_layer=act_layer,
+            drop=drop,
+        )
+
+        if self.shift_size > 0:
+            attn_mask = self.calculate_mask(self.input_resolution)
+        else:
+            attn_mask = None
+
+        self.register_buffer("attn_mask", attn_mask)
+
+    def calculate_mask(self, x_size):
+        # calculate attention mask for SW-MSA
+        H, W = x_size
+        img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
+        h_slices = (
+            slice(0, -self.window_size),
+            slice(-self.window_size, -self.shift_size),
+            slice(-self.shift_size, None),
+        )
+        w_slices = (
+            slice(0, -self.window_size),
+            slice(-self.window_size, -self.shift_size),
+            slice(-self.shift_size, None),
+        )
+        cnt = 0
+        for h in h_slices:
+            for w in w_slices:
+                img_mask[:, h, w, :] = cnt
+                cnt += 1
+
+        mask_windows = window_partition(
+            img_mask, self.window_size
+        )  # nW, window_size, window_size, 1
+        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
+        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+        attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(
+            attn_mask == 0, 0.0
+        )
+
+        return attn_mask
+
+    def forward(self, x, x_size):
+        H, W = x_size
+        B, _L, C = x.shape
+        # assert L == H * W, "input feature has wrong size"
+
+        shortcut = x
+        x = self.norm1(x)
+        x = x.view(B, H, W, C)
+
+        # cyclic shift
+        if self.shift_size > 0:
+            shifted_x = torch.roll(
+                x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)
+            )
+        else:
+            shifted_x = x
+
+        # partition windows
+        x_windows = window_partition(
+            shifted_x, self.window_size
+        )  # nW*B, window_size, window_size, C
+        x_windows = x_windows.view(
+            -1, self.window_size * self.window_size, C
+        )  # nW*B, window_size*window_size, C
+
+        # W-MSA/SW-MSA (recompute the mask when testing on images whose
+        # sizes differ from the training resolution)
+        if self.input_resolution == x_size:
+            attn_windows = self.attn(
+                x_windows, mask=self.attn_mask
+            )  # nW*B, window_size*window_size, C
+        else:
+            attn_windows = self.attn(
+                x_windows, mask=self.calculate_mask(x_size).to(x.device)
+            )
+
+        # merge windows
+        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
+        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C
+
+        # reverse cyclic shift
+        if self.shift_size > 0:
+            x = torch.roll(
+                shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)
+            )
+        else:
+            x = shifted_x
+        x = x.view(B, H * W, C)
+
+        # FFN
+        x = shortcut + self.drop_path(x)
+        x = x + self.drop_path(self.mlp(self.norm2(x)))
+
+        return x
+
+    def extra_repr(self) -> str:
+        return (
+            f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, "
+            f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
+        )
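window_partition and window_reverse (defined earlier in this file) are exact inverses, which is what lets forward() shift, partition, attend per window, and then undo both steps. A quick shape check, assuming a 32x32 feature map with the default 16-pixel window and 180 channels:

    import torch

    x = torch.randn(1, 32, 32, 180)    # (B, H, W, C)
    windows = window_partition(x, 16)  # (num_windows * B, 16, 16, 180)
    assert windows.shape == (4, 16, 16, 180)
    assert torch.equal(window_reverse(windows, 16, 32, 32), x)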
+
+
+class PatchMerging(nn.Module):
+    r"""Patch Merging Layer.
+
+    Args:
+        input_resolution (tuple[int]): Resolution of input feature.
+        dim (int): Number of input channels.
+        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+    """
+
+    def __init__(self, input_resolution, dim: int, norm_layer=nn.LayerNorm):
+        super().__init__()
+        self.input_resolution = input_resolution
+        self.dim = dim
+        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
+        self.norm = norm_layer(4 * dim)
+
+    def forward(self, x):
+        """
+        x: b, h*w, c
+        """
+        h, w = self.input_resolution
+        b, seq_len, c = x.shape
+        assert seq_len == h * w, "input feature has wrong size"
+        assert h % 2 == 0 and w % 2 == 0, f"x size ({h}*{w}) is not even."
+
+        x = x.view(b, h, w, c)
+
+        x0 = x[:, 0::2, 0::2, :]  # b h/2 w/2 c
+        x1 = x[:, 1::2, 0::2, :]  # b h/2 w/2 c
+        x2 = x[:, 0::2, 1::2, :]  # b h/2 w/2 c
+        x3 = x[:, 1::2, 1::2, :]  # b h/2 w/2 c
+        x = torch.cat([x0, x1, x2, x3], -1)  # b h/2 w/2 4*c
+        x = x.view(b, -1, 4 * c)  # b h/2*w/2 4*c
+
+        x = self.norm(x)
+        x = self.reduction(x)
+
+        return x
+
+
+class PatchEmbed(nn.Module):
+    r"""Image to Patch Embedding
+
+    Args:
+        img_size (int): Image size. Default: 224.
+        patch_size (int): Patch token size. Default: 4.
+        in_chans (int): Number of input image channels. Default: 3.
+        embed_dim (int): Number of linear projection output channels. Default: 96.
+        norm_layer (nn.Module, optional): Normalization layer. Default: None
+    """
+
+    def __init__(
+        self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None
+    ):
+        super().__init__()
+        img_size = to_2tuple(img_size)
+        patch_size = to_2tuple(patch_size)
+        patches_resolution = [
+            img_size[0] // patch_size[0],
+            img_size[1] // patch_size[1],
+        ]
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.patches_resolution = patches_resolution
+        self.num_patches = patches_resolution[0] * patches_resolution[1]
+
+        self.in_chans = in_chans
+        self.embed_dim = embed_dim
+
+        if norm_layer is not None:
+            self.norm = norm_layer(embed_dim)
+        else:
+            self.norm = None
+
+    def forward(self, x):
+        x = x.flatten(2).transpose(1, 2)  # structured as [B, num_patches, C]
+        if self.norm is not None:
+            x = self.norm(x)  # normalization
+        return x
+
+
+class PatchUnEmbed(nn.Module):
+    r"""Image to Patch Unembedding
+
+    Args:
+        img_size (int): Image size. Default: 224.
+        patch_size (int): Patch token size. Default: 4.
+        in_chans (int): Number of input image channels. Default: 3.
+        embed_dim (int): Number of linear projection output channels. Default: 96.
+        norm_layer (nn.Module, optional): Normalization layer. Default: None.
+    """
+
+    def __init__(
+        self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None
+    ):
+        super().__init__()
+        img_size = to_2tuple(img_size)  # image size, default 224x224
+        patch_size = to_2tuple(patch_size)  # patch token size, default 4x4
+        patches_resolution = [
+            img_size[0] // patch_size[0],
+            img_size[1] // patch_size[1],
+        ]  # resolution of the patch grid
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.patches_resolution = patches_resolution
+        self.num_patches = (
+            patches_resolution[0] * patches_resolution[1]
+        )  # total number of patches
+
+        self.in_chans = in_chans  # number of input image channels
+        self.embed_dim = embed_dim  # number of linear projection output channels
+
+    def forward(self, x, x_size):
+        B, _HW, _C = x.shape  # input shape is [B, Ph*Pw, C]
+        x = x.transpose(1, 2).view(
+            B, -1, x_size[0], x_size[1]
+        )  # output shape is [B, C, Ph, Pw]
+        return x
+
+
+class Upsample(nn.Sequential):
+    """Upsample module.
+
+    Args:
+        scale (int): Scale factor. Supported scales: 2^n and 3.
+        num_feat (int): Channel number of intermediate features.
+    """
+
+    def __init__(self, scale, num_feat):
+        m = []
+        if (scale & (scale - 1)) == 0:  # scale = 2^n
+            for _ in range(int(math.log2(scale))):
+                m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
+                m.append(nn.PixelShuffle(2))
+        elif scale == 3:
+            m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
+            m.append(nn.PixelShuffle(3))
+        else:
+            raise ValueError(
+                f"scale {scale} is not supported. " "Supported scales: 2^n and 3."
+            )
+        super().__init__(*m)
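Upsample decomposes a power-of-two scale into log2(scale) conv-plus-PixelShuffle(2) stages, which is why _get_upscale_pixelshuffle in the loader walks the keys upsample.0.weight, upsample.2.weight, ... in steps of two: the PixelShuffle modules at the odd indices own no weights. A sketch, assuming the num_feat = 64 used by DRCT's reconstruction head:

    import math
    import torch.nn as nn

    up = Upsample(scale=4, num_feat=64)
    convs = [m for m in up if isinstance(m, nn.Conv2d)]
    assert len(convs) == int(math.log2(4)) == 2  # two 2x stages
    assert convs[0].weight.shape == (256, 64, 3, 3)  # (4 * num_feat, num_feat, 3, 3)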
+
+
+@store_hyperparameters()
+class DRCT(nn.Module):
+    hyperparameters = {}
+
+    def __init__(
+        self,
+        img_size=64,
+        patch_size=1,
+        in_chans=3,
+        embed_dim=180,
+        depths=(6, 6, 6, 6, 6, 6),
+        num_heads=(6, 6, 6, 6, 6, 6),
+        window_size=16,
+        mlp_ratio=2.0,
+        qkv_bias=True,
+        qk_scale=None,
+        drop_rate=0.0,
+        attn_drop_rate=0.0,
+        drop_path_rate=0.1,
+        norm_layer=nn.LayerNorm,
+        ape=False,
+        patch_norm=True,
+        upscale=1,
+        img_range=1.0,
+        upsampler="",
+        resi_connection="1conv",
+        gc=32,
+    ):
+        super().__init__()
+
+        self.window_size = window_size
+        self.shift_size = window_size // 2
+
+        num_in_ch = in_chans
+        num_out_ch = in_chans
+        num_feat = 64
+        self.img_range = img_range
+        if in_chans == 3:
+            rgb_mean = (0.4488, 0.4371, 0.4040)
+            self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
+        else:
+            self.mean = torch.zeros(1, 1, 1, 1)
+        self.upscale = upscale
+        self.upsampler = upsampler
+
+        # ------------------------- 1, shallow feature extraction ------------------------- #
+        self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
+
+        # ------------------------- 2, deep feature extraction ------------------------- #
+        self.num_layers = len(depths)
+        self.embed_dim = embed_dim
+        self.ape = ape
+        self.patch_norm = patch_norm
+        self.num_features = embed_dim
+        self.mlp_ratio = mlp_ratio
+
+        # split image into non-overlapping patches
+        self.patch_embed = PatchEmbed(
+            img_size=img_size,
+            patch_size=patch_size,
+            in_chans=embed_dim,
+            embed_dim=embed_dim,
+            norm_layer=norm_layer if self.patch_norm else None,
+        )
+        num_patches = self.patch_embed.num_patches
+        patches_resolution = self.patch_embed.patches_resolution
+        self.patches_resolution = patches_resolution
+
+        # merge non-overlapping patches into image
+        self.patch_unembed = PatchUnEmbed(
+            img_size=img_size,
+            patch_size=patch_size,
+            in_chans=embed_dim,
+            embed_dim=embed_dim,
+            norm_layer=norm_layer if self.patch_norm else None,
+        )
+
+        # absolute position embedding
+        if self.ape:
+            self.absolute_pos_embed = nn.Parameter(  # type: ignore
+                torch.zeros(1, num_patches, embed_dim)
+            )
+            trunc_normal_(self.absolute_pos_embed, std=0.02)
+
+        self.pos_drop = nn.Dropout(p=drop_rate)
+
+        # stochastic depth
+        dpr = [
+            x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
+        ]  # stochastic depth decay rule
+
+        # build Residual Dense Groups
+        self.layers = nn.ModuleList()
+        for i_layer in range(self.num_layers):
+            layer = RDG(
+                dim=embed_dim,
+                input_resolution=(patches_resolution[0], patches_resolution[1]),
+                num_heads=num_heads[i_layer],
+                window_size=window_size,
+                mlp_ratio=mlp_ratio,
+                qkv_bias=qkv_bias,
+                qk_scale=qk_scale,
+                drop=drop_rate,
+                attn_drop=attn_drop_rate,
+                drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
+                norm_layer=norm_layer,
+                gc=gc,
+                img_size=img_size,
+                patch_size=patch_size,
+            )
+
+            self.layers.append(layer)
+        self.norm = norm_layer(self.num_features)
+
+        # build the last conv layer in deep feature extraction
+        if resi_connection == "1conv":
+            self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
+        elif resi_connection == "identity":
+            self.conv_after_body = nn.Identity()
+
+        # ------------------------- 3, high quality image reconstruction ------------------------- #
+        if self.upsampler == "pixelshuffle":
+            # for classical SR
+            self.conv_before_upsample = nn.Sequential(
+                nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)
+            )
+            self.upsample = Upsample(upscale, num_feat)
+            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
+
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            trunc_normal_(m.weight, std=0.02)
+            if isinstance(m, nn.Linear) and m.bias is not None:  # type: ignore
+                nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.LayerNorm):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    @torch.jit.ignore  # type: ignore
+    def no_weight_decay(self):
+        return {"absolute_pos_embed"}
+
+    @torch.jit.ignore  # type: ignore
+    def no_weight_decay_keywords(self):
+        return {"relative_position_bias_table"}
+
+    def forward_features(self, x):
+        x_size = (x.shape[2], x.shape[3])
+
+        x = self.patch_embed(x)
+        if self.ape:
+            x = x + self.absolute_pos_embed
+        x = self.pos_drop(x)
+
+        for layer in self.layers:
+            x = layer(x, x_size)
+
+        x = self.norm(x)  # b seq_len c
+        x = self.patch_unembed(x, x_size)
+
+        return x
+
+    def forward(self, x):
+        self.mean = self.mean.type_as(x)
+        x = (x - self.mean) * self.img_range
+
+        if self.upsampler == "pixelshuffle":
+            # for classical SR
+            x = self.conv_first(x)
+            x = self.conv_after_body(self.forward_features(x)) + x
+            x = self.conv_before_upsample(x)
+            x = self.conv_last(self.upsample(x))
+
+        x = x / self.img_range + self.mean
+
+        return x
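With the registration in main_registry.py at the top of this patch, a DRCT checkpoint loads through spandrel's normal entry point. A minimal usage sketch — the checkpoint file name is hypothetical, and inputs must respect the multiple-of-16 size requirement declared by the loader:

    import torch
    from spandrel import ModelLoader

    model = ModelLoader().load_from_file("4xRealWebPhoto_v4_drct-l.pth")
    assert model.architecture.id == "DRCT"

    x = torch.rand(1, 3, 64, 64)  # 64 is a multiple of 16
    with torch.no_grad():
        y = model(x)  # upscaled by model.scale, e.g. (1, 3, 256, 256) for 4x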
zcmV<9A{^a`P)001BWNklc)Rv{%)G(m|4L?IxeH=^B&d+t5$``+?A0cgqgOcLk! zBL@f}DQ+O#V1S7Ko+T6OCswV>iyUBKt3(h52&ziBPDND(Pyni;qC%v5Aa$Z-|*vim}t4oVt3!cRKhf z0u+B0!heMT1z5c*Z$S5=6<-&^$E zIY3K?IzVs(0g4-l0L2Z)a*BfYS!>6Ztr;0xQ8b6Z7S36wB$A?!Nuu{Ldi5d2pa3a| zASwz4^g5^#6BJNoW0)wZ5?g=(sDv1+R%0m7++wdL7}qF=nq?FNRZDCvJC`d_P()uZ zEY63;nW>AX0OW*P@s|;P${3`C>zet{_=?r7(G{j>ipf+HD-jnyS6wzHxro~Wog-hIVT%c>lm%e zhzpx>zu$qc+Sjg3y>}AtBS7&Jg!hpF5hTK9*}C;xn#1EJ8xor*B8>qlrWmV1H^o{d zNlYo#q(VdhRD}RyQessxAP~bafe4V4gg}N!m0^X9v$SGj`~x3&Vt8!yokIr>9yl`S z_lu>Ynj$9#h+P&%EE}5@#^t2aS(tMym#>`bOkD*)EGqajWPk*K00OaFwrb<(=+dkx z6&%S3k%~}^!TY-IQ3_BM6-f~gD2Pc_2mk@ZP%x8Wo1g$hlnF>`a;GBBW(vx);?A9S zJ^s{_tCvsAcjmtF$6tBt^~232OS=88#Kf5sVvRA+EyIeM>ymiO93`DUbu9T_s5%hE z`v?FNL;*r$c2}wkP4^7VsCMKrqn2m^nkWvIVK~$7MYz#4_6pZkiwd?M_=kAX@{nV{nx2#$++UYKy zJ~#Q@mtOeY-}|Gvxn5Lna~k-5o|TQf%*aumDJaWwWosCs>AiC&+m}ygh4WPaK==!^ z`L+j!M@Fk!lB#GDU&Td5aXQ9 z%<>hh?%Q+!-urjovHj+?D<+IF2=Tc*jdu#Ojb#N+BIue-m!DXo_luf*s*!j`sHP2fFcd*YggxH z<`>?4=g1%Y!5^GBbH3B>m1V9f!~{0N#L8u(VJL&4GSBAQ(`Sw!gb#QhF(80r3@u-` zdczh{tL%cW2fc1xSDF%3NfVHWiH)_^8RtL*0*DyQY*dtqm>EnlB+=xYV~}B1QIR;h zZ29h8cRu>?-krDJGSYI^iCF8%#evSZ=To=7Iy-adAGEJ!tb;IVX*R8*6&s`7i-E-UaTgEc3x{HHB*Soy- z(F&{dtr!9(yMt3FCV%kezSrM;duHac@AvZ}_jTVDjb@|eY?&3gCJ#;B&g_Nb@8W#~ zz?E5Zc*)4f%AzsMHWx_k`@#1^s396sBET7Et;@2~8cW8&5EC&NAcYzth$bZ=wO|HC zCbq^ow_*MI`*!bs@cw(&tz9wJa@FF()eG;<&bQ}dy4GDNGP`c&vgNI%W8-7x(D=FY zmtH-*|K;z!wEyiNEzVTbuv+=DC~a;NT-j(CGQR4H)>qy;>pMMw2*A*a^^L|zvuu@( zMiQi0`{?VsN+Kx+VzSma&RkZqbB+u-PAXPG#2{7`0t_+37-yl$u&p#@uDSW2>`C4K< z2wo~LEW}J;L$+BKSvgE#wwjoTKp;(ECO}1!q|_{1h8U_K1C~|mZ+hs#J@@Y1vE$~o zEOD;eJ$mx=D=+`)+pizGFf-Lv17&AzBct(F^Wg{g?7sW%2OiwxyS>X-E`R6S-}v?m zuUwj*iZM!nD;v(*EYGcVVw}s0%-In9)yb>0WzXZ*m87N${Sc!Og@i$G(FpnEoiz~* zq2boBD;o-Rj@XuD*)nMqB8njh$#t1^n$YQVmW@v=S-EWYefK>1=m$36v}zdD)k~LO z|Ixu$-rWE8kz?J(ffS8Om68Z3`CcNsW!xVPxOii6ScH-?L z@9zKcnah`^>!f6gNJs|cV+=_VpuBw9*himu_<#HRzc4#JJvG<* zQ}grfUKM;TL5W#dQiP~6BvFD7{RejK{=)zJYtyGL*jD+Kul>od|3AMm-}9OVq4qA% z%Dk{9vsq?sVT}X3s6w~i`5Ld+olj zZSo=G%(;dniJF*_aiy_LB6YRM1)h5B(LHx;U$+4nzgd#%veEeRAAS8F|Fd89;=TA-CF@L{=WGla zV_o5FCZbA)cJ6ty77E^z#5xROx7#wvug%{zGP-N&xNl!N7Z=;jR(t*S-tclRN0I8n zlp&^+I3VUC3{J*5<+5qlZ(ja4pa0nnYnEmSCy$*t@cPjMM^2o)aIx?E#0W7a!XSo> z*+6|=fr)KV*QrXO+wFhi*{44H+0WdvWJ%s~|M^Q_{^fuCFQpvu)q;8{%7U0wz|K0? z%=3mNP6XPy`_Wn`il!v0KJ>f89Q4ZR$A_EGY`e*~r!NfR+m)Vet-07-Z^|XZtynmt z3QrjfCBM+f3gd;yY}l~&u}2>K$TJ@-;Lo2u{qBLIhu=B%?%C;!Q`dU2%Go-{ow}}l zH_MpGa_~wpnhD&seq~8M^y~+I@#p^f&1;sX&iw!N@BhQE{@-70jg8e+x9&+^xY9Zm zGPz?@xU9^xQlNC_eUDW>BtdfA?^RNDMxC`+&+J6~u}vFHf4*-=uau+T>EZJ5rcO3K zR4g>}RE0Vc64)gjZ!`_ZMpo?JdFRtlKehX|&ECfsUij{7ul{Iqa%Q^c=j#|$kcd+1 z4f;Oxh>)>$k&5O}@^uw;ATcNW?Vo%8fBD;g-H^X>_SCQZ>tB8C;E5P`esLy6%^W*x z96M9wL^)Gt^8$wHj{Bdis|uPtq^>HCDGyp-ytuh^PpqGq=w2IYO-zgZL8TWbwolu9 zxX{c>KkHT&z4`7+q-I`#8WTgUk3Rjuy^lS)dhNuKqo=;~`QMtpJU2O*x3=Yt){#`h zY`dKa1OdUbDU2~WGEweyyBhm9Z(IM1zw}Fw@4crtySVTB-}(F(f9Jw{?^hW?2D~8C$Z+tj%c0o+m;`l6=(K`;e2Re(FxQhk!dr8#mecZA+H*ASb)ws1CO| zU$%V3r9q{ZX}FA1*>1PAr3ShG-rGL&$!9k1*m>;Cdtd$bi?4j==QsP?Zd-n{ANk9_Fi=RW@Y&@f*=Hy{+;i=^ybWOQx?) 
zo$B;wdsPT!JEmfMt!s>Qx)s%frJFYsl^^}oGoSwSGu=-6<+tDZ_P2g;?$AjkEk}pT z(UE~xRgzA-??d#1I59D1OrA7o><@Z_Du(f)WzT)|!=HNY6Pwl*^9y1BYcK!dm%eoB z?95EBp6|{Ol4WK|3L&=4Y;3_M&x@w9mbUMHL_}+!5IraFeWGaVK*)`AaeV&0W<8h~ z<_6rVm7}G}X6Jg{_hy#nBM|j{*BKd|Te>o=-te){eB!wW@94&b@9%r@cfS0M?xKv3 zFB@6CX>7x46gj8v)YYklsVfT?COON>tPv5W7Z-vbxMud~efNI++2{5=cvmCCne#JW z`}#M(`_(_1o$d|9cYKE-Bw|&Gi9ke5CU>^9Mb6gJ)_WdFDM?HkL#zgXy3Ex=1F3_c z8Nt3UNta5!Yuk?9J9jOyw8hGjg_DctFBUvHyL{#M&YqiEIrijJPwsm7g9~Wwd+Fuh z|Lw01v0J@lWWL!PTDkJ^XP;WRZu9)L?(F2b3x^IIIkd0mr4zGbr{5i4y?pD|txxZL z@aYde)F|`G`Od*3KmN*B{`l;vvvczcb=6n+C{n^CL?UG(wg#{^ZyJ{on6~bIH2NSh zQi?GK5md=51u;t0Bq>{sW|S0qlcO|x$NCKqZQtY;u9X)LtQ^dJ@UdqOoOt)8lgG#J zxn<4myB_}7^B7rm>D{X@{O%W54617O#ObN`)^5Lj_d`!@+w=6bY^WG1&(B`^{XhCo zFMaogtg*VaV#B5_TkhGmbH|RgcdS~nxNzn5*WdcV>u6mdUXzvn*#~+IH__zN#ezNMunjsh^@6&%!WGrVMPGtx}PC^Mg$*)<1FIeZ}Ir z2R5$Tz5b@mHoyGMFCDrx=SS?w$|axvrN6bdxw`IlFTeHn@XUJ{T=?-DZ)928cIRH2 zSW|4>GrISQLl-Z+x&Od#{kzXC&el&n_~gf){~P6!aaT=_ahFni-tsm|`a(wzy z*9QaA$dp`&BxG12k%)4eL)m5}XBitqx9)kouIf}rk^qrfyh0;|++;E3pfdV6)@*ny zHL_=){rE5ZjsIzbOEJ_o2^kn7M9GYEumnR{j?9r^x^>TE$%~}qYwvxJG=e!K zvBpB!x~$n4bHpJHva#0R`(OXTr=ER8rn;w&9)9iU!8eXwY+q|nF7!C|mo(%9AKd#h zPyNiYq1M9WwWh88^yKAzN8;7V$=UZ#hTf)UK6LxuhsRvY_xdmW=Wo36t#3Ye+bzq} zRYQa2OP3$j;TO*gj&|hCRNpfeY-b>yf?`91O)&%wD$W`Q=h!M+I49OJGws;(gs(&N zK{SRiAkpX}8;%5$*lLtEYs8*3M_YH^fBV1vwST=L&pPw%Z+_!@FTD2F;ft3$1D|3$ zbQglU-P?BVy6yH&>sR*reqy|pm953A^C$Pe^Tv-}J#gfmU;HQk=%Hu#-o0sabF_Hr zH@@&Ezx|tcj*VxpVQW5Ew6QH+NU$!89&?tt4jlP+L5#-v22Jl20K@f z%Z+6=Ogrv>s_s{HT{9H14xtt#Vk3eO>qfJgyb9bMw{83R|M~Ae{p8*fxNzb4H~!#T zUwiRS&kZVx$;Zl6F2yi5G@LiHjhiC`r$-w@OINS0q4U*(mUb9ar|L@Gx$2vj`phK-8TNrB_Pe1>7tbzGS*xo*67$GiH!ALxUT}Ns5MSmK8+S z3qlwFF|{`TT|wTqce!X~pIVwV|q z>@s4aoezDuu0yQr_hRt}BT^X=z;<`bX#*dzDv-aR`z|2tp(=70K) z&l576709D=uHE$+X;s&avH)?NQdW#u$EscizUi{9ckbEyfjb|#>1g@Y!=;`0fAy zz?<*%tC%;-yl^IWi~VkCGG&cHQ%G!GRy2$?-uGI0wq@Jay$|l)wd>A1Zdx%j+y2@Y zzqIer(aAx&*6T!qCl17sB#+7-hBMDrCueRtbMeukt9P$y6|L5*7w7j~>Yd3N-6g|S zUXpQzbC!{@16#W5;b*Er9b-ZYp&rzs5+5^HRAE5I=8YCP`ltWLKl<3-9dN~)r%r$F zH-GEo`KhYk&nRoO8Uu}%qXEYxEQZ+{;%0MXF;zC{%^OzSvuo%5d-iPEvBgmQ{-1pN z=)rUEoI1A9Q%})nmSC$vC^8~t2>_WHANq?68_r+4YxclhE5pW>s}9b`S1+bBS#!3P z2UDax8!8)xwEzn--TlZ%LLGeNWAeW4r@CSRhy|jwU8{uc%Je?8h4>h#R zDKnWZEjyTk*wKy$o`7-;KBbr>4!R2zYHPB@6ja;Y?)`V{{MnB`|JmoCyK?o?#jgK% z|Ne8Y9XL8NToyS+!^tNm&b+cA98d+(Ob>q^9PC(pm~#%r%1 zK7Mu1`>JnqlbCXsDN#l?siqJRm7I~1$Y`9^bmcvqJG-e&SABaLt#*<3opD*_EU_sJ zXUq-cIc>gkFWU?v0D|{}s)G=j7|a8W4K7EA%TGT3p?~rZ|Nhk6^x1Cvb6@!SzE@ry z9ve0;kG>m(Ny!q7$)hMS8=H-dE!q3P{ZBmp(DD^SrPkfq*`o*F*>~XJkIzoF7Y89l zA~d(vkyH|(&WL3@2sq*+3SdGaXvxPN91FoPvr<-qo z&=#del~nsW#8?^T6ze*L+IwTmr9=6i+wT0efBP@l#nYGP{`8e2-+TGRp67n2+aD}8 zhsIpyd>mL)h(p6QmoHnfed~sYcJJPP$1OHb^OIAjj-TB3@~fxMU2WHO1!a}G@{Tym zOCk_!1V|JLVYZ|!zH4=%9ouIY5f*dfg`+exVrtmyGHVN)m6W?$h}PZuKv|ZWG>I@H zgdPR)q4H9*Wc?79l+EQM#XtP#|MZ>*cFnhE&YhloZU3RS_PsSX-R(-fV%3_MVm0VR zm7!K^<(jp(Z@XpJ9oz5NzJ;`!x;FF9kz;SaefZeX6N}XVwg4j~pF|0inL;EH699!+ zG9hXT0jchVg%C}*SA*0=1=VZ{&hwmr)UVqP?=!MurF zg?@-t3=(|I43AALd*<;EeEM&FV$;TT3v<1b=g$7|AAe_RvVSdBi`Uxi`L?xo{i;-Hbny7ev-9mX=ho#}&`|s6j3Xi@LqY;2s4784 zq6S|{3_kUv9}Ji!dMhpm*{n2qt|_`kmKTMv>wCYr*r(x%)#GEU#+NPc4*FTq9JHq` zx!zz(>k~ z8FM8%^W9^I-}(M`Up#*5%v^7V%tm{x$8bc7D z3^+5Hy252y(SptDG#F_%=ccD;W~K(cd4!4>5FpPRLlbLPtl4BF_IrIV-dA2?1Ori{ zL6J9XmTlg+?5QUnzkm1c72^|m-f%_18Iow->yvRKjb?AqzdAX2_|V}u_P=##|G{hB z1$GS&h(;zONsVDktfE9DB1EKMB50D3Vu~U78YwZ7s6=ApTw(IeIa`*A)VeIs?b#FW zEL^<|C;(zWaKq@5RpYBSwaSL?*O4T~DyEcFI0a>w!wHZ=_pTi~Kl1cb58i+8`mGyd z7?6`JA8~*JKBnVGPrUl-8~c9v!;_~^Pft#nB6DStNUPds#<4NPU?E9DP$&sh1W-{C 
zQBh5j0$m4!s90v>GE?L(qas&H%|?+p9)4pVsy+b@ApDfe%e8B_FJHT^*RLZ8~{zB?$zEysrFnH&5;aQ%Wd7V<>_ai z{mdsn((AWfZbkU*L&tvj+WyyG`{8?+rw3K9ss<_cNer<`5(xk)Kq>$+R24*kssN-a z0OUFqLrg@>Y^-%zVY8grkaLFF8Ok$v>iFT=sY_NRB{2ZueFP}b7+$e?$4)kRzwQHM z4X3KA`kkuoJDb-rW!#_)rK5@QiEX!S{p`>E+^*X;PF}un>B_b5z4D{C-hTV|vGWUl z;H=eDN2ygcMKuh^Rt2I8D3nDNfT|JzB9R0T0mKY81XyE@wbo@e&j^$N>+H}_Yku;| zr3+`0)KzBz?<4#LqI`JO>h%*7Ysi^)x074L;FP55b%;qhB>|gUe6+@`ST^#>zxL6; z_Th)yGqd{-oqpj5ubw!2c4l%eaB`WIR1bWW8EdR%2E(K%s!9q~CuEWtqoR zSH5qYF>F~$h@eJP7!XNBq>4X7OsZ-pBe1c?WY(C> zm<%9hW2|B827~VOS-yU3>2l&M);@$P zLLeH%v#<{sXcbaiH8HlV;b!N$i~RwSh=uAI0007>NklZ+>h#f7;+d#dWprl<@M zAt`=>K)8-T04c5`K!L1ixUA)hX4Y&rhlU!>rm-%jSXY&(g^HvEaKGxfSc?#mC1(7N|ZD)Q-T7dlvD`U5kSOD0N4<~)*3dgon$}W#etOWI9}Mb#-S75# zY0yEaNfLl4_{#_Y0g9h61XTzDik~0?+j4kxczhynG+7JhGDGlTV4d^c+n6$!*D=-- zoO3{A5fxIXs4_7WPz4hcs}d6l!;n=~V+@$B&6teIY|+T3uU?+Ke4!f5bE*wl%57cO z3MB!nS|BD|m+%t=fDI(!&k;fKJ_QI+{DjR~OINI1vB^0{M9d09F(e~^Xcas=qfm{) zBut!C05FKa0>yQz%!ZgjOhks53=4>e4UuSEyf*XRh2v872vEQPCNthAFs>tl0hEv; z{v6@Y5pGcY6&P51^Q}u(Z16E5qbOpMI#ASDCxKy2LW(3pN=#~qRTW?&5J8j-Uq`Ac z3d0smY>cQtq^|ml^V2g^7i7@KpJ9+f88;9?lo(2Y0uk3afC`H$;s(MEivK186d<5A zG(NFnqZ=AFhKbn#79@wV3KJ!2x@=zkhL7XK~rmWDEEjvY>wpR@bv_LPM1S#607rp6C z(Iz)RgI*LxFOn9%DA3etGd6Z&$F^)YvLi~iticj3Q4>jVp3gZv!`^$X^*+!4*R{`~ z6gf?Sem)u@g9ao7w+IZFEg1vih(R1NKS?04Kq%QVNZ1%8Z2mQ;PhLE8;lg5PDMeRU zmI29F2yBE@Lc%5_BCy#qgfO<*l5Arnknk4Xk^viGbHqnTjNMyz>vpE+YnR@*^3EIl ze-i|BkPu*!{~yE=BQQsTBgqE>6i|XWp&$`p%Ucja#*ik&EonkvB)X9aVMrr!i_KOM z-E2wN{G^D*g}Wa(b?Tf-k=QteLjg7vMVb(XP#9z<4uy?GcNT>~kanPy$TWmx2$42m z5KddjwxHt>NeBT!uGgz;m*0N(&6ho|xk*q&GanF`TOtr-VkkjK-A)t2X3G#lU`P`I zMvx#QBhk!{sT6u5b417lbU=h4NwF1x>3}GJu}BjfNsd&JCi$}hk{^pGb=uiGeg4>) z+jkeoER|BS&)6V9LdZ5q!4X>uZn6ymV;eA%fV9yTM{FsK!59NV$hIV~BpVc<60+N+ zOYgk#>I=*(+M0h8fjJV8Ah9LDBu9#{NkA09Bp^bNbdCf^k|P00fTW5$0-FGVpaUWV zCOINOk{}_#fQ$vnkq{C+`7;FO7DcJk?l2CE-4nykv7O^5my4aA3Q5jjONqrzMnYhW zkp%cAN!-+XN+E%8rW6PeHe$<0*nq?>HlPS&qwGCG+2_Oc=KafWA6~sYA0E&T`3VB^ zuY}l2z#sw)5}2(t5(10?A+Rw=#1V6(K<9`ENP=6U7TmK#io%gJNF{K02SEf*ZjrD_ zjueqbkRUKe1dGLV;?!-6W2efrH;%P;I}~LowK&1lQe=z}*phAGlw@c21RI2Hd;~}v z5GcYnQNpN%Y(PlD7-Oj@$ZlgPq9`p$kb4`aWt6eyCfmXK+KnsME+1UGs`TN(_3Vd0 zfeuA<^J6LmU@!?Xg3b{^h%^WZClZDLK^j4knw_8khl*v%SQu|XK!VsZU<48dgFq(1 zP((L~8inlLm=6S;JaggL$+OG7lVw`OxTv-E-f>H!w}w-;(~6J^b~+_&lj$Rwy$ffi zu`_|*J6IURRtQu}F=9(mqM!ilmIxsU8x(*_73rRAMGcX16b*GK!?0ep)&6y7yZq)W zm)`y%KcQ55gA#s;M=fiXuS zL>lOTz-)z3B}(m^){|Stp`JQ@VR7u_V(*;FSf*u!JkOSmp$B27dn+X}G0t>mcP2P9 z?abcobSB_Dg3@UuY@A3!i9<37fg&PJMA246lqgXo6j92E4N?h&jJJq7M5VTD-Jwz< zrcPTO><`MfUw`5Kx8H;{KN64}5jTly5)veABrsb7q_jg|B@S?~Wn&XU1yUR_1uSvI z0!wKN0xa1M6VRb^L?D4NN6e8r)l;W0EO(DJYaDiW_fF+Bp`j?%yP)+A?n2m^U0^%y z-e)p zO}KP-6YIH;)9yHotAlIv>iTQ?0Md_x%EEd1Y>i= zTawr^M?!3wEeRxSK#~m@5JDU=wvixhAu=szlt8jAV`uL@!!?SmO=~@~xlQ9VEtWMZ z)SlVs$EQV_M${seSPYAv4i%NM>75wHxE)8@Iine28tGfD^{FcP*S!p0aoB0%W`B(Y^9Fh6D+DJ0wJltR#L zNdX39u)vDmTV|hzq1&6e_4Q^PmnTl1Jbmi)vE4D-=JnU#y1r?f4Sn8}P%Nf#F_wr( z*I`6Mlxnx4P^b~AgL_Z+afmB#zxw8j&+#J?5(sy;LTni@TLJ{eBF&aUSjZqSTVWA6 z9ioGcK_FqU*_wvM$#Zv{ICJN;yVs+TsdQ$?j;h`|D0^G44qI>8XADkcd5a@ba6}SE zY$FMfWE+7&V19yaB!LB%u#H8eQ=pJdL~(XWDI2v-E27krawg$&@s0~0f8rC5KKkI< z+s{nmPa^Uk~PY&M%=xmYZAB0Q8j)u~HGQOAmv=|n1}3=7n=%_aNW zZ+!o~*I(f$NZ1rWqzRD*A!8x2Wh0OPf!PW`;E)W)H2w{SaX5S1UAxE6OuMI5#|}Af z1O=y4rnNaUd!Jj|wAP#%5;!v>1jra984H0W*%m^8>WTu29gTXe= zQVOVN)+o-*p0ip28HA#+X??M?_mO)pKK<0wpZvtf?*7nSx1YbGp)1$k``v%>`QQ8J zpZm%m{?X<44tGzV+B?21Qj{2{Whq4_YP57ICCcC^QEG`|x7FePSn{3MUwrS4SNRDA z5+bk>Af+wgWJ@BAKt&|j1VjXwv_YbqBVxTNsmA2yA016zV-iM%0vrorE;c zvvFWz*lgDG+!s63#~yp^&;R+)e)6ZD{Lsa7d%L?+l!JMltzLis`ak`RU;E|1{mXA( 
zy1v{yCiiJE&GIykb%+{u7#9&^x}#cZ6(mtg>Dff~HoyDUD_7op8MiUmBmp5hh!4m} z*xb@NB7p)DOR^CG2^e!kK_<5hWjKA?U1!eUQKzY^Kx9kXNMJ;t=j?6XtlGTEHiIK1 zut5SO0p>_ZHef)=AdVR9EK$6L0ZI4>ijaI0LXfr;A<{OMP((JjKI5#VlyR6h{l<+O z$4{JoEpY*J5CN)`_~WmHwW`qpZ(_F_;3Hl4_|s~xwkVc z$5P6sZ4ed8I4u^t(_*I#6(Tc*ih?>OcJIj0uD2r7P?4Dj6I}^jyY}zc4duB$cwRv@T(0j{lz0JZhk_=K9 z1QNGMvXQVYWDG)JOR_D6ouNou5;nqSOJI%|yu*)_S80f?_GcO;V{3-T%>F z`13#i*u#%J@R1LlJAGmrhDcjpwKa8k^X<34`;BjW;Y)w?g@5^#*Is)g2U+yqhM1x_ zh^Qr0imHZb7;8k4N<>&f5p^uJIQwROFqVAl)fX?l`3gUwLg$DI7>q$6P>2LfK_#NQ zV^SC}w=AaR`8z(mJa)2-i-<)}C)kFp5s=y2>fmtRtTS7>3nGNs5<-Bm*%BZLkZnn} zB``?X2-_f>Lgt7hz+hW|068tWMClle32B~Zg>a&}-fVhjxwrG^BM<-lXMg_Vk3V+D z9cND-+ntheTx58?Ue!8GXHR_dt6%wt|HrR<@B6Pj|MH8ME*(tAb|@~rXNJhBPMO9) zRVgZE7=}7biU36^p|DDc=KJi?&{jvD#z-L^m~Xa!UsZ0SZVULQY#i zAv?F!GTe6Oy~od-FGEcgJKWP4$=MZ=ZJy`#ygKZ?IlCYt_%TTwu`LO(kq`om4KfIj z1U5&+2h5fP*cLKQJ3<1RBSJwoTAu}_gi2|xuMStcdnZ2nz}Q{P@>6F|?d=`g zUG7Yt=UQr+cG^52cKz_Rmwx@%f9;vy`-8XMdi$_w-n2mxk_QdxIdf3KM#@a9MASMA zKC}nkc&^DXiH*H=!6I%kb#RsI2(>emiAU~K$ z?C$QZSBIBw96tN4XP^1P=fC=uXJ3Eqog3G$56j}AjWG=4nB8rY>6S(%oZX33hH+8q zP=}!ogSH%jC<7(xupoNR&T94cYu~%{_G{c!phJKQ?vzS4NPw!w7Bn{k9_9opLybAAAjKfd(Ix;tEJF;mpr$g zmSWS|)oWMZc_fOSlmDV<`Kolf4jS!_Efr3nKDFV}B zcW>|fT_4@q-BTG8+3r0R#l0(}%=PAQeX#G$o-H6E_z{7*NeE#K5+H>D+foRSuq6TB z!ct%Zwh)Z;OVD7^XZ@Z))=O29fOoLB6}fJV^+QP?v)>W_uJ2Z|Ap`V;Dt9{fAjS>Uwil6cYEaiyb)6w zOSX9^Wf-dLw0owa7L_`Ti{*~WAjz1d4kJLObs9W~Mk%Iv+h-r8op-{uX@>^(vt39}`EL7{L}~8X-FvUGrB0vMZLxFgqYr-c=YHm?Pd@pHdoJF- z7^g9cf|jWWpwCSxQ5j=1x1lwpx?GgH*uQb(-8bKU_wtQ*-h1PPAO7GEzVM|h?_Roe z{o1w5H^#k1D#xfQRvO7^_Y9RfE$T4UVW`6>DLWxWsVb$8W5Mfr|K;yJcjL+%%bmsH z!HSzo&=Y|wH2!re!+nqa^xo;))`zQXy+|ST-t2Vt*5-L$&zqIA3vA0qM-mGe3)zkc zb3_VlMUjLQ3P2G8No+~th;2n^3mI=^CeDZ;!kI+{yZ7GcGC_13We-ZJ4}J8Z&;H!c zKK9^8@4NRyC-#;@sq}fBj*J#k>uDRZZ$w?K50_IdW!SV;t;^k=JtEet&3fK{>&a5utHN{vBLqmGMdiaMZ@*;Pt`Ixgx^R`cq{ zJ8!)5{Ie>!^OFP`VZ#Rm=E$)VXYPIMQ+8d?4fhfyQ18v|^Ljnc>v^-zJ}V@F*$T-V zk+8uEDN2+&L@A{Xr4*?ku_Xyf*p>uDL}mg)h|C0o0H;lsh;CzJmx$In!tSW<9~{gr zclUN5d-TDdefsIg9(m}(`I9Ghb^;+9EV>>NM&$bsIPer=^^f$ksNk8;PVzePg4D zGL3GVAq2%^$*^XOk+1HrUw!elFa6;&zy2G)`P%F6&Pq8+nHC`hrOHWmqgtYtVO;L) z48xRL5h1L|zBzen=Lg^U`ulIZB`_c`w}}6jlgCed=)OmXErzkCiO5Wfy3||mt+#oFlL%reF^z5^ zC!`u#R9%NEwleB4P_xhTnydSVFTMK8=l;d#pLynwUU~b~elR;55g{Yetwza2Dxp&A zG%c2Otcr>XQ9_u_-n3lZc<)dDAkd$rSY*y>_ zs?8f^ccyMa;)sl~R3y7+?(Cg9cKl?|G~y;xu1RbfsfpM*KH@3OVad~p^{#0=7Ym^YqY+JkiA~_&HjES zG4384OBr`c-OQV|8pnF-)QQ&n7?mg$Pt%lho15Odw))z4zVkbu`|Usc!{5Jh`SP69 ztd>hDCTm2dozzmX>bP9&?nEgzsZeN;dRrYows`Ty@4Wl!tNerj2@Dc8TW4>->-3pB zhviNktBKZ{RI;~ho6UMPZ`O7f&YnOhkZcfJX+qKaW*is0C(hPka&`oWBNkv3!Uk+3 zi4T}915#jvBTACCWg?=tu2R6Ey6JOIO;dg7f%~3*>d$@bV~^c_+p%MdsrJOBmw*puKl({rE9_K*L;Yp=ic-lg|f2W#A;j-rF*C?O{{ zwJwLn6m=LEV=1-Gj-sS3>Y&QkpZPqQXvS;_1cXSlwb z$H$>gqf0Kvsi&_V-ng<}|Niej^PB(dx4-?}Z(qG}qb`O>3)x*{ts{1(6qP8`Vlgci zp^!?{p=W}3cBXgVdhO-!KS!?k5dkIW26N_)yU%~2FCAP$tQD92}4n-$zvh`YO zDUpFF<8tTr+s{AszWS$M{L&Y` z`^qbaYs&7PhGB@Hufmi65!fP|7$iJ-heS zdamDl;l*G5^?&wTzw^77-o3J1j;n+HGA-&*oqZ5nJxNEDGLDOJIpG+_G0Hg4t&ZbR zW1bIR`2M#JFTXJ@%lfbb3P?Wikw-s$_V$Z6t{>P^M7Z~Cy|=bL+^i4#JPS#HO+pwH z0d{6Y7}+h*>D%t0jM$Q}orxd~Mq$7fGL{0T6yCxDg$zyy0%wxSqA044P|)!1-rmIv z7k>K5Pd@t41NYu@$NA&Ode4Gf+*hkL*)_iO%InX5<0~({@bV9S_`>rqz22^`4_d3k z7?ojXqGdW|*E$xf5Oa4|oIZWx;Rhf5i~rtVc>M7PhRG9gb#+)v?3SJV&HCEG!5@6_ z%YWzZ{>n?wy_^g6^?DeW(0V%TI*#s{AYvHCVX-WAG*T@=)L|+E>&@o+yKlbs{5Sa# z73drpmnR;2;;F^ByuN>cBm{14&Uv$0A8ghuXG%nr#0Cr!U}xB+R6?z7mdoAK=Wm~z zB^k@cWM=4y!8W!O;Iwf{0%c}|A|ma~j#W$PJ);zfQl`~ry_x&&-p<1hJ@DtA{N$q# zKXm6EC-xTOkQp(qHtY3zwYzhy9-F@T?4SPHKl=6WJ^%dmgM%BJxyIO?^XkwoOASO% 
z_3Wiq=si_bomfIXf9A|5KK{fn{OnT~Zr^+O{`(FOZs0nUGOyR`P5a?H@BH%r^0&YK zjqksEw3boJScmSuyIkDZzxUQ2lqh8wTkqFz+}JyQ@}UPl@>5TI{E0^&Ie%(* zXLlHf(pPhhIGESdV&~+!+n@dFS3mdp-+$(rFTZo;@}YalSWL@xb18GAvol1o*mT8M z=k;M?n&)ieFvT#AHBO&A^NCM9`oM?p`uG2xUtAwv+1cAG)&jEm*8+-a?;)LJDgl;c9rKljb6@4d@S;-=l> zJ9mEg!D;Vw#MtKSWFfcad9z-vGUseFnNkqyNtBY1_JGJ#xJ0GKJohtaF6{0dZ@on! zOSonB6qE?t+5&G$0!bke3duGIJ0e_kZhmUU>7(mtK1F@@6w{=CS0ednyGIQ&m+8tu+OK z=d^Q^^LlPmRFy(_8h7uxc-N;Mdg$kW_R|l3?A|q-DVF=!uI=rdSgqTapZ)6p{=fa5 z-~0UkefH#;_pe=#vQws*LV2WTDkO+nhhdz`Fc77b8g-PA=BbwV-+S$a?>@&(0;Zki zu`}n7@15J(I~g%F+v*a1vuX2s-n7l;(Agv8#G-*pO|XbQ4^g5-4Vl<%ICtv8sS~Hx z^Wm_lhjUYuq9e5q5)ry7Yy+7ofsK^CV+-|ehXV=Hs0O8^{lLBVKKbMm58Qp%y?0+Y zy|8Kv(Ig{o;NMsvRgs#Nt9+cIyJDTQWn$HY9CA89OhJ(cb>a= z=H!WhO!=F>m@k-P7@?cDC(a#1toy6N*~UhVB3-#N4Q#V>yOSN_+(`~Kl-)AF5nuMInk zalyPgT(5G7T2W8odCp!`no&5@fraL52vGKnsE$}s?mBk*7eDjlU;NAe(Q>oDbK+#o z>v0-$*x5fg_|liZ`q%&ZfBnY0my77D^|Z4qMPMWhX(=E|RE>j5UGD87=o*GmkyVk- z*Is?;`oX^Ly#EtB$4{52QKDAOZFXj&KCkF~vwuLRWiKU6YfjB2N9M>bmzllnJI78R zX2LwwdjE$X+}m639b5MF{${nFofMLY>P$OvDhjdzFcy{2KgmUWi`R{!Do4@h9zx$1^ef!e&!>j#pjB#(8 zu3tYWqpQ^%1I5|x^cM8Au~53ZXUh~$3fKbG@aSas}9oPwaBtq|cTF$}xMj|v3pES5KJ99+J7&>X~oRa7DEE`($d#t;HRYP}Fpo9_{KXo*3I+oLi5a| zupO%1s1QkJ0-3%g)5gxkMx?U0xeTLw8K-*faQ@UM9{Zbr?Y}v`Y-Ge@;mnvlF3a<; zz5Gjm`9J;6cV0TSJB&rHfonSD#6&3=P$-BJb(nT{B1#n1GIoO^6xrr0m*3IDPyGd_ zGF^s|%p#NCY*xK*R_lYlSqU;_m36qf36HT-FD`1SYB%HwMDu3 zz2sd+*ZKjR(~Q5{U#(K*Ctab|6F=W2fy*GcG0+b88nbUVP${AAk6P zkKA?P^oeOuO0BtQULPE0J9+BDH=cX$pZ=r2cX)06=GCijUEUu{Z8^7jLwa{-;ZEx& zz+}2_WfuZtx84mx5dtOIvlqhMt>=Eh{?>p0Kb<@XWZgx0sfcA{FE!CpfJL8KEeHN~z1muvkh&lSb}5b$_*4_)q}&`;l7VO zQg`CeQ%<{w2lLzST=y_hMqAwu9J14F34x7_B8<|R&fZ&3hGA6b{MplwKk~?{%(mi^7)r7H(fr>HxLQ&w6S$siCZN@hw5mFC{v`(`#}ST2t3EIxYgJ)d~| zkq7R(XK#0`@-!6BeSb5D%c;|+zW?24f9LnV__eP;_x9!MmpA*ny{7NoLHbS+Ie+Or=MIe~51bjlTyA{5GsXF- z*_oN0{W(##F^yp>!(k`O0fC4a2{TOq73Ks42vK{RfKfH7nwrl-(d+Ekyy?0FdoSC) zWBb;vE}`(^99G98HQl~r>r3DJ{=?sT_|O|~yz<8Jb8Ah@s^86^WHO4&eW59tBh8X3 z%v9CP0GMhr)ub>545S&vxtO95DoThmU$@Qj%Gr+NNpRb+uS31#`rnBEQmA2QqFIDQ#dyb|!aEUwHG;#rkDVof&Lt zGb>d#cBkeCb7oB{GNhD2z4N3A1I5gWoZmQe&6QW(eB*W7 zHqQ2nUgjuX5Sx|p$miWNix(by=)uF^KX&NI@iU97&Ie`NR&|!SPNx&=XcTecyw|Af z45|t8L6bR=XNCBZHgi+0uq>(8_0x$}l_)x{@T^I~{TQ|<$amQ`@ z_g>i*?f3gexC~5^B%U~P;r_4R|N7x$Z@hl|%<&6ZFYEM!sa92VB2(SLcsv1t;As$2 zV+(o`Z5vIK;cWyA>L>}i4ocFf$cnOVDwx!5JJ+B7;UBs8uJ_zBy|FvnoicJwf zp!3w@PyYA+?w5}oKAPu10PT)6hTDa=6o$tBpy?eHA zAmX4mhz1&5w8i1dcb{-1GERn@9j$vb6mL1{E4gcMU#Gr&Lr;GB?B_NGG?KvBR9Fyp#?x4d7I z5>O;lqa|&d(PWfTOA@WdG8d>AN6kZS4g%C|BV>>OO-#n)uw%d2^p6gB(rSGk-u1fBvx`VSP z4*%t!f9XH{`tNtAd-dc(GG7!KK`$PrfDljxg$7$kKtw3M==6Q?6fqG%XaJOZZ+RaS zW(G581hkFyc(j(13JpG-na=uyUb%VmrjPyDM=#m6 zb$)g_#db8VLZEk@dNO|M@yEXMwQqd)&>^8~>j}+eML-BK#;R_*okBucS9RT}%LtGu z<{^tQi6|%nf>1T6I&o&I$s$$65mL1cV99KDdiLfUZa#SZwO3z#*@n4JnP;Qbu@vm| zW|-RNAOH66{PypE^}*-L=}t3R23^s~3kfiD&L{f;00uQ9K|n}x-shbx&qW-aQ!^Mv z$h|k;Lr_&=22HS3H`REwDnh_4rj#UtW=)wU(_)(~?ABsex9W|X+_~XOT{ko5LuVv& zZ~1m93w5`0!khAf|+?&QKtcJKL_ zpZVGOsd8#|)?71LTg$V&n#hr(Z+_)VUwY*6XD8Nrcn@vrD9(o>j3zCE7b0X~JRTFi z(snSIs)ws0q?z|lKuZx26N3R}q{JA-g`}x&w9&@EPN%c~$}8@;{nmX~UcPh3rf%*e zli|{8=CiCjfBxK=uYUE<|M;_ibZ)H~k48=;(zWorPCKk+><#eyt zE3;c}y!HL}-aXyx_GhNFS!?Q{l50zo?>znVpZw{cpS`e_6?uxS2e?lBWHL##?Ys7D z=yc8T?71^-+bpdPo$qCVqwy*L?|m*bgIa=sM$n}gO*Mq9O|hn zgyOt&SzZ(&FO&p80tW6sc#i@wGiZvf#n@I=)7FG3Eh?ByEfoy(Qn#te{A`CCC(Xqv z?hv~Oa&f2Ll#dhZ^An%)a>8tg!LcU1UUyDq<*K9h`eScpIkTFEm{inS6DD4LqZwH% zB9c0D-j`iJGc|MX`#*NaEjP>*MOKJeGHZrw%cqW>e&~s({^0X}X1*+nkg5^sJ7qU3 zyTj4yRKN3%+uw1;p53$av*(wV4<9=G#?FZl$(Q|LSi({>VR`U0N2A 
z#9%mf>fG}$y*jDn1i}K_<0M*h%a)nrxLQub&^&o^h}xA&WSbfU5v7zWr%4S%tcK<1 zpL_hZufOu`?|l24zxU0Ze#_1v+th@*QUup;T>H@*uYdP%zWejH-@S6u@wT@X3bWFZEQ&h8-c+#lE4P>8A+MVi4=gMbp5RB zXSM4TLQzye&8}TPTg>y zy7%{?V1UfA;1N{{DyG`@!E``S|9+ z-r;ECwA|Dx_AH1!QSl$w?3YR5eP^&BrG5J!%&$hSw#iHwmaep~1srBy7o0r~x>(bBPfBok_ zef#}i9_<~B!w^xszLvgVQv@neTAC#75+&_GtchQQH@Z1#M>|Ih#Ql~-Pw_U|7*e$uP4t%lN-C99h^KK{EO{QbLcz4Punmw)-e zwb7$GG!1PkwRRC9DM_;ytl1zCYqAJCL2FY@B`k;?vK6bvb$(no3ZLz{3a$nJqKhYIQUXOJ|P5Q2OHL)i4}e zY>hee{nqCmyYQF)?$7`2U;XdP<)>aAKJobH_7C@5yPadlFTeNp8?XQ5```WU+n3(G z^6{-UwfSOUr=kfmBUD1H1(8l0YXf79Ekwwfr4+AW@fjIxmr&Xuuq503ih0OLGRAx+ zGi^&z`@Z(`uA6mTHChYF)|Br0zTcW1?CfwWX*gy>uBqTB+5g;&z6d184 zWF&0XD&Z2eX@V%w_)N0#WzGg+jyBN>@*wJ$?7itp}gpULJgU|K_!v8W3zKXlBDm1fNxRe&zJpGg~K5>>Vywd#m|kD{l<;&fA4Rvy!X+2?|*dp`t`OvY^x!n zt6hmsCKVy6s7#q8=`2w*QwUIqH5qc5G)g9NoD3!L-~z*#C4qI1s-UK^nwb2qy-c2r>~UM4Gh# zJ>*P7LJeg)*(i`;9=Y)3GiNV+ZnikJnx>>1Rx71d2stv1+b|BpIJPl!jH+>5E%%Sk zoH^&5&YpklFaGS${`GghQKNL-?C$+LfBo10<>zny{Q9-qAKyAit!7=zVQ`D0>OfMu zIU+2BZ7Eo@OoqZvJ|lsJGBYBOYzrgVtl8EEHfw@3#*%CcW6}+@hC)%geo<;yYZpaQ ziBdqB84+FIN3S+4uYd6V;e$_Chx=_BfLc(aL4bvMNMOVxLX!JOujm?@x z2pJ1OCT8R8g^N#r{^cQKSdBzbOAUE!*#<;N(=d*$P16c_n2wz8XN%>7!=rJ0>80m> z_jg|X;`7fJre9vW^3yl|@#@DnZr?c?v(C3>9*;szA|0WSWIKp-+Mo^AydflEkOT%H zY=gi;#zF`iV43n2-EaArh+By7NIk^r+IVFLz9*w~f?W=*mIuSp@W(}W^Q?P{%xTDzGP2t|kx z3ACy8i~076o%^5OeQ@vY;e%UycW$()0l^~{2oe>X1RFMj3^0;xgcTw|Mr$@3vW!_X z2$GFpBSBDLVy#?!=1Zr~o!L2i_WpzY-Mja9c6KD$R;aZp9L=pwtDqrI!!V3qu7;MY z(vopi>MToLO#_Y~XDYMVOqJC*L@f%1YNln<4wbY+U@HQIQj&lij3^5E8utJCuEaR2Uba3Fpy0<&Q# zQl>E*;@3nb0@a`yn}CSG1`^wZutC_=5-l^$S^+~s1*8*aA3y)Or%#``c=y3Bt_A1V zO5ZhFW^*Pv$!Z>zjpz zfkTm*l57K#Y>=IH41Cf~~YF}$#5BDE@ za^-_@xjP;mjKc~jL6|fW2vDXG5>6x}*#?0zk{UKBzyXOAU}Pm0Hfte}oHiQ<1z{3o zl1H|-k6(D|g;VDqpN5>q)}T~GX_{=+P-~gv5YAO5&X%LyRN`>|Fs-hZwpteGY78wE zMIbC>r!7JlB%EvrkOW8q12z&i5}&cz5H?^Tgbg;vKx`D1(sxm3p{S)uvQP*pwTsX& z4PEJNqgrcO4u_vy|LFdWD@;c`RDxzg2-3)WMv}2jNX9k+guDi3Ljr;@7PbP*Nfy*nN)b67&o z3M7*;D6mO3YRJtAh9YdthD1os1|%?RDnS|<36L+s+~8z+jCOleFF!ZZ`Y!X^zOz{U__EzO#QAf&X##$+T^f;E{20OPlv;MxbZ&-VY3!NCI}e><`IE~00$ThAyT-pW+;JpD8R@#L>hs>fWQQ5 z7!Vs~Lm)7i|2siIz{rNmPQSf<;?(IgXU}(wZNf!l^K!LPDM7(FTVfe0aH5a|gfJ3d zP=cHmYiT1X(-BgHZ4t;23So>fX^UTxYzw0hNJ1$^E#20!tqAVky>;;5)BU}J2YdHc ztE0#%S4;k90t5{tK4T*uF%p)-PD7D^#0Z;UBTWbaMKmKQ!jkM17@G|d(9D_y=D!mp zj|3$DjMy;0smxBDK6dKNxpU_ivt!FCd-dbEj3}ASnF<26mPipH*2J2Ou@NAH@CJdb zHKz%L*pOu*8zIX=wlM@kc81h!sRSxD=8Jmg){UDtt{v{*Z^NN7vY1Q)B>_&7-ymcJ z21AH65^F(t4I~St4JZhdHXx8mk!DRmt#aB%#(}^_#$X{Mut*~i7z_fjkvt+W|DWuf zfAVure?fH?wO5<6jZTM@Sb(jNjcq9;kgx?vekddZ!nV+w@me54ekc?Or?DeSIt@;T zq#zwgMbUEFzyIm=Ywu47_XLIlrLV&<5s)A-0)xN;6M#swRsbn6CIBh1kZdB-8A!7E z6=O&v6;;T3KKVmur3YbR{YLH3tkih&;i4B7UMqnY3 kSxZ3JNEiq(N{$u&8{4UlE~(D-WB>pF07*qoM6N<$g1+Uaj{pDw literal 0 HcmV?d00001 diff --git a/tests/test_DRCT.py b/tests/test_DRCT.py new file mode 100644 index 00000000..4912b328 --- /dev/null +++ b/tests/test_DRCT.py @@ -0,0 +1,53 @@ +from spandrel.architectures.DRCT import DRCT, DRCTArch + +from .util import ( + ModelFile, + TestImage, + assert_image_inference, + assert_loads_correctly, + assert_size_requirements, + disallowed_props, + skip_if_unchanged, +) + +skip_if_unchanged(__file__) + + +def test_load(): + assert_loads_correctly( + DRCTArch(), + lambda: DRCT(), + lambda: DRCT(in_chans=4, embed_dim=60), + lambda: DRCT(upsampler="pixelshuffle", upscale=2, resi_connection="1conv"), + lambda: DRCT(upsampler="pixelshuffle", upscale=4, resi_connection="1conv"), + lambda: DRCT(upsampler="", upscale=1, resi_connection="identity"), + lambda: DRCT(qkv_bias=False), + lambda: DRCT(gc=16), + 
+        lambda: DRCT(ape=True, patch_norm=False),
+        lambda: DRCT(mlp_ratio=4.0),
+        lambda: DRCT(window_size=8),
+        lambda: DRCT(depths=[6, 6, 6, 6], num_heads=[6, 4, 6, 3]),
+        lambda: DRCT(img_size=32),
+        lambda: DRCT(img_size=16),
+    )
+
+
+def test_size_requirements():
+    file = ModelFile.from_url(
+        "https://github.com/Phhofm/models/releases/download/4xRealWebPhoto_v4_drct-l/4xRealWebPhoto_v4_drct-l.pth"
+    )
+    assert_size_requirements(file.load_model())
+
+
+def test_community_model(snapshot):
+    file = ModelFile.from_url(
+        "https://github.com/Phhofm/models/releases/download/4xRealWebPhoto_v4_drct-l/4xRealWebPhoto_v4_drct-l.pth",
+    )
+    model = file.load_model()
+    assert model == snapshot(exclude=disallowed_props)
+    assert isinstance(model.model, DRCT)
+    assert_image_inference(
+        file,
+        model,
+        [TestImage.SR_16, TestImage.SR_32],
+    )
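
For illustration only (not part of the patch): a minimal sketch of how a DRCT checkpoint could be loaded through spandrel's auto-detecting loader once this architecture is registered. It assumes spandrel's documented ModelLoader/ImageModelDescriptor API; the .pth filename is a placeholder.

import torch
from spandrel import ImageModelDescriptor, ModelLoader

# The loader identifies the architecture from the state-dict keys alone,
# so no architecture name has to be supplied. Placeholder filename below.
model = ModelLoader().load_from_file("4xRealWebPhoto_v4_drct-l.pth")
assert isinstance(model, ImageModelDescriptor)

model.eval()
with torch.no_grad():
    lr = torch.rand(1, 3, 32, 32)  # dummy NCHW RGB input
    sr = model(lr)  # for a 4x model this yields a (1, 3, 128, 128) tensor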