From e371aea0f81de58f97a44bc5e63ffba6645dea3e Mon Sep 17 00:00:00 2001 From: ai-modelscope Date: Wed, 13 Nov 2024 12:00:27 +0800 Subject: [PATCH] Update README.md --- .gitattributes | 32 +- BiRefNet_config.py | 11 + README.md | 178 +++- birefnet.py | 2244 ++++++++++++++++++++++++++++++++++++++++++++ collage5.png | 3 + config.json | 20 + configuration.json | 1 + diagram.png | Bin 0 -> 40509 bytes diagram1.png | Bin 0 -> 21269 bytes model.safetensors | 3 + pytorch_model.bin | 3 + t4.png | 3 + 12 files changed, 2443 insertions(+), 55 deletions(-) create mode 100644 BiRefNet_config.py create mode 100644 birefnet.py create mode 100644 collage5.png create mode 100644 config.json create mode 100644 configuration.json create mode 100644 diagram.png create mode 100644 diagram1.png create mode 100644 model.safetensors create mode 100644 pytorch_model.bin create mode 100644 t4.png diff --git a/.gitattributes b/.gitattributes index 886ac0c..5a23437 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,38 +1,42 @@ *.7z filter=lfs diff=lfs merge=lfs -text *.arrow filter=lfs diff=lfs merge=lfs -text *.bin filter=lfs diff=lfs merge=lfs -text -*.bin.* filter=lfs diff=lfs merge=lfs -text *.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text *.ftz filter=lfs diff=lfs merge=lfs -text *.gz filter=lfs diff=lfs merge=lfs -text *.h5 filter=lfs diff=lfs merge=lfs -text *.joblib filter=lfs diff=lfs merge=lfs -text *.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text *.model filter=lfs diff=lfs merge=lfs -text *.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text *.onnx filter=lfs diff=lfs merge=lfs -text *.ot filter=lfs diff=lfs merge=lfs -text *.parquet filter=lfs diff=lfs merge=lfs -text *.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text *.pt filter=lfs diff=lfs merge=lfs -text *.pth filter=lfs diff=lfs merge=lfs -text *.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text *.tflite filter=lfs diff=lfs merge=lfs -text *.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text *.xz filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text -*.zstandard filter=lfs diff=lfs merge=lfs -text -*.tfevents* filter=lfs diff=lfs merge=lfs -text -*.db* filter=lfs diff=lfs merge=lfs -text -*.ark* filter=lfs diff=lfs merge=lfs -text -**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text -**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text -**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text -*.safetensors filter=lfs diff=lfs merge=lfs -text -*.ckpt filter=lfs diff=lfs merge=lfs -text -*.gguf* filter=lfs diff=lfs merge=lfs -text -*.ggml filter=lfs diff=lfs merge=lfs -text -*.llamafile* filter=lfs diff=lfs merge=lfs -text -*.pt2 filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +model_not_working.not_safetensors filter=lfs diff=lfs merge=lfs -text +t4.png filter=lfs diff=lfs merge=lfs -text +collage.png filter=lfs diff=lfs merge=lfs -text +collage3.png filter=lfs diff=lfs merge=lfs -text +collage5.png filter=lfs diff=lfs merge=lfs -text +model.safetensors filter=lfs diff=lfs merge=lfs -text +pytorch_model.bin 
filter=lfs diff=lfs merge=lfs -text
diff --git a/BiRefNet_config.py b/BiRefNet_config.py
new file mode 100644
index 0000000..37c8ac5
--- /dev/null
+++ b/BiRefNet_config.py
@@ -0,0 +1,11 @@
+from transformers import PretrainedConfig
+
+class BiRefNetConfig(PretrainedConfig):
+    model_type = "SegformerForSemanticSegmentation"
+    def __init__(
+        self,
+        bb_pretrained=False,
+        **kwargs
+    ):
+        self.bb_pretrained = bb_pretrained
+        super().__init__(**kwargs)
diff --git a/README.md b/README.md
index cccd376..712b0ee 100644
--- a/README.md
+++ b/README.md
@@ -1,47 +1,143 @@
 ---
-license: Apache License 2.0
-
-#model-type:
-## e.g. gpt, phi, llama, chatglm, baichuan, etc.
-#- gpt
-
-#domain:
-## e.g. nlp, cv, audio, multi-modal
-#- nlp
-
-#language:
-## list of language codes: https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
-#- cn
-
-#metrics:
-## e.g. CIDEr, BLEU, ROUGE, etc.
-#- CIDEr
-
-#tags:
-## custom tags for training methods and more, e.g. pretrained, fine-tuned, instruction-tuned, RL-tuned
-#- pretrained
-
-#tools:
-## e.g. vllm, fastchat, llamacpp, AdaSeq, etc.
-#- vllm
+license: other
+license_name: bria-rmbg-2.0
+license_link: https://bria.ai/bria-huggingface-model-license-agreement/
+pipeline_tag: image-segmentation
+tags:
+- remove background
+- background
+- background-removal
+- Pytorch
+- vision
+- legal liability
+- transformers
 ---
-### The contributors of this model have not yet provided a more detailed introduction. Model files and weights can be found on the "Files" page.
-#### You can download the model with the git clone command below, or via the ModelScope SDK
-SDK download
+# BRIA Background Removal v2.0 Model Card
+
+RMBG v2.0 is our new state-of-the-art background removal model, designed to effectively separate foreground from background across a wide range of
+categories and image types. The model was trained on a carefully curated dataset that includes
+general stock images, e-commerce, gaming, and advertising content, making it suitable for commercial use cases that power enterprise content creation at scale.
+Its accuracy, efficiency, and versatility currently rival those of leading source-available models.
+It is ideal wherever content safety, legally licensed datasets, and bias mitigation are paramount.
+
+Developed by BRIA AI, RMBG v2.0 is available as a source-available model for non-commercial use.
+
+[CLICK HERE FOR A DEMO](https://huggingface.co/spaces/briaai/BRIA-RMBG-2.0)
+![examples](t4.png)
+
+## Model Details
+
+### Model Description
+
+- **Developed by:** [BRIA AI](https://bria.ai/)
+- **Model type:** Background Removal
+- **License:** [bria-rmbg-2.0](https://bria.ai/bria-huggingface-model-license-agreement/)
+  - The model is released under a Creative Commons license for non-commercial use.
+  - Commercial use is subject to a commercial agreement with BRIA. [Contact Us](https://bria.ai/contact-us) for more information.
+
+- **Model Description:** BRIA RMBG-2.0 is a dichotomous image segmentation model trained exclusively on a professional-grade dataset.
+- **Resources for more information:** [BRIA AI](https://bria.ai/)
+
+
+
+## Training data
+The BRIA-RMBG model was trained on over 15,000 high-quality, high-resolution, fully licensed images that were manually labeled with pixel-wise accuracy.
+Our benchmark included balanced gender and ethnicity representation as well as people with different types of disabilities.
+For clarity, we provide the distribution of our data across several categories below, demonstrating the model's versatility.
+
+### Distribution of images:
+
+| Category | Distribution |
+| -----------------------------------| -----------------------------------:|
+| Objects only | 45.11% |
+| People with objects/animals | 25.24% |
+| People only | 17.35% |
+| People/objects/animals with text | 8.52% |
+| Text only | 2.52% |
+| Animals only | 1.89% |
+
+| Category | Distribution |
+| -----------------------------------| -----------------------------------------:|
+| Photorealistic | 87.70% |
+| Non-Photorealistic | 12.30% |
+
+
+| Category | Distribution |
+| -----------------------------------| -----------------------------------:|
+| Non-Solid Background | 52.05% |
+| Solid Background | 47.95% |
+
+
+| Category | Distribution |
+| -----------------------------------| -----------------------------------:|
+| Single main foreground object | 51.42% |
+| Multiple objects in the foreground | 48.58% |
+
+
+## Qualitative Evaluation
+Comparison with open-source models:
+![diagram](diagram1.png)
+![examples](collage5.png)
+
+### Architecture
+RMBG-2.0 builds on the [BiRefNet](https://github.com/ZhengPeng7/BiRefNet) architecture, enhanced with our proprietary dataset and training scheme. This training data significantly improves the model's accuracy and effectiveness for background-removal tasks.
+If you use this model in your research, please cite:
+
+```
+@article{BiRefNet,
+  title={Bilateral Reference for High-Resolution Dichotomous Image Segmentation},
+  author={Zheng, Peng and Gao, Dehong and Fan, Deng-Ping and Liu, Li and Laaksonen, Jorma and Ouyang, Wanli and Sebe, Nicu},
+  journal={CAAI Artificial Intelligence Research},
+  year={2024}
+}
+```
+
+### Requirements
 ```bash
-# Install ModelScope
-pip install modelscope
-```
-```python
-# Download the model via the SDK
-from modelscope import snapshot_download
-model_dir = snapshot_download('AI-ModelScope/RMBG-2.0')
-```
-Git download
-```
-# Download the model via git
-git clone https://www.modelscope.cn/AI-ModelScope/RMBG-2.0.git
+torch
+torchvision
+pillow
+kornia
+transformers
+```
+
+### Usage
+
+```python
+from PIL import Image
+import torch
+from torchvision import transforms
+from transformers import AutoModelForImageSegmentation
+
+model = AutoModelForImageSegmentation.from_pretrained('briaai/RMBG-2.0', trust_remote_code=True)
+torch.set_float32_matmul_precision(['high', 'highest'][0])
+model.to('cuda')
+model.eval()
+
+# Data settings
+image_size = (1024, 1024)
+transform_image = transforms.Compose([
+    transforms.Resize(image_size),
+    transforms.ToTensor(),
+    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+])
+
+input_image_path = "input.jpg"  # path to your input image
+image = Image.open(input_image_path).convert("RGB")
+input_images = transform_image(image).unsqueeze(0).to('cuda')
+
+# Prediction
+with torch.no_grad():
+    preds = model(input_images)[-1].sigmoid().cpu()
+pred = preds[0].squeeze()
+pred_pil = transforms.ToPILImage()(pred)
+mask = pred_pil.resize(image.size)
+image.putalpha(mask)
+
+image.save("no_bg_image.png")
+```
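+
+For reference, here is a minimal variant of the snippet above. It is an illustrative sketch, not part of the official example: the `device` variable, the `mask.png` filename, and the `no_bg` name are placeholders of our choosing. It falls back to CPU when CUDA is unavailable and saves the predicted mask separately, leaving the input image untouched:
+
+```python
+import torch
+from torchvision import transforms
+
+# Reuse `model`, `transform_image`, and `image` from the snippet above.
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+model.to(device)
+
+input_images = transform_image(image).unsqueeze(0).to(device)
+with torch.no_grad():
+    preds = model(input_images)[-1].sigmoid().cpu()
+
+# Save the raw alpha mask on its own instead of writing it into the input image.
+mask = transforms.ToPILImage()(preds[0].squeeze()).resize(image.size)
+mask.save("mask.png")
+
+no_bg = image.copy()
+no_bg.putalpha(mask)
+no_bg.save("no_bg_image.png")
+```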

If you are a contributor to this model, we invite you to complete the model card promptly according to the model contribution documentation.

\ No newline at end of file
diff --git a/birefnet.py b/birefnet.py
new file mode 100644
index 0000000..1ed28de
--- /dev/null
+++ b/birefnet.py
@@ -0,0 +1,2244 @@
+### config.py
+
+import os
+import math
+
+
+class Config():
+    def __init__(self) -> None:
+        # PATH settings
+        self.sys_home_dir = os.path.expanduser('~')   # Set up your file system as: SYS_HOME_DIR/codes/dis/BiRefNet, SYS_HOME_DIR/datasets/dis/xx, SYS_HOME_DIR/weights/xx
+
+        # TASK settings
+        self.task = ['DIS5K', 'COD', 'HRSOD', 'DIS5K+HRSOD+HRS10K', 'P3M-10k'][0]
+        self.training_set = {
+            'DIS5K': ['DIS-TR', 'DIS-TR+DIS-TE1+DIS-TE2+DIS-TE3+DIS-TE4'][0],
+            'COD': 'TR-COD10K+TR-CAMO',
+            'HRSOD': ['TR-DUTS', 'TR-HRSOD', 'TR-UHRSD', 'TR-DUTS+TR-HRSOD', 'TR-DUTS+TR-UHRSD', 'TR-HRSOD+TR-UHRSD', 'TR-DUTS+TR-HRSOD+TR-UHRSD'][5],
+            'DIS5K+HRSOD+HRS10K': 'DIS-TE1+DIS-TE2+DIS-TE3+DIS-TE4+DIS-TR+TE-HRS10K+TE-HRSOD+TE-UHRSD+TR-HRS10K+TR-HRSOD+TR-UHRSD',  # leave DIS-VD for evaluation.
+            'P3M-10k': 'TR-P3M-10k',
+        }[self.task]
+        self.prompt4loc = ['dense', 'sparse'][0]
+
+        # Faster-Training settings
+        self.load_all = True
+        self.compile = True                             # 1. Triggers a CPU memory leak to some extent, which is an inherent problem of PyTorch.
+                                                        #    Machines with > 70GB CPU memory can run the whole training on DIS5K with the default settings.
+                                                        # 2. A higher PyTorch version may fix it: https://github.com/pytorch/pytorch/issues/119607.
+                                                        # 3. But compile in PyTorch > 2.0.1 seems to bring no acceleration for training.
+        self.precisionHigh = True
+
+        # MODEL settings
+        self.ms_supervision = True
+        self.out_ref = self.ms_supervision and True
+        self.dec_ipt = True
+        self.dec_ipt_split = True
+        self.cxt_num = [0, 3][1]    # multi-scale skip connections from encoder
+        self.mul_scl_ipt = ['', 'add', 'cat'][2]
+        self.dec_att = ['', 'ASPP', 'ASPPDeformable'][2]
+        self.squeeze_block = ['', 'BasicDecBlk_x1', 'ResBlk_x4', 'ASPP_x3', 'ASPPDeformable_x3'][1]
+        self.dec_blk = ['BasicDecBlk', 'ResBlk', 'HierarAttDecBlk'][0]
+
+        # TRAINING settings
+        self.batch_size = 4
+        self.IoU_finetune_last_epochs = [
+            0,
+            {
+                'DIS5K': -50,
+                'COD': -20,
+                'HRSOD': -20,
+                'DIS5K+HRSOD+HRS10K': -20,
+                'P3M-10k': -20,
+            }[self.task]
+        ][1]    # choose 0 to skip
+        self.lr = (1e-4 if 'DIS5K' in self.task else 1e-5) * math.sqrt(self.batch_size / 4)     # DIS needs a high lr to converge faster.
Adapt the lr linearly + self.size = 1024 + self.num_workers = max(4, self.batch_size) # will be decrease to min(it, batch_size) at the initialization of the data_loader + + # Backbone settings + self.bb = [ + 'vgg16', 'vgg16bn', 'resnet50', # 0, 1, 2 + 'swin_v1_t', 'swin_v1_s', # 3, 4 + 'swin_v1_b', 'swin_v1_l', # 5-bs9, 6-bs4 + 'pvt_v2_b0', 'pvt_v2_b1', # 7, 8 + 'pvt_v2_b2', 'pvt_v2_b5', # 9-bs10, 10-bs5 + ][6] + self.lateral_channels_in_collection = { + 'vgg16': [512, 256, 128, 64], 'vgg16bn': [512, 256, 128, 64], 'resnet50': [1024, 512, 256, 64], + 'pvt_v2_b2': [512, 320, 128, 64], 'pvt_v2_b5': [512, 320, 128, 64], + 'swin_v1_b': [1024, 512, 256, 128], 'swin_v1_l': [1536, 768, 384, 192], + 'swin_v1_t': [768, 384, 192, 96], 'swin_v1_s': [768, 384, 192, 96], + 'pvt_v2_b0': [256, 160, 64, 32], 'pvt_v2_b1': [512, 320, 128, 64], + }[self.bb] + if self.mul_scl_ipt == 'cat': + self.lateral_channels_in_collection = [channel * 2 for channel in self.lateral_channels_in_collection] + self.cxt = self.lateral_channels_in_collection[1:][::-1][-self.cxt_num:] if self.cxt_num else [] + + # MODEL settings - inactive + self.lat_blk = ['BasicLatBlk'][0] + self.dec_channels_inter = ['fixed', 'adap'][0] + self.refine = ['', 'itself', 'RefUNet', 'Refiner', 'RefinerPVTInChannels4'][0] + self.progressive_ref = self.refine and True + self.ender = self.progressive_ref and False + self.scale = self.progressive_ref and 2 + self.auxiliary_classification = False # Only for DIS5K, where class labels are saved in `dataset.py`. + self.refine_iteration = 1 + self.freeze_bb = False + self.model = [ + 'BiRefNet', + ][0] + if self.dec_blk == 'HierarAttDecBlk': + self.batch_size = 2 ** [0, 1, 2, 3, 4][2] + + # TRAINING settings - inactive + self.preproc_methods = ['flip', 'enhance', 'rotate', 'pepper', 'crop'][:4] + self.optimizer = ['Adam', 'AdamW'][1] + self.lr_decay_epochs = [1e5] # Set to negative N to decay the lr in the last N-th epoch. + self.lr_decay_rate = 0.5 + # Loss + self.lambdas_pix_last = { + # not 0 means opening this loss + # original rate -- 1 : 30 : 1.5 : 0.2, bce x 30 + 'bce': 30 * 1, # high performance + 'iou': 0.5 * 1, # 0 / 255 + 'iou_patch': 0.5 * 0, # 0 / 255, win_size = (64, 64) + 'mse': 150 * 0, # can smooth the saliency map + 'triplet': 3 * 0, + 'reg': 100 * 0, + 'ssim': 10 * 1, # help contours, + 'cnt': 5 * 0, # help contours + 'structure': 5 * 0, # structure loss from codes of MVANet. A little improvement on DIS-TE[1,2,3], a bit more decrease on DIS-TE4. + } + self.lambdas_cls = { + 'ce': 5.0 + } + # Adv + self.lambda_adv_g = 10. * 0 # turn to 0 to avoid adv training + self.lambda_adv_d = 3. 
* (self.lambda_adv_g > 0) + + # PATH settings - inactive + self.data_root_dir = os.path.join(self.sys_home_dir, 'datasets/dis') + self.weights_root_dir = os.path.join(self.sys_home_dir, 'weights') + self.weights = { + 'pvt_v2_b2': os.path.join(self.weights_root_dir, 'pvt_v2_b2.pth'), + 'pvt_v2_b5': os.path.join(self.weights_root_dir, ['pvt_v2_b5.pth', 'pvt_v2_b5_22k.pth'][0]), + 'swin_v1_b': os.path.join(self.weights_root_dir, ['swin_base_patch4_window12_384_22kto1k.pth', 'swin_base_patch4_window12_384_22k.pth'][0]), + 'swin_v1_l': os.path.join(self.weights_root_dir, ['swin_large_patch4_window12_384_22kto1k.pth', 'swin_large_patch4_window12_384_22k.pth'][0]), + 'swin_v1_t': os.path.join(self.weights_root_dir, ['swin_tiny_patch4_window7_224_22kto1k_finetune.pth'][0]), + 'swin_v1_s': os.path.join(self.weights_root_dir, ['swin_small_patch4_window7_224_22kto1k_finetune.pth'][0]), + 'pvt_v2_b0': os.path.join(self.weights_root_dir, ['pvt_v2_b0.pth'][0]), + 'pvt_v2_b1': os.path.join(self.weights_root_dir, ['pvt_v2_b1.pth'][0]), + } + + # Callbacks - inactive + self.verbose_eval = True + self.only_S_MAE = False + self.use_fp16 = False # Bugs. It may cause nan in training. + self.SDPA_enabled = False # Bugs. Slower and errors occur in multi-GPUs + + # others + self.device = [0, 'cpu'][0] # .to(0) == .to('cuda:0') + + self.batch_size_valid = 1 + self.rand_seed = 7 + # run_sh_file = [f for f in os.listdir('.') if 'train.sh' == f] + [os.path.join('..', f) for f in os.listdir('..') if 'train.sh' == f] + # with open(run_sh_file[0], 'r') as f: + # lines = f.readlines() + # self.save_last = int([l.strip() for l in lines if '"{}")'.format(self.task) in l and 'val_last=' in l][0].split('val_last=')[-1].split()[0]) + # self.save_step = int([l.strip() for l in lines if '"{}")'.format(self.task) in l and 'step=' in l][0].split('step=')[-1].split()[0]) + # self.val_step = [0, self.save_step][0] + + def print_task(self) -> None: + # Return task for choosing settings in shell scripts. 
+ print(self.task) + + + +### models/backbones/pvt_v2.py + +import torch +import torch.nn as nn +from functools import partial + +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ +from timm.models.registry import register_model + +import math + +# from config import Config + +# config = Config() + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.dwconv = DWConv(hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x, H, W): + x = self.fc1(x) + x = self.dwconv(x, H, W) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1): + super().__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." + + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + self.q = nn.Linear(dim, dim, bias=qkv_bias) + self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias) + self.attn_drop_prob = attn_drop + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + self.norm = nn.LayerNorm(dim) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x, H, W): + B, N, C = x.shape + q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + if self.sr_ratio > 1: + x_ = x.permute(0, 2, 1).reshape(B, C, H, W) + x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1) + x_ = self.norm(x_) + kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + else: + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + k, v = kv[0], kv[1] + + if config.SDPA_enabled: + x = torch.nn.functional.scaled_dot_product_attention( + q, k, v, + attn_mask=None, dropout_p=self.attn_drop_prob, is_causal=False + ).transpose(1, 2).reshape(B, N, C) + else: + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = 
attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, + attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x, H, W): + x = x + self.drop_path(self.attn(self.norm1(x), H, W)) + x = x + self.drop_path(self.mlp(self.norm2(x), H, W)) + + return x + + +class OverlapPatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=7, stride=4, in_channels=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + self.img_size = img_size + self.patch_size = patch_size + self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1] + self.num_patches = self.H * self.W + self.proj = nn.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=stride, + padding=(patch_size[0] // 2, patch_size[1] // 2)) + self.norm = nn.LayerNorm(embed_dim) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x): + x = self.proj(x) + _, _, H, W = x.shape + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + + return x, H, W + + +class PyramidVisionTransformerImpr(nn.Module): + def __init__(self, img_size=224, patch_size=16, in_channels=3, num_classes=1000, embed_dims=[64, 128, 256, 512], + num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0., + attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, + depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]): + super().__init__() + self.num_classes = num_classes + self.depths = depths + + # patch_embed + self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_channels=in_channels, + embed_dim=embed_dims[0]) + self.patch_embed2 = 
OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_channels=embed_dims[0], + embed_dim=embed_dims[1]) + self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_channels=embed_dims[1], + embed_dim=embed_dims[2]) + self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=2, in_channels=embed_dims[2], + embed_dim=embed_dims[3]) + + # transformer encoder + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + cur = 0 + self.block1 = nn.ModuleList([Block( + dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, + sr_ratio=sr_ratios[0]) + for i in range(depths[0])]) + self.norm1 = norm_layer(embed_dims[0]) + + cur += depths[0] + self.block2 = nn.ModuleList([Block( + dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, + sr_ratio=sr_ratios[1]) + for i in range(depths[1])]) + self.norm2 = norm_layer(embed_dims[1]) + + cur += depths[1] + self.block3 = nn.ModuleList([Block( + dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, + sr_ratio=sr_ratios[2]) + for i in range(depths[2])]) + self.norm3 = norm_layer(embed_dims[2]) + + cur += depths[2] + self.block4 = nn.ModuleList([Block( + dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, + sr_ratio=sr_ratios[3]) + for i in range(depths[3])]) + self.norm4 = norm_layer(embed_dims[3]) + + # classification head + # self.head = nn.Linear(embed_dims[3], num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = 1 + #load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger) + + def reset_drop_path(self, drop_path_rate): + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))] + cur = 0 + for i in range(self.depths[0]): + self.block1[i].drop_path.drop_prob = dpr[cur + i] + + cur += self.depths[0] + for i in range(self.depths[1]): + self.block2[i].drop_path.drop_prob = dpr[cur + i] + + cur += self.depths[1] + for i in range(self.depths[2]): + self.block3[i].drop_path.drop_prob = dpr[cur + i] + + cur += self.depths[2] + for i in range(self.depths[3]): + self.block4[i].drop_path.drop_prob = dpr[cur + i] + + def freeze_patch_emb(self): + self.patch_embed1.requires_grad = False + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'} # has 
pos_embed may be better + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + outs = [] + + # stage 1 + x, H, W = self.patch_embed1(x) + for i, blk in enumerate(self.block1): + x = blk(x, H, W) + x = self.norm1(x) + x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + outs.append(x) + + # stage 2 + x, H, W = self.patch_embed2(x) + for i, blk in enumerate(self.block2): + x = blk(x, H, W) + x = self.norm2(x) + x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + outs.append(x) + + # stage 3 + x, H, W = self.patch_embed3(x) + for i, blk in enumerate(self.block3): + x = blk(x, H, W) + x = self.norm3(x) + x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + outs.append(x) + + # stage 4 + x, H, W = self.patch_embed4(x) + for i, blk in enumerate(self.block4): + x = blk(x, H, W) + x = self.norm4(x) + x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + outs.append(x) + + return outs + + # return x.mean(dim=1) + + def forward(self, x): + x = self.forward_features(x) + # x = self.head(x) + + return x + + +class DWConv(nn.Module): + def __init__(self, dim=768): + super(DWConv, self).__init__() + self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim) + + def forward(self, x, H, W): + B, N, C = x.shape + x = x.transpose(1, 2).view(B, C, H, W).contiguous() + x = self.dwconv(x) + x = x.flatten(2).transpose(1, 2) + + return x + + +def _conv_filter(state_dict, patch_size=16): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + for k, v in state_dict.items(): + if 'patch_embed.proj.weight' in k: + v = v.reshape((v.shape[0], 3, patch_size, patch_size)) + out_dict[k] = v + + return out_dict + + +## @register_model +class pvt_v2_b0(PyramidVisionTransformerImpr): + def __init__(self, **kwargs): + super(pvt_v2_b0, self).__init__( + patch_size=4, embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], + drop_rate=0.0, drop_path_rate=0.1) + + + +## @register_model +class pvt_v2_b1(PyramidVisionTransformerImpr): + def __init__(self, **kwargs): + super(pvt_v2_b1, self).__init__( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], + drop_rate=0.0, drop_path_rate=0.1) + +## @register_model +class pvt_v2_b2(PyramidVisionTransformerImpr): + def __init__(self, in_channels=3, **kwargs): + super(pvt_v2_b2, self).__init__( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], + drop_rate=0.0, drop_path_rate=0.1, in_channels=in_channels) + +## @register_model +class pvt_v2_b3(PyramidVisionTransformerImpr): + def __init__(self, **kwargs): + super(pvt_v2_b3, self).__init__( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1], + drop_rate=0.0, drop_path_rate=0.1) + +## @register_model +class pvt_v2_b4(PyramidVisionTransformerImpr): + 
def __init__(self, **kwargs): + super(pvt_v2_b4, self).__init__( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1], + drop_rate=0.0, drop_path_rate=0.1) + + +## @register_model +class pvt_v2_b5(PyramidVisionTransformerImpr): + def __init__(self, **kwargs): + super(pvt_v2_b5, self).__init__( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 6, 40, 3], sr_ratios=[8, 4, 2, 1], + drop_rate=0.0, drop_path_rate=0.1) + + + +### models/backbones/swin_v1.py + +# -------------------------------------------------------- +# Swin Transformer +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu, Yutong Lin, Yixuan Wei +# -------------------------------------------------------- + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +import numpy as np +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ + +# from config import Config + + +# config = Config() + +class Mlp(nn.Module): + """ Multilayer perceptron.""" + + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, window_size, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + """ Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 + """ + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing='ij')) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop_prob = attn_drop + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """ Forward function. + + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + + if config.SDPA_enabled: + x = torch.nn.functional.scaled_dot_product_attention( + q, k, v, + attn_mask=None, dropout_p=self.attn_drop_prob, is_causal=False + ).transpose(1, 2).reshape(B_, N, C) + else: + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerBlock(nn.Module): + """ Swin Transformer Block. + + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. 
Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, num_heads, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, + qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + self.H = None + self.W = None + + def forward(self, x, mask_matrix): + """ Forward function. + + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. + mask_matrix: Attention mask for cyclic shift. + """ + B, L, C = x.shape + H, W = self.H, self.W + assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # pad feature maps to multiples of window size + pad_l = pad_t = 0 + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + attn_mask = mask_matrix + else: + shifted_x = x + attn_mask = None + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + +class PatchMerging(nn.Module): + """ Patch Merging Layer + + Args: + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm + """ + def __init__(self, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x, H, W): + """ Forward function. + + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. + """ + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + x = x.view(B, H, W, C) + + # padding + pad_input = (H % 2 == 1) or (W % 2 == 1) + if pad_input: + x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + +class BasicLayer(nn.Module): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of feature channels + depth (int): Depths of this stage. + num_heads (int): Number of attention head. + window_size (int): Local window size. Default: 7. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__(self, + dim, + depth, + num_heads, + window_size=7, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + norm_layer=nn.LayerNorm, + downsample=None, + use_checkpoint=False): + super().__init__() + self.window_size = window_size + self.shift_size = window_size // 2 + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock( + dim=dim, + num_heads=num_heads, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x, H, W): + """ Forward function. + + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. 
+ """ + + # calculate attention mask for SW-MSA + Hp = int(np.ceil(H / self.window_size)) * self.window_size + Wp = int(np.ceil(W / self.window_size)) * self.window_size + img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + + for blk in self.blocks: + blk.H, blk.W = H, W + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x, attn_mask) + else: + x = blk(x, attn_mask) + if self.downsample is not None: + x_down = self.downsample(x, H, W) + Wh, Ww = (H + 1) // 2, (W + 1) // 2 + return x, H, W, x_down, Wh, Ww + else: + return x, H, W, x, H, W + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + + Args: + patch_size (int): Patch token size. Default: 4. + in_channels (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. Default: None + """ + + def __init__(self, patch_size=4, in_channels=3, embed_dim=96, norm_layer=None): + super().__init__() + patch_size = to_2tuple(patch_size) + self.patch_size = patch_size + + self.in_channels = in_channels + self.embed_dim = embed_dim + + self.proj = nn.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + """Forward function.""" + # padding + _, _, H, W = x.size() + if W % self.patch_size[1] != 0: + x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1])) + if H % self.patch_size[0] != 0: + x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0])) + + x = self.proj(x) # B C Wh Ww + if self.norm is not None: + Wh, Ww = x.size(2), x.size(3) + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww) + + return x + + +class SwinTransformer(nn.Module): + """ Swin Transformer backbone. + A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + + Args: + pretrain_img_size (int): Input image size for training the pretrained model, + used in absolute postion embedding. Default 224. + patch_size (int | tuple(int)): Patch size. Default: 4. + in_channels (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + depths (tuple[int]): Depths of each Swin Transformer stage. + num_heads (tuple[int]): Number of attention head of each stage. + window_size (int): Window size. Default: 7. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. + drop_rate (float): Dropout rate. 
+ attn_drop_rate (float): Attention dropout rate. Default: 0. + drop_path_rate (float): Stochastic depth rate. Default: 0.2. + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False. + patch_norm (bool): If True, add normalization after patch embedding. Default: True. + out_indices (Sequence[int]): Output from which stages. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__(self, + pretrain_img_size=224, + patch_size=4, + in_channels=3, + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + norm_layer=nn.LayerNorm, + ape=False, + patch_norm=True, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + use_checkpoint=False): + super().__init__() + + self.pretrain_img_size = pretrain_img_size + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.out_indices = out_indices + self.frozen_stages = frozen_stages + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + patch_size=patch_size, in_channels=in_channels, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + + # absolute position embedding + if self.ape: + pretrain_img_size = to_2tuple(pretrain_img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]] + + self.absolute_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1])) + trunc_normal_(self.absolute_pos_embed, std=.02) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = BasicLayer( + dim=int(embed_dim * 2 ** i_layer), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint) + self.layers.append(layer) + + num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] + self.num_features = num_features + + # add a norm layer for each output + for i_layer in out_indices: + layer = norm_layer(num_features[i_layer]) + layer_name = f'norm{i_layer}' + self.add_module(layer_name, layer) + + self._freeze_stages() + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + if self.frozen_stages >= 1 and self.ape: + self.absolute_pos_embed.requires_grad = False + + if self.frozen_stages >= 2: + self.pos_drop.eval() + for i in range(0, self.frozen_stages - 1): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + + def forward(self, x): + """Forward function.""" + x = self.patch_embed(x) + + Wh, Ww = x.size(2), x.size(3) + if self.ape: + # 
interpolate the position embedding to the corresponding size + absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic') + x = (x + absolute_pos_embed) # B Wh*Ww C + + outs = []#x.contiguous()] + x = x.flatten(2).transpose(1, 2) + x = self.pos_drop(x) + for i in range(self.num_layers): + layer = self.layers[i] + x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) + + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + x_out = norm_layer(x_out) + + out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() + outs.append(out) + + return tuple(outs) + + def train(self, mode=True): + """Convert the model into training mode while keep layers freezed.""" + super(SwinTransformer, self).train(mode) + self._freeze_stages() + +def swin_v1_t(): + model = SwinTransformer(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7) + return model + +def swin_v1_s(): + model = SwinTransformer(embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7) + return model + +def swin_v1_b(): + model = SwinTransformer(embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12) + return model + +def swin_v1_l(): + model = SwinTransformer(embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=12) + return model + + + +### models/modules/deform_conv.py + +import torch +import torch.nn as nn +from torchvision.ops import deform_conv2d + + +class DeformableConv2d(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False): + + super(DeformableConv2d, self).__init__() + + assert type(kernel_size) == tuple or type(kernel_size) == int + + kernel_size = kernel_size if type(kernel_size) == tuple else (kernel_size, kernel_size) + self.stride = stride if type(stride) == tuple else (stride, stride) + self.padding = padding + + self.offset_conv = nn.Conv2d(in_channels, + 2 * kernel_size[0] * kernel_size[1], + kernel_size=kernel_size, + stride=stride, + padding=self.padding, + bias=True) + + nn.init.constant_(self.offset_conv.weight, 0.) + nn.init.constant_(self.offset_conv.bias, 0.) + + self.modulator_conv = nn.Conv2d(in_channels, + 1 * kernel_size[0] * kernel_size[1], + kernel_size=kernel_size, + stride=stride, + padding=self.padding, + bias=True) + + nn.init.constant_(self.modulator_conv.weight, 0.) + nn.init.constant_(self.modulator_conv.bias, 0.) + + self.regular_conv = nn.Conv2d(in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=self.padding, + bias=bias) + + def forward(self, x): + #h, w = x.shape[2:] + #max_offset = max(h, w)/4. + + offset = self.offset_conv(x)#.clamp(-max_offset, max_offset) + modulator = 2. 
* torch.sigmoid(self.modulator_conv(x)) + + x = deform_conv2d( + input=x, + offset=offset, + weight=self.regular_conv.weight, + bias=self.regular_conv.bias, + padding=self.padding, + mask=modulator, + stride=self.stride, + ) + return x + + + + +### utils.py + +import torch.nn as nn + + +def build_act_layer(act_layer): + if act_layer == 'ReLU': + return nn.ReLU(inplace=True) + elif act_layer == 'SiLU': + return nn.SiLU(inplace=True) + elif act_layer == 'GELU': + return nn.GELU() + + raise NotImplementedError(f'build_act_layer does not support {act_layer}') + + +def build_norm_layer(dim, + norm_layer, + in_format='channels_last', + out_format='channels_last', + eps=1e-6): + layers = [] + if norm_layer == 'BN': + if in_format == 'channels_last': + layers.append(to_channels_first()) + layers.append(nn.BatchNorm2d(dim)) + if out_format == 'channels_last': + layers.append(to_channels_last()) + elif norm_layer == 'LN': + if in_format == 'channels_first': + layers.append(to_channels_last()) + layers.append(nn.LayerNorm(dim, eps=eps)) + if out_format == 'channels_first': + layers.append(to_channels_first()) + else: + raise NotImplementedError( + f'build_norm_layer does not support {norm_layer}') + return nn.Sequential(*layers) + + +class to_channels_first(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.permute(0, 3, 1, 2) + + +class to_channels_last(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.permute(0, 2, 3, 1) + + + +### dataset.py + +_class_labels_TR_sorted = ( + 'Airplane, Ant, Antenna, Archery, Axe, BabyCarriage, Bag, BalanceBeam, Balcony, Balloon, Basket, BasketballHoop, Beatle, Bed, Bee, Bench, Bicycle, ' + 'BicycleFrame, BicycleStand, Boat, Bonsai, BoomLift, Bridge, BunkBed, Butterfly, Button, Cable, CableLift, Cage, Camcorder, Cannon, Canoe, Car, ' + 'CarParkDropArm, Carriage, Cart, Caterpillar, CeilingLamp, Centipede, Chair, Clip, Clock, Clothes, CoatHanger, Comb, ConcretePumpTruck, Crack, Crane, ' + 'Cup, DentalChair, Desk, DeskChair, Diagram, DishRack, DoorHandle, Dragonfish, Dragonfly, Drum, Earphone, Easel, ElectricIron, Excavator, Eyeglasses, ' + 'Fan, Fence, Fencing, FerrisWheel, FireExtinguisher, Fishing, Flag, FloorLamp, Forklift, GasStation, Gate, Gear, Goal, Golf, GymEquipment, Hammock, ' + 'Handcart, Handcraft, Handrail, HangGlider, Harp, Harvester, Headset, Helicopter, Helmet, Hook, HorizontalBar, Hydrovalve, IroningTable, Jewelry, Key, ' + 'KidsPlayground, Kitchenware, Kite, Knife, Ladder, LaundryRack, Lightning, Lobster, Locust, Machine, MachineGun, MagazineRack, Mantis, Medal, MemorialArchway, ' + 'Microphone, Missile, MobileHolder, Monitor, Mosquito, Motorcycle, MovingTrolley, Mower, MusicPlayer, MusicStand, ObservationTower, Octopus, OilWell, ' + 'OlympicLogo, OperatingTable, OutdoorFitnessEquipment, Parachute, Pavilion, Piano, Pipe, PlowHarrow, PoleVault, Punchbag, Rack, Racket, Rifle, Ring, Robot, ' + 'RockClimbing, Rope, Sailboat, Satellite, Scaffold, Scale, Scissor, Scooter, Sculpture, Seadragon, Seahorse, Seal, SewingMachine, Ship, Shoe, ShoppingCart, ' + 'ShoppingTrolley, Shower, Shrimp, Signboard, Skateboarding, Skeleton, Skiing, Spade, SpeedBoat, Spider, Spoon, Stair, Stand, Stationary, SteeringWheel, ' + 'Stethoscope, Stool, Stove, StreetLamp, SweetStand, Swing, Sword, TV, Table, TableChair, TableLamp, TableTennis, Tank, Tapeline, Teapot, Telescope, Tent, ' + 'TobaccoPipe, Toy, Tractor, TrafficLight, TrafficSign, Trampoline, TransmissionTower, Tree, Tricycle, 
+
+
+### dataset.py
+
+_class_labels_TR_sorted = (
+    'Airplane, Ant, Antenna, Archery, Axe, BabyCarriage, Bag, BalanceBeam, Balcony, Balloon, Basket, BasketballHoop, Beatle, Bed, Bee, Bench, Bicycle, '
+    'BicycleFrame, BicycleStand, Boat, Bonsai, BoomLift, Bridge, BunkBed, Butterfly, Button, Cable, CableLift, Cage, Camcorder, Cannon, Canoe, Car, '
+    'CarParkDropArm, Carriage, Cart, Caterpillar, CeilingLamp, Centipede, Chair, Clip, Clock, Clothes, CoatHanger, Comb, ConcretePumpTruck, Crack, Crane, '
+    'Cup, DentalChair, Desk, DeskChair, Diagram, DishRack, DoorHandle, Dragonfish, Dragonfly, Drum, Earphone, Easel, ElectricIron, Excavator, Eyeglasses, '
+    'Fan, Fence, Fencing, FerrisWheel, FireExtinguisher, Fishing, Flag, FloorLamp, Forklift, GasStation, Gate, Gear, Goal, Golf, GymEquipment, Hammock, '
+    'Handcart, Handcraft, Handrail, HangGlider, Harp, Harvester, Headset, Helicopter, Helmet, Hook, HorizontalBar, Hydrovalve, IroningTable, Jewelry, Key, '
+    'KidsPlayground, Kitchenware, Kite, Knife, Ladder, LaundryRack, Lightning, Lobster, Locust, Machine, MachineGun, MagazineRack, Mantis, Medal, MemorialArchway, '
+    'Microphone, Missile, MobileHolder, Monitor, Mosquito, Motorcycle, MovingTrolley, Mower, MusicPlayer, MusicStand, ObservationTower, Octopus, OilWell, '
+    'OlympicLogo, OperatingTable, OutdoorFitnessEquipment, Parachute, Pavilion, Piano, Pipe, PlowHarrow, PoleVault, Punchbag, Rack, Racket, Rifle, Ring, Robot, '
+    'RockClimbing, Rope, Sailboat, Satellite, Scaffold, Scale, Scissor, Scooter, Sculpture, Seadragon, Seahorse, Seal, SewingMachine, Ship, Shoe, ShoppingCart, '
+    'ShoppingTrolley, Shower, Shrimp, Signboard, Skateboarding, Skeleton, Skiing, Spade, SpeedBoat, Spider, Spoon, Stair, Stand, Stationary, SteeringWheel, '
+    'Stethoscope, Stool, Stove, StreetLamp, SweetStand, Swing, Sword, TV, Table, TableChair, TableLamp, TableTennis, Tank, Tapeline, Teapot, Telescope, Tent, '
+    'TobaccoPipe, Toy, Tractor, TrafficLight, TrafficSign, Trampoline, TransmissionTower, Tree, Tricycle, TrimmerCover, Tripod, Trombone, Truck, Trumpet, Tuba, '
+    'UAV, Umbrella, UnevenBars, UtilityPole, VacuumCleaner, Violin, Wakesurfing, Watch, WaterTower, WateringPot, Well, WellLid, Wheel, Wheelchair, WindTurbine, Windmill, WineGlass, WireWhisk, Yacht'
+)
+class_labels_TR_sorted = _class_labels_TR_sorted.split(', ')
+
+
+### models/backbones/build_backbones.py
+
+import torch
+import torch.nn as nn
+from collections import OrderedDict
+from torchvision.models import vgg16, vgg16_bn, VGG16_Weights, VGG16_BN_Weights, resnet50, ResNet50_Weights
+# from models.pvt_v2 import pvt_v2_b0, pvt_v2_b1, pvt_v2_b2, pvt_v2_b5
+# from models.swin_v1 import swin_v1_t, swin_v1_s, swin_v1_b, swin_v1_l
+# from config import Config
+
+
+config = Config()
+
+
+def build_backbone(bb_name, pretrained=True, params_settings=''):
+    if bb_name == 'vgg16':
+        bb_net = list(vgg16(pretrained=VGG16_Weights.DEFAULT if pretrained else None).children())[0]
+        bb = nn.Sequential(OrderedDict({'conv1': bb_net[:4], 'conv2': bb_net[4:9], 'conv3': bb_net[9:16], 'conv4': bb_net[16:23]}))
+    elif bb_name == 'vgg16bn':
+        bb_net = list(vgg16_bn(pretrained=VGG16_BN_Weights.DEFAULT if pretrained else None).children())[0]
+        bb = nn.Sequential(OrderedDict({'conv1': bb_net[:6], 'conv2': bb_net[6:13], 'conv3': bb_net[13:23], 'conv4': bb_net[23:33]}))
+    elif bb_name == 'resnet50':
+        bb_net = list(resnet50(pretrained=ResNet50_Weights.DEFAULT if pretrained else None).children())
+        bb = nn.Sequential(OrderedDict({'conv1': nn.Sequential(*bb_net[0:3]), 'conv2': bb_net[4], 'conv3': bb_net[5], 'conv4': bb_net[6]}))
+    else:
+        bb = eval('{}({})'.format(bb_name, params_settings))
+        if pretrained:
+            bb = load_weights(bb, bb_name)
+    return bb
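+
+# Quick sanity check (an illustrative sketch; non-torchvision backbone names
+# are resolved via eval over the constructors defined earlier in this file):
+#   bb = build_backbone('swin_v1_l', pretrained=False)
+#   feats = bb(torch.rand(1, 3, 224, 224))  # tuple of 4 stage feature maps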
+
+def load_weights(model, model_name):
+    save_model = torch.load(config.weights[model_name], map_location='cpu')
+    model_dict = model.state_dict()
+    # Ignore weights whose shapes no longer match the (possibly modified) backbone.
+    state_dict = {k: v if v.size() == model_dict[k].size() else model_dict[k] for k, v in save_model.items() if k in model_dict.keys()}
+    if not state_dict:
+        save_model_keys = list(save_model.keys())
+        sub_item = save_model_keys[0] if len(save_model_keys) == 1 else None
+        state_dict = {k: v if v.size() == model_dict[k].size() else model_dict[k] for k, v in save_model[sub_item].items() if k in model_dict.keys()}
+        if not state_dict or not sub_item:
+            print('Weights were not successfully loaded. Check the state dict of the weights file.')
+            return None
+        else:
+            print('Found correct weights in the "{}" item of loaded state_dict.'.format(sub_item))
+    model_dict.update(state_dict)
+    model.load_state_dict(model_dict)
+    return model
+
+
+### models/modules/decoder_blocks.py
+
+import torch
+import torch.nn as nn
+# from models.aspp import ASPP, ASPPDeformable
+# from config import Config
+
+
+# config = Config()
+
+
+class BasicDecBlk(nn.Module):
+    def __init__(self, in_channels=64, out_channels=64, inter_channels=64):
+        super(BasicDecBlk, self).__init__()
+        inter_channels = in_channels // 4 if config.dec_channels_inter == 'adap' else 64
+        self.conv_in = nn.Conv2d(in_channels, inter_channels, 3, 1, padding=1)
+        self.relu_in = nn.ReLU(inplace=True)
+        if config.dec_att == 'ASPP':
+            self.dec_att = ASPP(in_channels=inter_channels)
+        elif config.dec_att == 'ASPPDeformable':
+            self.dec_att = ASPPDeformable(in_channels=inter_channels)
+        self.conv_out = nn.Conv2d(inter_channels, out_channels, 3, 1, padding=1)
+        self.bn_in = nn.BatchNorm2d(inter_channels) if config.batch_size > 1 else nn.Identity()
+        self.bn_out = nn.BatchNorm2d(out_channels) if config.batch_size > 1 else nn.Identity()
+
+    def forward(self, x):
+        x = self.conv_in(x)
+        x = self.bn_in(x)
+        x = self.relu_in(x)
+        if hasattr(self, 'dec_att'):
+            x = self.dec_att(x)
+        x = self.conv_out(x)
+        x = self.bn_out(x)
+        return x
+
+
+class ResBlk(nn.Module):
+    def __init__(self, in_channels=64, out_channels=None, inter_channels=64):
+        super(ResBlk, self).__init__()
+        if out_channels is None:
+            out_channels = in_channels
+        inter_channels = in_channels // 4 if config.dec_channels_inter == 'adap' else 64
+
+        self.conv_in = nn.Conv2d(in_channels, inter_channels, 3, 1, padding=1)
+        self.bn_in = nn.BatchNorm2d(inter_channels) if config.batch_size > 1 else nn.Identity()
+        self.relu_in = nn.ReLU(inplace=True)
+
+        if config.dec_att == 'ASPP':
+            self.dec_att = ASPP(in_channels=inter_channels)
+        elif config.dec_att == 'ASPPDeformable':
+            self.dec_att = ASPPDeformable(in_channels=inter_channels)
+
+        self.conv_out = nn.Conv2d(inter_channels, out_channels, 3, 1, padding=1)
+        self.bn_out = nn.BatchNorm2d(out_channels) if config.batch_size > 1 else nn.Identity()
+
+        self.conv_resi = nn.Conv2d(in_channels, out_channels, 1, 1, 0)
+
+    def forward(self, x):
+        _x = self.conv_resi(x)
+        x = self.conv_in(x)
+        x = self.bn_in(x)
+        x = self.relu_in(x)
+        if hasattr(self, 'dec_att'):
+            x = self.dec_att(x)
+        x = self.conv_out(x)
+        x = self.bn_out(x)
+        return x + _x
+
+
+### models/modules/lateral_blocks.py
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from functools import partial
+
+# from config import Config
+
+
+# config = Config()
+
+
+class BasicLatBlk(nn.Module):
+    def __init__(self, in_channels=64, out_channels=64, inter_channels=64):
+        super(BasicLatBlk, self).__init__()
+        inter_channels = in_channels // 4 if config.dec_channels_inter == 'adap' else 64
+        self.conv = nn.Conv2d(in_channels, out_channels, 1, 1, 0)
+
+    def forward(self, x):
+        x = self.conv(x)
+        return x
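+
+# Design note: BasicDecBlk is conv -> BN -> ReLU -> (optional ASPP attention)
+# -> conv -> BN, while ResBlk runs the same body plus a 1x1-projected skip
+# (out = body(x) + conv_resi(x)), which keeps gradients flowing when decoder
+# blocks are stacked. BatchNorm falls back to Identity whenever
+# config.batch_size == 1, since per-batch statistics would be degenerate.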
+
+
+### models/modules/aspp.py
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+# from models.deform_conv import DeformableConv2d
+# from config import Config
+
+
+# config = Config()
+
+
+class _ASPPModule(nn.Module):
+    def __init__(self, in_channels, planes, kernel_size, padding, dilation):
+        super(_ASPPModule, self).__init__()
+        self.atrous_conv = nn.Conv2d(in_channels, planes, kernel_size=kernel_size,
+                                     stride=1, padding=padding, dilation=dilation, bias=False)
+        self.bn = nn.BatchNorm2d(planes) if config.batch_size > 1 else nn.Identity()
+        self.relu = nn.ReLU(inplace=True)
+
+    def forward(self, x):
+        x = self.atrous_conv(x)
+        x = self.bn(x)
+
+        return self.relu(x)
+
+
+class ASPP(nn.Module):
+    def __init__(self, in_channels=64, out_channels=None, output_stride=16):
+        super(ASPP, self).__init__()
+        self.down_scale = 1
+        if out_channels is None:
+            out_channels = in_channels
+        self.in_channelster = 256 // self.down_scale
+        if output_stride == 16:
+            dilations = [1, 6, 12, 18]
+        elif output_stride == 8:
+            dilations = [1, 12, 24, 36]
+        else:
+            raise NotImplementedError
+
+        self.aspp1 = _ASPPModule(in_channels, self.in_channelster, 1, padding=0, dilation=dilations[0])
+        self.aspp2 = _ASPPModule(in_channels, self.in_channelster, 3, padding=dilations[1], dilation=dilations[1])
+        self.aspp3 = _ASPPModule(in_channels, self.in_channelster, 3, padding=dilations[2], dilation=dilations[2])
+        self.aspp4 = _ASPPModule(in_channels, self.in_channelster, 3, padding=dilations[3], dilation=dilations[3])
+
+        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
+                                             nn.Conv2d(in_channels, self.in_channelster, 1, stride=1, bias=False),
+                                             nn.BatchNorm2d(self.in_channelster) if config.batch_size > 1 else nn.Identity(),
+                                             nn.ReLU(inplace=True))
+        self.conv1 = nn.Conv2d(self.in_channelster * 5, out_channels, 1, bias=False)
+        self.bn1 = nn.BatchNorm2d(out_channels) if config.batch_size > 1 else nn.Identity()
+        self.relu = nn.ReLU(inplace=True)
+        self.dropout = nn.Dropout(0.5)
+
+    def forward(self, x):
+        x1 = self.aspp1(x)
+        x2 = self.aspp2(x)
+        x3 = self.aspp3(x)
+        x4 = self.aspp4(x)
+        x5 = self.global_avg_pool(x)
+        x5 = F.interpolate(x5, size=x1.size()[2:], mode='bilinear', align_corners=True)
+        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
+
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.relu(x)
+
+        return self.dropout(x)
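+
+# For the 3x3 atrous branches, padding equals dilation, so every branch keeps
+# the input's spatial size; the five branches (one 1x1, three atrous, one
+# pooled-and-upsampled) can then be concatenated along channels before the
+# final 1x1 fusion conv (hence in_channelster * 5).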
+
+
+##################### Deformable
+class _ASPPModuleDeformable(nn.Module):
+    def __init__(self, in_channels, planes, kernel_size, padding):
+        super(_ASPPModuleDeformable, self).__init__()
+        self.atrous_conv = DeformableConv2d(in_channels, planes, kernel_size=kernel_size,
+                                            stride=1, padding=padding, bias=False)
+        self.bn = nn.BatchNorm2d(planes) if config.batch_size > 1 else nn.Identity()
+        self.relu = nn.ReLU(inplace=True)
+
+    def forward(self, x):
+        x = self.atrous_conv(x)
+        x = self.bn(x)
+
+        return self.relu(x)
+
+
+class ASPPDeformable(nn.Module):
+    def __init__(self, in_channels, out_channels=None, parallel_block_sizes=[1, 3, 7]):
+        super(ASPPDeformable, self).__init__()
+        self.down_scale = 1
+        if out_channels is None:
+            out_channels = in_channels
+        self.in_channelster = 256 // self.down_scale
+
+        self.aspp1 = _ASPPModuleDeformable(in_channels, self.in_channelster, 1, padding=0)
+        self.aspp_deforms = nn.ModuleList([
+            _ASPPModuleDeformable(in_channels, self.in_channelster, conv_size, padding=int(conv_size//2)) for conv_size in parallel_block_sizes
+        ])
+
+        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
+                                             nn.Conv2d(in_channels, self.in_channelster, 1, stride=1, bias=False),
+                                             nn.BatchNorm2d(self.in_channelster) if config.batch_size > 1 else nn.Identity(),
+                                             nn.ReLU(inplace=True))
+        self.conv1 = nn.Conv2d(self.in_channelster * (2 + len(self.aspp_deforms)), out_channels, 1, bias=False)
+        self.bn1 = nn.BatchNorm2d(out_channels) if config.batch_size > 1 else nn.Identity()
+        self.relu = nn.ReLU(inplace=True)
+        self.dropout = nn.Dropout(0.5)
+
+    def forward(self, x):
+        x1 = self.aspp1(x)
+        x_aspp_deforms = [aspp_deform(x) for aspp_deform in self.aspp_deforms]
+        x5 = self.global_avg_pool(x)
+        x5 = F.interpolate(x5, size=x1.size()[2:], mode='bilinear', align_corners=True)
+        x = torch.cat((x1, *x_aspp_deforms, x5), dim=1)
+
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.relu(x)
+
+        return self.dropout(x)
+
+
+### models/refinement/refiner.py
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from collections import OrderedDict
+from torchvision.models import vgg16, vgg16_bn
+from torchvision.models import resnet50
+
+# from config import Config
+# from dataset import class_labels_TR_sorted
+# from models.build_backbone import build_backbone
+# from models.decoder_blocks import BasicDecBlk
+# from models.lateral_blocks import BasicLatBlk
+# from models.ing import *
+# from models.stem_layer import StemLayer
+
+
+class RefinerPVTInChannels4(nn.Module):
+    def __init__(self, in_channels=3+1):
+        super(RefinerPVTInChannels4, self).__init__()
+        self.config = Config()
+        self.epoch = 1
+        self.bb = build_backbone(self.config.bb, params_settings='in_channels=4')
+
+        lateral_channels_in_collection = {
+            'vgg16': [512, 256, 128, 64], 'vgg16bn': [512, 256, 128, 64], 'resnet50': [1024, 512, 256, 64],
+            'pvt_v2_b2': [512, 320, 128, 64], 'pvt_v2_b5': [512, 320, 128, 64],
+            'swin_v1_b': [1024, 512, 256, 128], 'swin_v1_l': [1536, 768, 384, 192],
+        }
+        channels = lateral_channels_in_collection[self.config.bb]
+        self.squeeze_module = BasicDecBlk(channels[0], channels[0])
+
+        self.decoder = Decoder(channels)
+
+        # Optionally freeze the backbone (disabled by default):
+        # for key, value in self.named_parameters():
+        #     if 'bb.' in key:
+        #         value.requires_grad = False
+
+    def forward(self, x):
+        if isinstance(x, list):
+            x = torch.cat(x, dim=1)
+        ########## Encoder ##########
+        if self.config.bb in ['vgg16', 'vgg16bn', 'resnet50']:
+            x1 = self.bb.conv1(x)
+            x2 = self.bb.conv2(x1)
+            x3 = self.bb.conv3(x2)
+            x4 = self.bb.conv4(x3)
+        else:
+            x1, x2, x3, x4 = self.bb(x)
+
+        x4 = self.squeeze_module(x4)
+
+        ########## Decoder ##########
+        features = [x, x1, x2, x3, x4]
+        scaled_preds = self.decoder(features)
+
+        return scaled_preds
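+
+# Both refiners take a 4-channel input (the RGB image concatenated with the
+# coarse 1-channel prediction). RefinerPVTInChannels4 widens the backbone stem
+# to 4 input channels, whereas Refiner below first squeezes 4 -> 3 channels
+# with a StemLayer so an off-the-shelf RGB backbone can be reused.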
+
+class Refiner(nn.Module):
+    def __init__(self, in_channels=3+1):
+        super(Refiner, self).__init__()
+        self.config = Config()
+        self.epoch = 1
+        self.stem_layer = StemLayer(in_channels=in_channels, inter_channels=48, out_channels=3, norm_layer='BN' if self.config.batch_size > 1 else 'LN')
+        self.bb = build_backbone(self.config.bb)
+
+        lateral_channels_in_collection = {
+            'vgg16': [512, 256, 128, 64], 'vgg16bn': [512, 256, 128, 64], 'resnet50': [1024, 512, 256, 64],
+            'pvt_v2_b2': [512, 320, 128, 64], 'pvt_v2_b5': [512, 320, 128, 64],
+            'swin_v1_b': [1024, 512, 256, 128], 'swin_v1_l': [1536, 768, 384, 192],
+        }
+        channels = lateral_channels_in_collection[self.config.bb]
+        self.squeeze_module = BasicDecBlk(channels[0], channels[0])
+
+        self.decoder = Decoder(channels)
+
+        # Optionally freeze the backbone (disabled by default):
+        # for key, value in self.named_parameters():
+        #     if 'bb.' in key:
+        #         value.requires_grad = False
+
+    def forward(self, x):
+        if isinstance(x, list):
+            x = torch.cat(x, dim=1)
+        x = self.stem_layer(x)
+        ########## Encoder ##########
+        if self.config.bb in ['vgg16', 'vgg16bn', 'resnet50']:
+            x1 = self.bb.conv1(x)
+            x2 = self.bb.conv2(x1)
+            x3 = self.bb.conv3(x2)
+            x4 = self.bb.conv4(x3)
+        else:
+            x1, x2, x3, x4 = self.bb(x)
+
+        x4 = self.squeeze_module(x4)
+
+        ########## Decoder ##########
+        features = [x, x1, x2, x3, x4]
+        scaled_preds = self.decoder(features)
+
+        return scaled_preds
+
+
+class Decoder(nn.Module):
+    def __init__(self, channels):
+        super(Decoder, self).__init__()
+        self.config = Config()
+        DecoderBlock = eval('BasicDecBlk')
+        LateralBlock = eval('BasicLatBlk')
+
+        self.decoder_block4 = DecoderBlock(channels[0], channels[1])
+        self.decoder_block3 = DecoderBlock(channels[1], channels[2])
+        self.decoder_block2 = DecoderBlock(channels[2], channels[3])
+        self.decoder_block1 = DecoderBlock(channels[3], channels[3]//2)
+
+        self.lateral_block4 = LateralBlock(channels[1], channels[1])
+        self.lateral_block3 = LateralBlock(channels[2], channels[2])
+        self.lateral_block2 = LateralBlock(channels[3], channels[3])
+
+        if self.config.ms_supervision:
+            self.conv_ms_spvn_4 = nn.Conv2d(channels[1], 1, 1, 1, 0)
+            self.conv_ms_spvn_3 = nn.Conv2d(channels[2], 1, 1, 1, 0)
+            self.conv_ms_spvn_2 = nn.Conv2d(channels[3], 1, 1, 1, 0)
+        self.conv_out1 = nn.Sequential(nn.Conv2d(channels[3]//2, 1, 1, 1, 0))
+
+    def forward(self, features):
+        x, x1, x2, x3, x4 = features
+        outs = []
+        p4 = self.decoder_block4(x4)
+        _p4 = F.interpolate(p4, size=x3.shape[2:], mode='bilinear', align_corners=True)
+        _p3 = _p4 + self.lateral_block4(x3)
+
+        p3 = self.decoder_block3(_p3)
+        _p3 = F.interpolate(p3, size=x2.shape[2:], mode='bilinear', align_corners=True)
+        _p2 = _p3 + self.lateral_block3(x2)
+
+        p2 = self.decoder_block2(_p2)
+        _p2 = F.interpolate(p2, size=x1.shape[2:], mode='bilinear', align_corners=True)
+        _p1 = _p2 + self.lateral_block2(x1)
+
+        _p1 = self.decoder_block1(_p1)
+        _p1 = F.interpolate(_p1, size=x.shape[2:], mode='bilinear', align_corners=True)
+        p1_out = self.conv_out1(_p1)
+
+        if self.config.ms_supervision:
+            outs.append(self.conv_ms_spvn_4(p4))
+            outs.append(self.conv_ms_spvn_3(p3))
+            outs.append(self.conv_ms_spvn_2(p2))
+        outs.append(p1_out)
+        return outs
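+
+# The decoder is a plain FPN-style top-down pass: each stage upsamples its
+# output to the next skip connection's resolution, adds a 1x1 lateral
+# projection of that skip, and refines the sum with the next DecoderBlock;
+# the extra 1-channel heads exist only when ms_supervision is enabled.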
+
+class RefUNet(nn.Module):
+    # Refinement
+    def __init__(self, in_channels=3+1):
+        super(RefUNet, self).__init__()
+        self.encoder_1 = nn.Sequential(
+            nn.Conv2d(in_channels, 64, 3, 1, 1),
+            nn.Conv2d(64, 64, 3, 1, 1),
+            nn.BatchNorm2d(64),
+            nn.ReLU(inplace=True)
+        )
+
+        self.encoder_2 = nn.Sequential(
+            nn.MaxPool2d(2, 2, ceil_mode=True),
+            nn.Conv2d(64, 64, 3, 1, 1),
+            nn.BatchNorm2d(64),
+            nn.ReLU(inplace=True)
+        )
+
+        self.encoder_3 = nn.Sequential(
+            nn.MaxPool2d(2, 2, ceil_mode=True),
+            nn.Conv2d(64, 64, 3, 1, 1),
+            nn.BatchNorm2d(64),
+            nn.ReLU(inplace=True)
+        )
+
+        self.encoder_4 = nn.Sequential(
+            nn.MaxPool2d(2, 2, ceil_mode=True),
+            nn.Conv2d(64, 64, 3, 1, 1),
+            nn.BatchNorm2d(64),
+            nn.ReLU(inplace=True)
+        )
+
+        self.pool4 = nn.MaxPool2d(2, 2, ceil_mode=True)
+        #####
+        self.decoder_5 = nn.Sequential(
+            nn.Conv2d(64, 64, 3, 1, 1),
+            nn.BatchNorm2d(64),
+            nn.ReLU(inplace=True)
+        )
+        #####
+        self.decoder_4 = nn.Sequential(
+            nn.Conv2d(128, 64, 3, 1, 1),
+            nn.BatchNorm2d(64),
+            nn.ReLU(inplace=True)
+        )
+
+        self.decoder_3 = nn.Sequential(
+            nn.Conv2d(128, 64, 3, 1, 1),
+            nn.BatchNorm2d(64),
+            nn.ReLU(inplace=True)
+        )
+
+        self.decoder_2 = nn.Sequential(
+            nn.Conv2d(128, 64, 3, 1, 1),
+            nn.BatchNorm2d(64),
+            nn.ReLU(inplace=True)
+        )
+
+        self.decoder_1 = nn.Sequential(
+            nn.Conv2d(128, 64, 3, 1, 1),
+            nn.BatchNorm2d(64),
+            nn.ReLU(inplace=True)
+        )
+
+        self.conv_d0 = nn.Conv2d(64, 1, 3, 1, 1)
+
+        self.upscore2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
+
+    def forward(self, x):
+        outs = []
+        if isinstance(x, list):
+            x = torch.cat(x, dim=1)
+        hx = x
+
+        hx1 = self.encoder_1(hx)
+        hx2 = self.encoder_2(hx1)
+        hx3 = self.encoder_3(hx2)
+        hx4 = self.encoder_4(hx3)
+
+        hx = self.decoder_5(self.pool4(hx4))
+        hx = torch.cat((self.upscore2(hx), hx4), 1)
+
+        d4 = self.decoder_4(hx)
+        hx = torch.cat((self.upscore2(d4), hx3), 1)
+
+        d3 = self.decoder_3(hx)
+        hx = torch.cat((self.upscore2(d3), hx2), 1)
+
+        d2 = self.decoder_2(hx)
+        hx = torch.cat((self.upscore2(d2), hx1), 1)
+
+        d1 = self.decoder_1(hx)
+
+        x = self.conv_d0(d1)
+        outs.append(x)
+        return outs
+
+
+### models/stem_layer.py
+
+import torch.nn as nn
+# from utils import build_act_layer, build_norm_layer
+
+
+class StemLayer(nn.Module):
+    r""" Stem layer of InternImage
+    Args:
+        in_channels (int): number of input channels
+        inter_channels (int): number of hidden channels
+        out_channels (int): number of output channels
+        act_layer (str): activation layer
+        norm_layer (str): normalization layer
+    """
+
+    def __init__(self,
+                 in_channels=3+1,
+                 inter_channels=48,
+                 out_channels=96,
+                 act_layer='GELU',
+                 norm_layer='BN'):
+        super().__init__()
+        self.conv1 = nn.Conv2d(in_channels,
+                               inter_channels,
+                               kernel_size=3,
+                               stride=1,
+                               padding=1)
+        self.norm1 = build_norm_layer(
+            inter_channels, norm_layer, 'channels_first', 'channels_first'
+        )
+        self.act = build_act_layer(act_layer)
+        self.conv2 = nn.Conv2d(inter_channels,
+                               out_channels,
+                               kernel_size=3,
+                               stride=1,
+                               padding=1)
+        self.norm2 = build_norm_layer(
+            out_channels, norm_layer, 'channels_first', 'channels_first'
+        )
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.norm1(x)
+        x = self.act(x)
+        x = self.conv2(x)
+        x = self.norm2(x)
+        return x
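+
+# Note: StemLayer is stride-1 throughout, so it changes only channel counts
+# (e.g. 4 -> 48 -> 3 when folding a coarse mask back into a 3-channel input
+# for an RGB backbone); 'channels_first' is passed on both sides because the
+# tensors stay in NCHW layout here.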
+
+
+### models/birefnet.py
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from kornia.filters import laplacian
+from transformers import PreTrainedModel
+
+# from config import Config
+# from dataset import class_labels_TR_sorted
+# from models.build_backbone import build_backbone
+# from models.decoder_blocks import BasicDecBlk, ResBlk, HierarAttDecBlk
+# from models.lateral_blocks import BasicLatBlk
+# from models.aspp import ASPP, ASPPDeformable
+# from models.ing import *
+# from models.refiner import Refiner, RefinerPVTInChannels4, RefUNet
+# from models.stem_layer import StemLayer
+from .BiRefNet_config import BiRefNetConfig
+
+
+class BiRefNet(PreTrainedModel):
+    config_class = BiRefNetConfig
+
+    def __init__(self, bb_pretrained=True, config=BiRefNetConfig()):
+        super(BiRefNet, self).__init__(config)
+        bb_pretrained = config.bb_pretrained
+        self.config = Config()
+        self.epoch = 1
+        self.bb = build_backbone(self.config.bb, pretrained=bb_pretrained)
+
+        channels = self.config.lateral_channels_in_collection
+
+        if self.config.auxiliary_classification:
+            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+            self.cls_head = nn.Sequential(
+                nn.Linear(channels[0], len(class_labels_TR_sorted))
+            )
+
+        if self.config.squeeze_block:
+            self.squeeze_module = nn.Sequential(*[
+                eval(self.config.squeeze_block.split('_x')[0])(channels[0]+sum(self.config.cxt), channels[0])
+                for _ in range(eval(self.config.squeeze_block.split('_x')[1]))
+            ])
+
+        self.decoder = Decoder(channels)
+
+        if self.config.ender:
+            self.dec_end = nn.Sequential(
+                nn.Conv2d(1, 16, 3, 1, 1),
+                nn.Conv2d(16, 1, 3, 1, 1),
+                nn.ReLU(inplace=True),
+            )
+
+        # refine patch-level segmentation
+        if self.config.refine:
+            if self.config.refine == 'itself':
+                self.stem_layer = StemLayer(in_channels=3+1, inter_channels=48, out_channels=3, norm_layer='BN' if self.config.batch_size > 1 else 'LN')
+            else:
+                self.refiner = eval('{}({})'.format(self.config.refine, 'in_channels=3+1'))
+
+        if self.config.freeze_bb:
+            # Freeze the backbone (but not the refiner's parameters).
+            for key, value in self.named_parameters():
+                if 'bb.' in key and 'refiner.' not in key:
+                    value.requires_grad = False
+
+    def forward_enc(self, x):
+        if self.config.bb in ['vgg16', 'vgg16bn', 'resnet50']:
+            x1 = self.bb.conv1(x); x2 = self.bb.conv2(x1); x3 = self.bb.conv3(x2); x4 = self.bb.conv4(x3)
+        else:
+            x1, x2, x3, x4 = self.bb(x)
+            if self.config.mul_scl_ipt == 'cat':
+                B, C, H, W = x.shape
+                x1_, x2_, x3_, x4_ = self.bb(F.interpolate(x, size=(H//2, W//2), mode='bilinear', align_corners=True))
+                x1 = torch.cat([x1, F.interpolate(x1_, size=x1.shape[2:], mode='bilinear', align_corners=True)], dim=1)
+                x2 = torch.cat([x2, F.interpolate(x2_, size=x2.shape[2:], mode='bilinear', align_corners=True)], dim=1)
+                x3 = torch.cat([x3, F.interpolate(x3_, size=x3.shape[2:], mode='bilinear', align_corners=True)], dim=1)
+                x4 = torch.cat([x4, F.interpolate(x4_, size=x4.shape[2:], mode='bilinear', align_corners=True)], dim=1)
+            elif self.config.mul_scl_ipt == 'add':
+                B, C, H, W = x.shape
+                x1_, x2_, x3_, x4_ = self.bb(F.interpolate(x, size=(H//2, W//2), mode='bilinear', align_corners=True))
+                x1 = x1 + F.interpolate(x1_, size=x1.shape[2:], mode='bilinear', align_corners=True)
+                x2 = x2 + F.interpolate(x2_, size=x2.shape[2:], mode='bilinear', align_corners=True)
+                x3 = x3 + F.interpolate(x3_, size=x3.shape[2:], mode='bilinear', align_corners=True)
+                x4 = x4 + F.interpolate(x4_, size=x4.shape[2:], mode='bilinear', align_corners=True)
+        class_preds = self.cls_head(self.avgpool(x4).view(x4.shape[0], -1)) if self.training and self.config.auxiliary_classification else None
+        if self.config.cxt:
+            x4 = torch.cat(
+                (
+                    *[
+                        F.interpolate(x1, size=x4.shape[2:], mode='bilinear', align_corners=True),
+                        F.interpolate(x2, size=x4.shape[2:], mode='bilinear', align_corners=True),
+                        F.interpolate(x3, size=x4.shape[2:], mode='bilinear', align_corners=True),
+                    ][-len(self.config.cxt):],
+                    x4
+                ),
+                dim=1
+            )
+        return (x1, x2, x3, x4), class_preds
+
+    def forward_ori(self, x):
+        ########## Encoder ##########
+        (x1, x2, x3, x4), class_preds = self.forward_enc(x)
+        if self.config.squeeze_block:
+            x4 = self.squeeze_module(x4)
+        ########## Decoder ##########
+        features = [x, x1, x2, x3, x4]
+        if self.training and self.config.out_ref:
+            features.append(laplacian(torch.mean(x, dim=1).unsqueeze(1), kernel_size=5))
+        scaled_preds = self.decoder(features)
+        return scaled_preds, class_preds
+
+    def forward(self, x):
+        scaled_preds, class_preds = self.forward_ori(x)
+        class_preds_lst = [class_preds]
+        return [scaled_preds, class_preds_lst] if self.training else scaled_preds
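+
+# Minimal inference sketch (illustrative; the repo id and preprocessing are
+# assumptions, not part of this file). config.json's auto_map routes
+# AutoModelForImageSegmentation to this class when trust_remote_code=True:
+#   from transformers import AutoModelForImageSegmentation
+#   birefnet = AutoModelForImageSegmentation.from_pretrained(
+#       'briaai/RMBG-2.0', trust_remote_code=True).eval()
+#   with torch.no_grad():
+#       preds = birefnet(torch.rand(1, 3, 1024, 1024))  # normalized RGB batch
+#   mask = preds[-1].sigmoid()  # finest-scale logits -> soft alpha matte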
+
+
+class Decoder(nn.Module):
+    def __init__(self, channels):
+        super(Decoder, self).__init__()
+        self.config = Config()
+        DecoderBlock = eval(self.config.dec_blk)
+        LateralBlock = eval(self.config.lat_blk)
+
+        N_dec_ipt = 64
+        ic = 64
+        ipt_cha_opt = 1
+        if self.config.dec_ipt:
+            self.split = self.config.dec_ipt_split
+            DBlock = SimpleConvs
+            self.ipt_blk5 = DBlock(2**10*3 if self.split else 3, [N_dec_ipt, channels[0]//8][ipt_cha_opt], inter_channels=ic)
+            self.ipt_blk4 = DBlock(2**8*3 if self.split else 3, [N_dec_ipt, channels[0]//8][ipt_cha_opt], inter_channels=ic)
+            self.ipt_blk3 = DBlock(2**6*3 if self.split else 3, [N_dec_ipt, channels[1]//8][ipt_cha_opt], inter_channels=ic)
+            self.ipt_blk2 = DBlock(2**4*3 if self.split else 3, [N_dec_ipt, channels[2]//8][ipt_cha_opt], inter_channels=ic)
+            self.ipt_blk1 = DBlock(2**0*3 if self.split else 3, [N_dec_ipt, channels[3]//8][ipt_cha_opt], inter_channels=ic)
+        else:
+            self.split = None
+
+        self.decoder_block4 = DecoderBlock(channels[0]+([N_dec_ipt, channels[0]//8][ipt_cha_opt] if self.config.dec_ipt else 0), channels[1])
+        self.decoder_block3 = DecoderBlock(channels[1]+([N_dec_ipt, channels[0]//8][ipt_cha_opt] if self.config.dec_ipt else 0), channels[2])
+        self.decoder_block2 = DecoderBlock(channels[2]+([N_dec_ipt, channels[1]//8][ipt_cha_opt] if self.config.dec_ipt else 0), channels[3])
+        self.decoder_block1 = DecoderBlock(channels[3]+([N_dec_ipt, channels[2]//8][ipt_cha_opt] if self.config.dec_ipt else 0), channels[3]//2)
+        self.conv_out1 = nn.Sequential(nn.Conv2d(channels[3]//2+([N_dec_ipt, channels[3]//8][ipt_cha_opt] if self.config.dec_ipt else 0), 1, 1, 1, 0))
+
+        self.lateral_block4 = LateralBlock(channels[1], channels[1])
+        self.lateral_block3 = LateralBlock(channels[2], channels[2])
+        self.lateral_block2 = LateralBlock(channels[3], channels[3])
+
+        if self.config.ms_supervision:
+            self.conv_ms_spvn_4 = nn.Conv2d(channels[1], 1, 1, 1, 0)
+            self.conv_ms_spvn_3 = nn.Conv2d(channels[2], 1, 1, 1, 0)
+            self.conv_ms_spvn_2 = nn.Conv2d(channels[3], 1, 1, 1, 0)
+
+        if self.config.out_ref:
+            _N = 16
+            self.gdt_convs_4 = nn.Sequential(nn.Conv2d(channels[1], _N, 3, 1, 1), nn.BatchNorm2d(_N) if self.config.batch_size > 1 else nn.Identity(), nn.ReLU(inplace=True))
+            self.gdt_convs_3 = nn.Sequential(nn.Conv2d(channels[2], _N, 3, 1, 1), nn.BatchNorm2d(_N) if self.config.batch_size > 1 else nn.Identity(), nn.ReLU(inplace=True))
+            self.gdt_convs_2 = nn.Sequential(nn.Conv2d(channels[3], _N, 3, 1, 1), nn.BatchNorm2d(_N) if self.config.batch_size > 1 else nn.Identity(), nn.ReLU(inplace=True))
+
+            self.gdt_convs_pred_4 = nn.Sequential(nn.Conv2d(_N, 1, 1, 1, 0))
+            self.gdt_convs_pred_3 = nn.Sequential(nn.Conv2d(_N, 1, 1, 1, 0))
+            self.gdt_convs_pred_2 = nn.Sequential(nn.Conv2d(_N, 1, 1, 1, 0))
+
+            self.gdt_convs_attn_4 = nn.Sequential(nn.Conv2d(_N, 1, 1, 1, 0))
+            self.gdt_convs_attn_3 = nn.Sequential(nn.Conv2d(_N, 1, 1, 1, 0))
+            self.gdt_convs_attn_2 = nn.Sequential(nn.Conv2d(_N, 1, 1, 1, 0))
+
+    def get_patches_batch(self, x, p):
+        _size_h, _size_w = p.shape[2:]
+        patches_batch = []
+        for idx in range(x.shape[0]):
+            columns_x = torch.split(x[idx], split_size_or_sections=_size_w, dim=-1)
+            patches_x = []
+            for column_x in columns_x:
+                patches_x += [p.unsqueeze(0) for p in torch.split(column_x, split_size_or_sections=_size_h, dim=-2)]
+            patch_sample = torch.cat(patches_x, dim=1)
+            patches_batch.append(patch_sample)
+        return torch.cat(patches_batch, dim=0)
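+
+    # Note on get_patches_batch: it tiles the full-resolution image x into a
+    # grid whose cells match the feature map p's spatial size and stacks the
+    # tiles along the channel dim, so a coarse decoder stage still sees raw
+    # high-frequency pixels; this is what the 2**k * 3 input widths of the
+    # ipt_blk modules above encode.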
+
+    def forward(self, features):
+        if self.training and self.config.out_ref:
+            outs_gdt_pred = []
+            outs_gdt_label = []
+            x, x1, x2, x3, x4, gdt_gt = features
+        else:
+            x, x1, x2, x3, x4 = features
+        outs = []
+
+        if self.config.dec_ipt:
+            patches_batch = self.get_patches_batch(x, x4) if self.split else x
+            x4 = torch.cat((x4, self.ipt_blk5(F.interpolate(patches_batch, size=x4.shape[2:], mode='bilinear', align_corners=True))), 1)
+        p4 = self.decoder_block4(x4)
+        m4 = self.conv_ms_spvn_4(p4) if self.config.ms_supervision else None
+        if self.config.out_ref:
+            p4_gdt = self.gdt_convs_4(p4)
+            if self.training:
+                # >> GT:
+                m4_dia = m4
+                gdt_label_main_4 = gdt_gt * F.interpolate(m4_dia, size=gdt_gt.shape[2:], mode='bilinear', align_corners=True)
+                outs_gdt_label.append(gdt_label_main_4)
+                # >> Pred:
+                gdt_pred_4 = self.gdt_convs_pred_4(p4_gdt)
+                outs_gdt_pred.append(gdt_pred_4)
+            gdt_attn_4 = self.gdt_convs_attn_4(p4_gdt).sigmoid()
+            # >> Finally:
+            p4 = p4 * gdt_attn_4
+        _p4 = F.interpolate(p4, size=x3.shape[2:], mode='bilinear', align_corners=True)
+        _p3 = _p4 + self.lateral_block4(x3)
+
+        if self.config.dec_ipt:
+            patches_batch = self.get_patches_batch(x, _p3) if self.split else x
+            _p3 = torch.cat((_p3, self.ipt_blk4(F.interpolate(patches_batch, size=x3.shape[2:], mode='bilinear', align_corners=True))), 1)
+        p3 = self.decoder_block3(_p3)
+        m3 = self.conv_ms_spvn_3(p3) if self.config.ms_supervision else None
+        if self.config.out_ref:
+            p3_gdt = self.gdt_convs_3(p3)
+            if self.training:
+                # >> GT:
+                # m3 --dilation--> m3_dia
+                # G_3^gt * m3_dia --> G_3^m, which is the label of gradient
+                m3_dia = m3
+                gdt_label_main_3 = gdt_gt * F.interpolate(m3_dia, size=gdt_gt.shape[2:], mode='bilinear', align_corners=True)
+                outs_gdt_label.append(gdt_label_main_3)
+                # >> Pred:
+                # p3 --conv--BN--> F_3^G, where F_3^G predicts \hat{G_3}
+                # F_3^G --sigmoid--> A_3^G
+                gdt_pred_3 = self.gdt_convs_pred_3(p3_gdt)
+                outs_gdt_pred.append(gdt_pred_3)
+            gdt_attn_3 = self.gdt_convs_attn_3(p3_gdt).sigmoid()
+            # >> Finally:
+            # p3 = p3 * A_3^G
+            p3 = p3 * gdt_attn_3
+        _p3 = F.interpolate(p3, size=x2.shape[2:], mode='bilinear', align_corners=True)
+        _p2 = _p3 + self.lateral_block3(x2)
+
+        if self.config.dec_ipt:
+            patches_batch = self.get_patches_batch(x, _p2) if self.split else x
+            _p2 = torch.cat((_p2, self.ipt_blk3(F.interpolate(patches_batch, size=x2.shape[2:], mode='bilinear', align_corners=True))), 1)
+        p2 = self.decoder_block2(_p2)
+        m2 = self.conv_ms_spvn_2(p2) if self.config.ms_supervision else None
+        if self.config.out_ref:
+            p2_gdt = self.gdt_convs_2(p2)
+            if self.training:
+                # >> GT:
+                m2_dia = m2
+                gdt_label_main_2 = gdt_gt * F.interpolate(m2_dia, size=gdt_gt.shape[2:], mode='bilinear', align_corners=True)
+                outs_gdt_label.append(gdt_label_main_2)
+                # >> Pred:
+                gdt_pred_2 = self.gdt_convs_pred_2(p2_gdt)
+                outs_gdt_pred.append(gdt_pred_2)
+            gdt_attn_2 = self.gdt_convs_attn_2(p2_gdt).sigmoid()
+            # >> Finally:
+            p2 = p2 * gdt_attn_2
+        _p2 = F.interpolate(p2, size=x1.shape[2:], mode='bilinear', align_corners=True)
+        _p1 = _p2 + self.lateral_block2(x1)
+
+        if self.config.dec_ipt:
+            patches_batch = self.get_patches_batch(x, _p1) if self.split else x
+            _p1 = torch.cat((_p1, self.ipt_blk2(F.interpolate(patches_batch, size=x1.shape[2:], mode='bilinear', align_corners=True))), 1)
+        _p1 = self.decoder_block1(_p1)
+        _p1 = F.interpolate(_p1, size=x.shape[2:], mode='bilinear', align_corners=True)
+
+        if self.config.dec_ipt:
+            patches_batch = self.get_patches_batch(x, _p1) if self.split else x
+            _p1 = torch.cat((_p1, self.ipt_blk1(F.interpolate(patches_batch, size=x.shape[2:], mode='bilinear', align_corners=True))), 1)
+        p1_out = self.conv_out1(_p1)
+
+        if self.config.ms_supervision:
+            outs.append(m4)
+            outs.append(m3)
+            outs.append(m2)
+        outs.append(p1_out)
+        return outs if not (self.config.out_ref and self.training) else ([outs_gdt_pred, outs_gdt_label], outs)
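+
+# During training with out_ref, each gdt head is supervised to predict an
+# image-gradient map: the label is the Laplacian of the input (appended as
+# gdt_gt in forward_ori) gated by the stage's own mask logits, and the
+# sigmoid of a parallel 1x1 head is reused as spatial attention on the stage
+# features, pushing capacity toward boundary regions.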
+
+class SimpleConvs(nn.Module):
+    def __init__(
+        self, in_channels: int, out_channels: int, inter_channels=64
+    ) -> None:
+        super().__init__()
+        self.conv1 = nn.Conv2d(in_channels, inter_channels, 3, 1, 1)
+        self.conv_out = nn.Conv2d(inter_channels, out_channels, 3, 1, 1)
+
+    def forward(self, x):
+        return self.conv_out(self.conv1(x))
diff --git a/collage5.png b/collage5.png
new file mode 100644
index 0000000..d7da7e1
--- /dev/null
+++ b/collage5.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9f802564aa1e3a7c90762c7e65b77007f081cb179cdd9b42607bad3b1fdaf16
+size 4515604
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..06d8fa9
--- /dev/null
+++ b/config.json
@@ -0,0 +1,20 @@
+{
+    "_name_or_path": "ZhengPeng7/BiRefNet",
+    "architectures": [
+        "BiRefNet"
+    ],
+    "auto_map": {
+        "AutoConfig": "BiRefNet_config.BiRefNetConfig",
+        "AutoModelForImageSegmentation": "birefnet.BiRefNet"
+    },
+    "custom_pipelines": {
+        "image-segmentation": {
+            "pt": [
+                "AutoModelForImageSegmentation"
+            ],
+            "tf": [],
+            "type": "image"
+        }
+    },
+    "bb_pretrained": false
+}
\ No newline at end of file
diff --git a/configuration.json b/configuration.json
new file mode 100644
index 0000000..999cf7a
--- /dev/null
+++ b/configuration.json
@@ -0,0 +1 @@
+{"framework": "pytorch", "task": "image-segmentation", "allow_remote": true}
\ No newline at end of file
diff --git a/diagram.png b/diagram.png
new file mode 100644
index 0000000000000000000000000000000000000000..134ab1524bbd2fc75ee0a4e3063b02e44aa2ff1e
GIT binary patch
literal 40509
[binary patch data elided]
zO)Ua@eEAUN^6|%{mq+sj#^;qUt+Fz*LajR;0(U^U$SdI&ZICrhE;0CL{Jeg=?Y!&W zr16?&i;}C&HG!}$LaDrgwckX86yq0Li7zs<^_ewyYd<%L)D^qt4%H@4HCdEe%)BMN zdsIGJVQ+Kd)Zlp3$6uiKGw|msc;esyIyA5sc(DHHo!ncA+2pDG#-oFRW%ae@s_?fz z-q(M5us#;H5ccD!ZO@_n$-9s0g<+1m{I;O?yb$GeN1`xNS8C_wPQ%4z=1cLdP80K7 z&Em&T_lyS9YF~eNz0;YLa#!g#FFd!f6kR$`$RPZFS@*I6AJsW^-_C~D%qo98@b=k* z?-HNy@S3KZauo=urAN3eepp-`tY3sRdNEE3K!hbIP#WKbl0o(tGYD2wVc{ZH<(v;XgS}|c)=JRwK64*-|Et$%j z8v7kPv+|Yq$=+#W@ZQ&T#^0 z++3+Y1usmzwhv$L)s#Qm|7N+LM?#|=Dw_jZ0_GMAXB~;~dYMD}8}hKP z%g$5M@vlSQRo_{Um0u{OUo(%;Klb`h?WX7h7gvsw_FfA<*L)N@eU`ZM_5_9_o(|>7 z?8{tm=xeGCXFS{u0=szL#H5qN4JfJaDoZd#DjiW2Vkx!Os3Wfi`j-{}+({8F)KO>H zbl6_3GF4+o@Yg554l!4Ysh@^~ zy#Dk_VRCBfb$xxX-c&W|PdzuChyA#+Q(JG@hHa4$IN?iYU#PBc& zM7cBfVmw8ss)uT4!&^D}LgLr2U!Ok-d$VnO=e`jqAz2Q&T0$)*M9E;JMa7<~#m_Mq zX44t-`XPI2Wfw2AOaBaur2d&5i9sXi42S=n8cBUIo-Fh0m@K&Q=T`qS zeMF8jE|{McQF{Hqva2j)89wQ=dgi~6U@v!}I=`hVz-N;FUzs}}S=Pbh5pv~!S4X~N z-bu|#cmh#=CH`O8pfXvOtT4K3@$X}e3K0yKale9mubTWN^P|6v^)Fe6aG|VE9;&wNpSLCJ@X1Xr zC;R{ZDDdO@JK|w=-;0L%I1yi10D@LZvZ7%-i{0t?jW$-nU6uMTU%n{U+6OOX79(;LpdB+I>+1JpCs_ZePt?Fs z?>ol=Z&9uLP_b3@gX z`GUfz8{zy%%!H>^`UzoN6(-8*tR$17z>1ebk~FkpG=i%-h#?ygRTD9(i4{Q(u6 zOGX;OjEK`^RewB=L6?=40rt(*cP#vh!bffyO5k{dohJerj2yeD4J@()0|U8q)|!rf z!%K{_fCeTx@+E-Tpu22qK~ijM^p{A2VcSD;UXwM#fh|cp+L3YliJ*c65HeIAp?SLh zm6LF=Fh2oBN)eV~891K9>7}XbjYe$j1!31E0|cW&H3(L7K|J2YU{h}C(d$R6p9Xy2 zSLgk)tY;#!#TY)HMZ3TscN2ul`}5V5OjTFj%niJvhw0o0O51X$Xr2anPS!|-?={TQ zI42GQo0O6TsOVvkp`fDo)`N!Co#UTF4@e6#ia3Q+i-`F+E}#Gm^X}%qX6YEn&MoJJ zp#QnNoZ#B~vK_&%wor#hz=zE=8|kgP3%XZ^%eZFzbgV0yPY*>tIO02!r}AwMt}H5xsg5i!n2_(TJ^c00SL$fVkd!Uk8S|W8#Fi$l-=qL5z%h`^LB+qm zuT96#d9?P$VZM8Hbv5au%@-hwxM)-5sO{Hwnd*7y5{Vw{ZSnNo8@0obOKiN%{SIHPw<4cEQj#=WrekcR(-HP$La^`Pcz?GW?^{n=9I@+h16uE zU9HFN>O}jm>Gi0Bw{$?6rhVr$Brm%92fD-laDwbmgPQ8gG3Qt~I^qU+49A^7>j!*O z`@M8{lj7;dy&0QW@O-AYVB*tdg##YO@nJge^P&aUh?4TG^O_x5&0hhu?KJ#rZ8W@z zX-iEs1jen{aM>(wNs^p+h6|cd+Po+W>PY5Qa(}_i!|YCZOeswp$NN!Pzswo>J4FwnEI9wD|y;H%W|cu^x#w^-AO6y zqSJ?maKHCBWN#)iCj@uFlek~X!tD$~LV3$mzVb*S*gBF!eT&tzE&NOQJ$vz{)&$wS zw<-QfcU>3iikM(*zyqBN6;%S4(^BwBTXajxyDEatN*`8c%~PB@m}qgV{fy)J){aV* zCVy=AVp>ZA15A$#Y{eU9F0#H@ezFAyCdP*Q)APks$3S1@o zr8j`6kv0)+XSQHM4m3J9$12Hs+q!N`Ct7{l^G2fzs!LQBr4aI)jR#yBYQVob6Lk{W zAFaa+wg}#Ib80id3d0348TyhkR@O$h5$z0Y+Qm%H=Qf`uxCoc>12swNhs&>N=sf2X z_Aw0hBLjUe*B@_*P|-(ik)0mblTX~$Eh4H`I21pxJ1YysMP_=g+scVUXD!s?YebEQqoeD`+iJ{}EGCe^in%ru7k_V1hKa<9!YrBq`$Ed_(oR?>b z#-V<5PGzWKwpM3AN7Rvwa*Awt(gm02CljRV7T$w?9?m#3_( zS=Y1Pz$g@Dzc`v{qGXVaRWU~v%JK4J|8a{K6I&AUcdE58>0OE0`h0R&hWz%_bXm9> zE3oWfi`bm3#z?L6qCHI>T)Kpfe$n_mffXpu4(A5#9j!MvU(V&y1l}T5?Ns|xm=vmN@!Lk{xZn07)B`NeW>~LQA z)#6l|^j!(vu$nfiPLVE={ut7#XXIt4v<~s{R`%8wOwt;Ws8d!qOIc-msiMP$vw1>-M5(w1XB6D5Q?vB_T&K1#tRLnF z&REM9`fO8bY{ws9WBeSG>c(Hoj7KFz@eb6`f%HIIsi@LfvBQ=)ro>&q3)@v+VM-Uz zc-Mk;*_bwRRI~~;L%M4Ys2v)rjh{E`VCLm!Bz*%avSg5KgXf~?p4Z=KcD$KiFNI+E zxumu#Dy&>jne9JvOTgn$KY~ZCLwG=jbc^L^S#l?f!=(o@AM$g~O8A!HZ-rSWI@Uv+ z+)ZaDK*Mxo|E5keDp4<=Pjfq}zGpwR9DZDHs^|S%i6|=lF1VB*;nBo*%2wu^I(T9-YTr~vd^9IT?x{(TJ-2>=Cjd3~9GAmJ>zy&5J2qVq~ZOH?_ z>xl)$>W97Ax0TS`UvRb4f|IYOV^}q;aK;2#*z@o~P@TW$)=fKGTjq?a6d0w~QgGKn zv|<%6&=-2@ulWKKD)b^;nvx;uE|sV_j`G>K@~;n{o~LW*UmR7GOgi&Ijinpm)`y4_;y1MG1+_kVK3%t(L6@pS`f6M==WH+9?+j;FqZ{J4eYy z_bz2Q*l%a;T3uaH2v{(AFuy1x6vVPAbo#6L)I->6W3lP%WL*YVXx}5dyB&pUT6iuw z12D8LYM;T_T$Oeg_Dkc|DLT# zl)j!qiptm!SHBsbOOj+Ke>DH^5r>>?4xGs$CP-FErg8h5bHSi^rzTN#{5Hr2?4OkL zr*ON<9>sQnkf3*Z+>MKgF4<)eL0k zh5W8>SDtUhDpCWu)KuiiDZQR(F`pXS{`d)A@T@NTPC~@wf|Yij>_C^XN*%Og!}+(! 
z^X`5Dnuj>kU_*q3C0WhrKt z1--~gN$QQ)*e-JZu)>kZcq4YzES!{H%op|svSX3U&DxO-7x!EocS5+DL1tm7Wae?R z=u3!m2#;0AX+TC*$(e1E?@*4uIPAs1941Z^25)kX$T@QibAqrP)(^yLv@$vM^AU7t z3u^QQ2p6L-$n~QP7p`&L-!Pb#2WAL6(P5BiTzjbW8|i*)TY5}N8{toMM=(=BBkZNNsQBR}Pb<#22)<)9Coqv|_Dv8DuplQMhu&a1V9(kCK58i@UZuX7mUc9vf^zH9Hh*yQVh z3EX-)c2yby+ZGB*p}Gr`eZjFdj!WgQIM6>3;ys zn3uVsY<`7f>xKU|Te$HHUjjES(dA-?7$PeaLR{ZcSo2S6i3+H9Y$m+&IB%oEl}QAK zl}V-lDP|2sWfw4fg?SHDR;n-Kmt2JB7nA8y(lwkpP#p}&_GmiBPT$-1s}jA5`G!ly z-I%Tztd8LkxOzNs>^aeYLA@4=YA?nvWOK}j#KPTk0Aw`{@o7&nn%$2gmgTVLX z-tOn-uacT=0+YQ@0Ud~wEZwq(MO`bxp~ zz8FPiVtD2+EG_q-AxZXG##{a3#vXX^Y6BiZf`B6rUf=yqQ{t&qiOTXLR#Ifl^QHR)^br*Z7l_t* zLOHsw>P=bXGcehge`e4m!E(5B zm=IvJzq6lXkr-+)ldILg>G+i@z#+IuQbt+DY)OI90%*TT3p4f4USg6aVHSX@;kyEi55sYY>m!1nws@kbz|#S-04j8x3x-j+%boh(Qbw5whG43N;{1#$;ADy?)S7f$;iB-8{IzA!D z)w9X{$a$KLFrJXGVCB|CdX-9lsHaSS(&y}P8UI4%+gMMNlTD6YI5LOnfm-ct9~CDq zJWJbYP{tPNtEZVj`YC~#>Lp#Udls=sEa>iJc4rJ!*|PLF?37P&{Q5#e&pNEhA32{& zymuO2=pU>>2!*soD!%17Qkiw-^>4z(l9u4u3(8-`0-i);d|R0GHxk{qDN=q@my(rhSf{xb%4H^(aV z3YRC437GHiOxX_?oV_b8w78^TdR)`3ld8{wWEJCdXS{`pj^@wkcCAl5?-S%&%mn8S zw)U(_R^0z2b#TB^sUl#6Wr1^e0p2HNcXGT`$6!M^rV`x;T>8Jg;WOgm<)SH~Cj=4i zs#xymOUK-ur|fy33M1)UWb^b?q%Y6Ilg}~e5twHcK>kQrNPR@cM?T)w0T^|WPKXLC zmm5~g^1RQ_BBp_H+De?vmqh;WVx9y_sAplgbWP5C<8KbN+3`3UNC8e8k~iLsqC2_~ zQY|&oJuSj|@?q7+TZu?i`6Lx_@8Ffny$K>teoFVp5;ml%O~78kfdrZ7aWdiyaBZvt z$$0tS+$N%=zoa|tL0wM0%Lim#PSdM$R9a%b_A>DLDW^InCFj{JGlmhg5Bz84S$|5F z;nHAFRJK9H?tlm4%FeVmYK#Q|#MnT;iUHVpsO~9TNU4|r=n91}S(G7-$tUI9#?%Hk zvp;b^h>G)lpcGZ#&1i2=e|WANQ@~}|G{DFLrvdHw>x>AU?@82WlwpL^FiKMi%h0L_ zGE%7B7rFw-UuBA<5SD@SFx~|2WjJYz%E)cd8|ul!IlatjH>VvgGoV_73Q-_9NROE+ zFOI!Z8P1^;IV8@=*`OyBRK+H&*Zh#z599nSp1suotl>55+s53E00rd(R_Ryf!4HCO zb$I9>jXx$I-!%`(bWrkEse^i$US3Xb0|L&Ew>#Uo83%Wy!PyRbj)Oocye^SKu56tG ztCt5PJ95{nvew;w$`{^NYvMp17pDNP$L~)Ftk0)(ISL3@1@|+ARHZl5W*nL3XB3$p zwzad)ZbkB=-Fe-XuxK|^SyOu5$@fosT#})D0XQ{;LqR>D-P^n35-u*dEACP!q*J;R zFfXtzBiKBlp@A>X*GNa6rg3WuglcBsA!AD83Z4A;&S^Y%f9I0|Ni?+SlN!seZ=2_7 zFY^hsV6%x7RLsF8< zR{BSaE|BIDq(Z7VPQ>e_Q%TrVN zg1{b2Gi_-8ISGGaH22Z_<;9C#-F#_Xi5G`33|V3AY!{ZWBK;!q=Hi0(4%9bH^A@cs ztc~dRx)R%Io)^MQOr0eEls|V0-vS^lGXk5rvqIaP^5L`aq z0t5PG@Oxueu=H?HNJIe33Z0f5c=7&X4(C?~Q?%gS1C3E~OfT^%x+-8ikP za^$ra?s1e1o6Di*B8e;lQovO;y3JNb53>TQn6(p9wZvSM)Lg1ASJyc!+;r5no{XFF z<&w7=W(goM(H(XKrOwg#P~SJL={-lOSM2pum^G};lzVfGebr#Z9K@)^sE*fBqqiw= zg4ex1X=V4^cRPaiNxRk6 zU1E*S!ZY$_2jWtC@S&H(Ts1JfYHZ|_i&IBdSWGa+?}m-1i;;a$wWj7)20jaGHRyxz z(6D)dRdwZ7Ahh_V)E8|RHA8$m+LF|(`fWh|Xt^_T=`B-7Xtt0y$VZE6#nFVj%MN?y($Gu|>hH@tb5E)7JD_SfkWRU_1$T)> zAD|~3!ovSFHyy$3+_sc=hg?i6=u6O*vW%3VC#JviVpTS)l|}9cnb`x`sO90IrmN-{ zOoC7`3(&i#x(@bD+&)*eH}UJZVOAU?TN_m*G*dsLJv+cUl3`4%W=LbrKdoiK#2(1< zQ`>4oD>60S>OSz{&qC%V7bnb7TXCOr`+~s~IH;!ew8y%9NU+Ie9xLq$m2)W~!mwPy zR{QRBw+uX0>{Wpl)NBXVdGl17L9HxB9Xd+n2n~ER`Aq7Bh@rI?1DXI2JvnMgJSXuS z_&zFAYdsz=X4}*jt|@@t0ftEfJR`gK1o+eT<_?#t`GjBx@+prDH-;Ey}{XL%5>bKhZHb^ zud#Lw{8f8xaH<*_dmJ$J%*a&$_#^dFmkZS z*oyA#el?&xQBA*r>*n{1&Po-W(ZVgYUu>}A>@u_lQeqjR5$zXSdt>yY0|2eo%2kfH zWk64RHgV4I!KK&~SuH)3kqhIFa6piOR#nR_1WoMMjnd6?M&pM3E^4D)qf$;bitV#~ z)7tnVSIpU;+(w`ZO+8CYCrkM|enm4%UoMz8Dn7$qAl)i}jBJ`R@OGOyI1R*{4qLJ# z75Z^+N$qS4@r=4w8)3C<1)&|bBKEv;=X&$@5l^{ur$Ie~;x|z_^>MO`*H#Z$qV&Rl zy7pUC;{+b=3*CG8DfGkA?v{dQPW+R+c|=E7&rD#R+m1%c3ZqvyLh(n zE-E?qIvX_jx07b*+Ze^J+!PpsfdJ@KTu2YKrPyRjpN3+u?Jlaq1i3><&K#I($QRUw z{^KeZE(Ua2<;MK9Hu7Oia8DfP`pP+~jPT+3)_o^kjc0&1*_>SIujOi>Ca5jon6Ed9 zaZ3thCED~bYqxGc&XQnu9`Fm>>)h_dQ>DH3x{OQu)9m$|p)YA?pQ+_l=SCjaPm?dyA$u!w*- zYj0j&2{Ys&9=A7QTmYf-pP1LUBe?Tz#z^L84)YqbBCV^_>07g~cN@zukp8+kcXDXf zZwblGRvWjAd8YA-Cf#scguDLOd1G_E$RjA%4sVVZV 
zAyw~ZZVTjwLYU(e!g@RB%{#^Sa{VFhP)mZpG!lu5Vr2cv(-XAlukoJ(={@z#lP}bu zxf^CU^Z9C_drvc--Bv_yt8B7fTYF?vpIkJG+**5Xk}+|&!C$T6Fz@Ko`(0OFI*$~M zjdItX=r=E)uvl2SDGctN3p1^sAHQBdFQ9O4LOySRM*-c@@Zy=mXa-Nep9W_`!1scA z#Dux$Ti1g5q&{DwTmUOnU~kO9bNsu?cL(+ViPawf2sC^e;I=No|-!l$=Rqor8eb>2{^8 z;BcE4@qh8EyBy}(mdS(X|DvtL%+)DfaPOh+%>UemyyfiWugDD6qu6`@CAG+hazDu| z)c;ch=^V$Wrl!`F)Ya8nUsqOE&aR)B|CjU9qG}T!RL#?AxKGPAH8Ek$$*bf&ZS!0- zvZ0}Y5j8bE&G+%+$5z;#J9pAjs$J!#zkR#C*ge}^Qoa4`c;q@y!CSaI#J^`}XHR_z z1?_z)B^Q3Y@9yIhVLV}yt9YT-Bu`oH%F&Bod)9wHvf)Od1j7(p;7QZ=H{;DUpNIxP z@7Ynr*}BfE^JS5}KUfj+bh~VGE{3(^4;q6!UIgLQ*MrSg$wZ01L&4LZZ|i=4{k4El z`<>92tJFz`ngkq9ep=!q&#@%b*4BR632c9U!EZo)4|{FQmSaHkER@d=%jUO~Y1wf! zv)7a@>!c(6HR5y&*~zRl4IqLY_PXdBGBWmi zf8gwRKxfso0HQ+fX|P@p@M&sZDa~DYY2z&c8T`{3o1Zth8Tj}ycF0hWzp`=ne&|;2 zY|5Qaqx+P=9uM_NzIUbNKgncjH>5 z#}W%Wj8mW|D9YL+^-> zb@vs;zBLyNrcV>LSUQ?L3kTcQRc#PlX3w!g%;|&l)RkiW=cD+~{fK>T!gTK=9eD3M z^e;a1ham+~O0}{%L?GVdJDDV+mSMdev5=X63s9Rd?eAR1p<(zq9n?Iqvr%ve7z!@S zvMo5Sv93rT8|e(R<$e$uY9OMi6u0oBOv8EVjNo%xy77SniZB9I!0%@ zjKGz`$uwF0!54LyaErb{Nxo0-^1rT~?2}(Vr#SQ@oBx%(p3m~3*w_O#9wR7V9P69K zQQ)pIQ1Ba_DH06$o?u%b;lM-eg9iQReR5lHz5bOGG;>;9r_mN#k>UklxI3TI`M$F0 zBDMm5x2*Af#n*TAKJ(X#6dleE$7ZYZoo}uDI-AciY{^=ah*isSCILYd<-ZdxjX=+s^FP{Ve zvGitug)rw*4zxfbR#qfeKJ=NgfgVSMse4bgYwz=?fFIL<=k%ZezDngHiIGGQpN}dg zVyIYOe_BN`lSA&e=d6(i=3dpg22(P`p}ak)3xB#5r&pg>`4_s}p-9QsuHxBnPyL4% zhR-S4s8qN4WX*@sFN};Uaye(2v+EsP2jyD?4Ndf<(iM1wdnVXf?EVOx#k}r+`P{QI zVeBMTmqh}rg2_7>p18YI(eV2UR@3<+>rp$GgKff-S|{we(|vpjmr}4uog}{fEtg*e zh5^K>hMsHh^cZN@+O5NvtPj}Ozo)Dg_-mg!gf=RkRr|W$GF`o3>fRODeDq_#l{MU) z9vXftUA>C|)C%jb3_V>o)O>Ch_VbQD_xS=c?l`O|6+XmK)Gzb68qcV?-xf|BNmM%S zbvX4;L)Q6+H*S%gz6|EZ_!mBz{lKQJ{Qa_s!!nTv1h~Vo^Cr9)`#F6Bi@!vSkFPUQ zCesn{>SLjy@0JdB$MkITlJo%uTgh*hnhyT-7XIDN2Ax8Q#KB40&h24UwGR+7%NqBE z$1nHCX;A(|dY+xx_yw9Y_*h8u$!>^?nl5^V{vpN)&_WM$Mkn4XV5f2aau)wBvmhoflDa^Uj-63#`v!k8(jQb1w7|Q$;hCGT0g|x~ z^L&e*Tc(Q={L2FwIhs?gKbI98iQc}X!<~j0?<;P!eda-@W7K6C+y+%~IJj2K$JXx+ zK8bs^!5_SVv#h-yGA(-pt)}*VF>Lg$pjur~hF%i-PdEGDxBM(*&V@k(`B5zY-6Jx~ z<3`o6Mi>}kGMW0zq{PfU$f){#j?Wr@k7bI9#DsDWf<^w0Jom|&+}Nw~o&RI`++={g zEz*tc@7W`NGPzN`k7j=_s!ga+R*?28{7$l>3KsVoy&8BSWC68@h3$b`(rdlZj?{#J$)8MJ3T_e%fo+5fkwa`67I z^W@Dr$F{r%AD@}v;{CGIzFX0>yvBdI44HC|%1eqZy+f(Jfa8@*9yDu6HEHsHGLVME zNMoWm)oena_sTN7>}6o5jNn?5Vb%`z>)Q3VxgDHQx@Ah82l1afLQ)>3h`NC?z5l5u z$ecPSLD#^w;}OG$$r0Y>aI~a=%?t-`x{*?{I#usC?%BpVwMm;-oCFu2ptZFBs6R=T zYzXw{Y7dndJZ}h)3}1AH>OTE_E328ojj3^Rh<~%EW5%kg8p^cOQ}Mr6HABYBwgTfn zeH5e0zn)wq@)-_1WZv~{hWS~3 z_zCI=e>Ayw6g#pz1iQabl+}RG`QA8`cTq*!ODTvHB{eiNnGvA6@|^gM_$&E`0Bmc3 z<$pg&d9vk;&%<=twH_m%IC`MZ#%q<`pBmSc?%^k2IZwVJ>|0OzoUGhcN13tXvjiqC zW(oYTmwjX$j4+7~`-v^xuZ&m4f3DeaS(5q)EmdWmYEWUx8Dfved0to2-R-y|`SplQ zT#+OZ9x1waG{VXMo2Yr;Eqr|tnZA2YQ#|a;XN41YvqxWFzn4tho>6ujAQU%3UwBQs zKm6ASJOM`RZDLU!mj(+Ek*vr;fzIDk_gBS8bH+bV@@wrU1!9<#?C*RW>0?KcjjtAZ z(mUJV_?fKGy?DCNeL-DLIB}1)Uy$o)ZGsbjc)*|kiS@dU#z|NITe{C3tqQp=rtHJ` z7sLknzJh5^qQEE+`sVn~i$P*^p;Ucuit#RAZJ_Z7qgu%RZOgnMM;n}p<_zrd;cB(Z zBz`~4##u*m=kDlw$UlzEfR${0%RG`&{FGFE5{RY3#qaiCN|d}qzbc&0&aNk%{pwgl zNd(ssO+Bs`^Z7Mxfe7oi;}V6Jit(LE&WQRf5+K0MhG-Tq? 
zkmDPHq?cL77We1=yFOk@U2B7>goi$T3|Gr~hx;V>Tze5s%O;x(*i0YeN>*cmExh9D z_8>DN??t^sYa0OgvNGAV*z z+{On|6T3TuJDPs?TiM+$B=oSxZ?lz?P%RqwAbiF`ddrN^c>QM1^$mZi+sThhPM0tOV)FP~v}@XiM3UF$bjeiuHkmW=?%^*(<5 zq+`B2*HoF=EB~>Uho83Zk6}BM`U$4=pBNa0(kHw7+K%CwRgtKQH$m&hY6W>&kTEF# zx0kX0&*{wOB}2ruoBUk@N#=Vr0#Dc%g36}Dw{m`YcSFw96|iR?={hgF2(ZL9N} z_&2t*?{klwVTWNyT%$W!`-fvMB@tdSkOTS?jy<32B8`_`mgGAX+@Ii(ykq}|B>Io5 zc#R}S(Cw5W_3~(nBH^IIsr#=MvLEX%ZA!R>hCbgDA9?U&w8<*BRw*wpR?bOM;ahSJ zez@Xt(0iLi)$2&Fh#xUclHz=_pH-xli>2C=lsbQoJ#~4+{&SUnxZvla_)zeQ+YrA* zcqqS(sJF4OO!xb6ooaaQZPb$JO6l;9O~@Ulr;pA~5kXIS`>S&7P_5{Hg5-!G*-e}z zCjuMc)Em(<_OrZa+1|@y=*7avZJ-8J=;?>eO@Goo8QbAI)6_TWdeEHrsvV!0vBfuy zVe^l2L}%2}xp;+BzJwGGX8LEjtqob&QUG*dOA`3!m36Z2W`_9sZkVR%#HQe00Cs)Y z1i5-h%7k@r>G1Pz%xE^E)tUm$438r$O`lJ!giH$DXmI5X{indyI$%$j`fymc((p&t(jJ}kUYp|8 zNDV(oU#Hhs0j=q3NX8X?xFIagIJVogHl_t%V^2>NNB!y@We)Sfrf*GbK7<0LX%)SG zNruO|dz^O_J}O>Bovp<1x6iD+|JGL)Qu2|7w_G{-JhJ?my`W_YQe^ zufyrrw4~@abTv}YdEUD>XMW$ko_%&2_q$8&{f8TVgt0R9ekHLX$hEi}q3W?x$Ux&H zFYj%>pxJ#+mUQg?6vywEE+cHV-f_&GF7&JJ&i}{WTfarwc2UEMsDmgl zfOI#~g5=O8QVJ>{4I&^N!qCz~4H6Pk3IZlA9Xbr%A>9p914zevj`wrlH_vw*-#_ra zk3U`q~tY+Da)Hri=L&O-glw4hsC zl{zQm&->g$lucBra+UjhuAaroy-UCA)wezSFsD#)>TO9|uin)%Y%s%Rze1;9Q!E04 z&MFWMp4yX?NrIGvDddX=qgI_p=5gV?LkPzmwMFXpS!0&Y4u_IB_SY<{9{mv(K8L)Q z4*%=Tn6N->HR!gR;&M}uu2d_#vp_aZ#ql(s#MrkRC9R7IU{}|NgqMkXKZ#4K!BOIW z5yCKX-I(%1C&ghB^|uO$;D7#tZu>dj6QlFt%N>TdP1RS3zRH1z->sPW8PiVK-;eu@ zJjsL<4=0o!j>VK7EN>_+sBqF(MQ_gPtzPLj)+IPPD%S5VkIzpPj}`IIA8Ft?+ax<+ z%kl28z7&UCFKMtkOh+H#E$>}qE8Xy}$u>9fa!c6s!`_kR(cpkvjru5wonn5Diwc{= z;2OTHT#{`ZZ^vmik7YWfOK?$tiuqdKozE;`l1U6w6FSGdip*gG*ggI?M?zmHWzp7F+$l6)uk2alrd zqE*(^G26=GGqpP>;T<7}c#lJX@+d}eCp@pvj{1Ca$1XFf^b1E*85!v9n@!)ECYH&c zg6wo_IYKiX##`#PiiomSRrd7|&fQ~!`e0)0oQA#(nB||AUc_P8oBS+BFkB^;qhXKC z{^cI-xH5zcm9##Q&$-i-kt&&Ir-lyghsz9O%4(xzrmOPD{PN$ zoQ%3J+D)Bqkh*CjW$ci1{O~wM`uLIe)SuPgNCCnM3hMh+%;1_Z;m3g(<8J!*rI=5D zO5Y!MF%+=Va`8S$#Ry2-?d{z={5G7c^ZO=_$K%7ER6#=P2oIMhS(4V~u;GqNkZnqp zR~s?DCYB8$>E#O#d~w_@J@0r-={T%K)Wj-#d$w*n6R(kIOU}{iJn`REO?Rzc8rqpF zT1TZ1d*z60SGW+^MM#jj&EN9D(jENI@ZvQh%uA!){<_JH*k8GNw5z3WmA>yb9`Dy# zEtGQ6GKOwMm8ZW!JB5o!JEK$G{E;mU%jQIC)Dh@wNNBN{b%uFlr23kcafyvDvhjIo z--gQ(Z$;&lTM5QaLC5+VGal2?&@8@bf zkET>fJ=;R>#F@HG0E_^rt z3sqmdoqhFbQso|!#rqkB*yp|OEHebDW4^~PCL3>3-z#%n%KU`e^?0CS1UF!JhIpM`5-YFu5kqY3x8@pE&8nq^>m(Q65B9~vGx+oP zY8?;D%NH=6a7K@!HtT!g_?G!w^PF=}4Z{W5g^!CzJ@Gg8q*^XX#|MczsHbna^mTk( zr6_);>Hsy?7(2?!cWwCSq92hWZ(UV)G7#Du;(@&^`vpNbBpHn?{d2GiT*P(p(pp4J z{a$Wt*U>uO8|Y;D)EAWq*=n&ai~rr?;g{R_?p``)d@!{Pr-)5!Cq4BWG0wG5 zv1UCwIX;be(PB<#2{Vm7mXF< z%%gly<0zc@d&a#{(uq|2p=$QE)BvKcWfJz1rX~$r&43!d)9D1-LNPqMUD}KU5DYC> ztPjOcXsQI9?hLv+4NnXwm+Gn+?Xq-kXi78eJgqD&zWtm1=Rxp;ptJOkV=pw_$V76T znK_J(!6CEvUP8y~X#Z#T`*!>cKZ~z_$u=sn?{jb8y5`UOWRih2=f^HtsG^DqY$781 zZY0$B_!QowQP#Y*sEvtY<~*NNJK_qxIO8L=y7FYV zr_1mv7YTb+5r3(a>Ip-Yw+?C|q4uhE^gD5(5@u#wj&(^vViHB%qy5x6(qoim(vvqy z;`Y4t4;#8VXrWxaizRv*XK8N16`15a-88f2w#n%Z>Qf5v<>{_|QHz7ziDHW<}ey`fPp98ksmiYo%DTiQmMmY>=c%pc?2RU8EzBsIwb^6vejFUa;rDQ{1qJKW9_y@)O&Ddf2xl7n7HX&LI0AKS5>wptz33~#am|GG4^XMgR z;{|X~UI-i-A5U{5g&!s00Dp!bS!HEshL^8==!`y3Lh^I?kC$?(Dl030F{%+4c9=L8 zzQnGPO3|0A2`m+cmgebQ9x^JkH?$opV02p@g7)75X~ZFyF3GHK-|iGd6y?_*K98iAuH8o# zd57M|`VQb6J?nV$sQ*N%zV_UYeI;`f*DZzL?_fT0*7$VZc;@g|zF*2gZo21lZ%l-7 zBbggdWd;f-#I)q@y9C!cQFs|}Nahg(; zO|G#C+RQT|G?vvjD2joqXiNrLqCgYqK^hP3XLS1FsNm^$Fng|N*R1LtrS!A4`9L0`F$?zc$xEn zX~e$h41)O@pYZgpy8Q+|vx#bt;9MF_;R(B-+jP%^Fh*W$k6@eL+m*L@^orgSjRd{* z-VCJYmeXxktwF4`@m6tTbFobz)l0pY=5ExUAuY|}f-S*X>lU8o3#!=kTpRg@0Y!pj z8wb}l53xB!tTPC7L~FQqpD83MPHO->yr$R;n7uBZ>S44m2|iL(DV2mvBT@u2 
zL2m5r1T~hjA1g20@dCN-LZ72uRB^iGdQcJ3$QCfnXvCVAz^9;x-qC~U-4sE>PV*(o zV}~Wyeb>C2BbwRY2j8bcJw=MId_V=BC4~53gF`IE4JL)b{}$e=uxa|5k}Y$%-%b{N z12BxBU{7<^g6>6qSAsCtW`OjkjO%QN63k(^C>s*kG9xjl$bm0MZJI_RNBy8n;}*nk z%W<16BL=vhTi5cp?Rbw^c667Kdr?4`_4LrGy1L20Rerw4^jvyJ;@en(~H2MZ~) zAb-nUJz;SnUN$MQ;dZ)v^>#^M(t_N}4d~qaoSYmK7t%^OX%>1mGFVFnvPB!$Vvv&P z_uHrXlZTI;OdY`vA+4&IssJ`BClTVUZVzj7)Z=ie!Y&9-c<==(z1g-dN!yOPGqw8| zXXC?JNT5|2zDp_V9GhzO5W>!^O$GiDdu;3<4%Qh3B@)GTCWQPEEqn&D1nR=cpwc-7 zT8KTak4E(~d_~v>NCRmyj(KPvi{lAIv(t5n&qH@hTv}KYNoS4@7BdLlUda~3ZB$L3 z*fY+xM3|g`;$}%;SwxNl@l98Us_H;Jg$zk=$e0`<*OvJVRjJ?R2D!}lKCMd7WP`y(xSJgo0Gj~FJy)_NQa8kw}-<@l^0-Ky;e86XV3qFkXchQZt zC$eZF+vRVPDs@q?JzLSKyVY8hD`s@is$jIzVk-E0Qe@<)#;x4!PHR%xw{K)kPqKF0 z-}pRu){rcU-m(h`l#!#L#~DV8!j}tBB^dj(6y+S!>I(n`jBSjD5$M9~ihca{t9t zA<1dCg>p?oT$Vd7Zzdm)5o1^SZpNu`1vonoa8RuM3%w1#=unh05mS7%; zhuiaa*F|EAZZc<9^}1yI+We28ru#0gXKcoEeM{pnqa);_;)j{JgI-@6NA}Cggd6Bc z-k1TJJ*j)a__B>;ZPlYBW>3sBW@l)lSY)uq%)vr&m-m8TL%!>lYT8(;2k@0?nmjw1 zB+sPw_r_Mt6WXKET)saplY^~s{L}d45%i67Gh-+kd&H7^B{8=Tx6wGA#AO__oE~JH zRS<0-cN0x>4i`rm2DKH`iHT$s1v6V<-fTx?9#0XG-&kdkw~7LJy1w4OQeY-R#6xcs z4&vG>XLX`qG-kDVnT-nvA6U^e{jimSKXFNQxoczK`B0zcn}ZEU$JMNFzb5anLM!#7 zsbC8r6(1h3KfloCJmY2mc8;=8tsF1iZsvza*Tft;Vz#5P7)hQ=l=Snn4tajATC&#h zJ@&$h{;0*YU2wB&2YsI_ph$N~1&ML^?65v<)jpI3Eg}+0>{c&~ENKY7gqhEI2Q7DZ z(m8I?ykS8}^|dF><}@{7e|nKEdR?Rc`{7li6(pab9f#_T`vX2Vn>y^n==~fkRl zkB`zMW~MD93!SdgZCBEZ-xyQGS+mxzy0rToxlL#jAzj0;Ed2U_mzV~2WdA$KoGKux zNruxe`t5>0yCi!MEZQ0M3#r?frS6OSLT`0DeZ#Un=>a-~xzWYID=&0UEHCx2SUI&i zsJlH~va0zWN5nrYbQ-QOy8trR;DMBzrGljgr)zX4NjF|65%&_#4Pu# zP^~KhMr3oZEv|ef1Z%gt`ldOrX4p>UtB3Q6MA_kt_W7(Cc;9R_ zi1tvwdi2$a`Oepo)^kA@IAPv_Ad~QQ{K5IB`~e_nV}=ZV`mf9FAx;)sqxT2e+y7=u zU&`1xOKWHz*%^Dt9nNotIqWIgt9X5t-n!>LmPl93F4B~IwG2IG&C_6iJ7RDb9muzb zZsn;tS?3kBFwjnQlTmG-th=*&H($i|SJWi$Qn=zT&p>LHHtUuBoTR$0&T2yL<#2Z6 z@e0EZjP=#eeT2`WFxJwWRYptUxjNZtvtwpOk$KZbT>tcQIRD=mcJ2I zq21>@6sG_Fg&UT(;l9_-+a6aclH=L;PL3SI0pt55|@q9kS%PFyYRZvxH0cj*=7R6vQs-{Jt`C@Uui@&u?NBs#!Ba zQoLssp7(Gc3tT@gFD@F7(-iX9LGPkcYP;fdh6H_1SHtHX_f=n#4wz|M;!dBP8gwoe0=8AEv}thSAKsnm<%iSyzLCCYe5I{>dyOtFqcx&xz7O8*POeQyrAI2fUu2{k_|)tZ-)IR+4L!UD{LnsP=eP?iJ2UIP|Zo zO*mEm8e{QP3qO#0`t0yQJVlR#PKqS8_8Sl`W)$DkJ~)vodt@@`U_osSUhZ*FuVNsi586no~C4&@!j#*{=Ep5ITEAo9@hsihuCqi6#BV^ ztMJYI)IL5aB7J8594oeHq_Fh*PjVg19H$&B@^0a1E{kcct&b)wrIedRE!a?>s8KNX zL#;&ElBHJUfyz-C`ttV3Y`d%cA|L&)@D9$W7B<8kVvL;|Mq`iQ6?n>G;vU1f*<0us z2elVS)s^rRdCXhpoyGSQZd>1Shg{?gk?Cn}&MI^}kMs{SJ-pDqoYf0SK2Pu+j)Hb` z%2$VWb-z`JXu$ZjQazNvz`yQA;Ro6VcA3Wk!rK=4x#sV5_FIJun2vQWZEdTTGuQ-W zit%K~EB@H5Sd+S?=7I8+=}xBVVBPJ7xe+P*x`dJjQ1P@<9Y?WBbs}p{ zr<+CTGYj4IeFG8nk{@}HDgtxVeIPz+SY&0aextCP-K4}gz|Pj=Ga_pzz$xkO z>duny;qJt|Ox{J8fqHTn?!2aw%ySpBK3Qq5?EQ-*&P<~yxqP>^rbpFqd!B*`0CV|3 zYn{K>=TgRZF;3W1->rmvb)%IGR?}Jmj~^Ek-Sk?JKxLGg z4SMQ&jn%_W8C|GirK#>T>&})hE)vYtdGH`pyEpoJVTKrH8b7Y=h)Oz2w$E1o^du#|k1Z?|DvetFekkD`awA|@|4a*Iq!tCVMH3XWwtWA&S#PLM6=nlDL&NjBc!d{a z^B1ObG^EZfw&Ll9Ka1_E8QH>qj-^)cpTLO%DlHArF4kW4KUG+^0~MR+CgyG%$Cm0> zuT|v91yEzc}6tM90)+A`Ru_mEm?2 z6|yf5a!R^9Ta8Q0407|E=m7UJM6Kas}(tlI=U zyCFQ~rt>dl^;QbdlD}LGO75L|_5*94YWsaH?_a`7#s>sRgNx->ug^UT1f=V^lkyMv zK4byHvo5EeTjv1z6=qfA2@syI`ljM>ab=bqhA0!zc>lD+%6plb!3BGpn& zil6`9kKi(@^8cxfMG#N)p&K0vhzf!vWt;EnR}^X{|%o524~;GaG5 zf7J>+qD-u)u>XK*;uqN;pthxlLQ| zeAJ=tWCqkaL;*WpT|fbI>lLv){-O&_OG^{*+Fcc0NHRCW`t&X=lo{*Tj+XK~&NHCI z8mVII8nEPXD4nMjjpO%=JtHGjH8nMbaLi1m;Yfz4lNq*brLart2)&P)H3#K2!IBYV zJmG-&YG4edA;y*-U})t|)~hy<)_pk*z~`4Nj$P9?V!Zae@btSQ-s)phF>6`XM~F7aEMuejAh1ZTOw-`SGDKI<;G`K`n*gMDHH99tae? 
z(AS;*&Y#5!w`ttRoSZuD43Zkz%W2l`Wb-v(scpo`cBceir_|A^+)KY%>PZBsEUMZ> z@fno!+gHpdlpn5qo&`3{Q98yxcj&xU3sI`R+Z`fJk(7cnz`isZR07py+fDvMnS%ZB zmI}uq`trw|QbX>l=RZF&JwCz~jDWhL%Z%L0^ggaMuHCYQ=1oXAaCN-KcL;bbvc*&v z4J=c9x=DD(PX}s9;Gjfhg6tCVLym@==4OC4c~?-)7+@p|U;G*<$23yoJHSw>hrAVY zv2T6ko;#-@;H^Xpa{Jd9g@PKURC3!vtFzy^2 zgHkMB1z}XmebSjWC@8=o3cn5B3KQY)>6%>0nITNIxjs_T?{GO6KU#9atNPcUqRPMb zPJirody<9m@$ny*7^6bpT&4Rp-hn;OT+NV$`_?PP5YyvvehW(Ycqa9fdYuERPhnsr!5Q!7_k?_f*8@OjJM z5^W>AYi9#4=2!kri~KpNei9FX`UTleB)g80hlK%rI#?0W46HjTCyy6IB8U=bq>&t* zRrw5+P&CXXi`}gDc1+@JF--c^gKaWI1;N&R9pDxZEs~=PIv}sz3})K7+$ENN2xxHj zbimK;@8n7w!pAU`#pd{o{ICyMbqYQP-kY}JuG8DQnAo-9wKuX~ zAydwh2FrQzFv+g;7qz*DD8qu~ccFB*X1c<-nwZbhHoSAoF(gOBC-aGQ#}GKC*peb7 z1912b2>t3QZza)d=?-qsWT#?@hE`)yi$Mib%e?INL+@cCFBcT{ugSsi9Z;H}3@9$H zS#$sTQEXzclrY0Yg>}5{UbNrwET&zsi}W4OV!D?-NWZ-+r;IOx`0g6ys~Nx?WnV6i zlNMSV9RA1&{mmeZGDv!b)byo)+tat8)0^toEH(+*X6QOlB45x?6knt^t?g?9$7`

(base85 binary patch data for diagram.png omitted)

diff --git a/diagram1.png b/diagram1.png
new file mode 100644
index 0000000000000000000000000000000000000000..0a120d09a7fcbf76cf4812bbf86adc9ff94dd438
GIT binary patch
literal 21269
(base85 binary patch data for diagram1.png omitted)
diff --git a/model.safetensors b/model.safetensors
new file mode 100644
index 0000000..121c871
--- /dev/null
+++ b/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:566ed80c3d95f87ada6864d4cbe2290a1c5eb1c7bb0b123e984f60f76b02c3a7
+size 884878856
diff --git a/pytorch_model.bin b/pytorch_model.bin
new file mode 100644
index 0000000..fdfdfe2
--- /dev/null
+++ b/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0986c2881028a2d0ef9b638ab06bc4cfe7c529760d451eaa7098ade2592015f2
+size 885079136
diff --git a/t4.png b/t4.png
new file mode 100644
index 0000000..b5ce68a
--- /dev/null
+++ b/t4.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43a9453f567d9bff7fe4481205575bbf302499379047ee6073247315452ba8fb
+size 2159885