From 314766afaaa07023a3076ca0e8bc6c7fca3c3903 Mon Sep 17 00:00:00 2001 From: Suanmd <1044507575@qq.com> Date: Fri, 11 Aug 2023 15:17:02 +0800 Subject: [PATCH] first commit --- cal_complex/Complexity_CDCR.py | 405 +++++++++++++ cal_complex/Complexity_ESRGAN.py | 199 ++++++ cal_complex/Complexity_SR3.py | 285 +++++++++ cal_complex/Complexity_SRDDPM.py | 268 +++++++++ cal_complex/Complexity_SRDiff.py | 564 ++++++++++++++++++ config/sr_lwtdm.json | 90 +++ config/sr_sr3.json | 96 +++ config/sr_srddpm.json | 97 +++ core/__pycache__/logger.cpython-36.pyc | Bin 0 -> 3972 bytes core/__pycache__/metrics.cpython-36.pyc | Bin 0 -> 2918 bytes core/__pycache__/wandb_logger.cpython-36.pyc | Bin 0 -> 3399 bytes core/logger.py | 141 +++++ core/metrics.py | 93 +++ core/wandb_logger.py | 116 ++++ data/LRHR_dataset.py | 99 +++ data/LRHR_dataset2.py | 99 +++ data/__init__.py | 56 ++ data/__pycache__/LRHR_dataset.cpython-36.pyc | Bin 0 -> 2664 bytes data/__pycache__/LRHR_dataset2.cpython-36.pyc | Bin 0 -> 2409 bytes data/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 1672 bytes data/__pycache__/util.cpython-36.pyc | Bin 0 -> 3400 bytes data/generate_bicubic_img.m | 87 +++ data/prepare_data.py | 182 ++++++ data/util.py | 92 +++ eval.py | 45 ++ experiments/check_fid.py | 25 + img/LWTDM.png | Bin 0 -> 128607 bytes infer.py | 101 ++++ model/__init__.py | 11 + model/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 509 bytes model/__pycache__/base_model.cpython-36.pyc | Bin 0 -> 2004 bytes model/__pycache__/model.cpython-36.pyc | Bin 0 -> 5115 bytes model/__pycache__/model2.cpython-36.pyc | Bin 0 -> 5051 bytes model/__pycache__/networks.cpython-36.pyc | Bin 0 -> 3597 bytes model/base_model.py | 48 ++ model/model.py | 166 ++++++ model/model2.py | 167 ++++++ model/networks.py | 135 +++++ .../__pycache__/diffusion.cpython-36.pyc | Bin 0 -> 7494 bytes .../__pycache__/unet.cpython-36.pyc | Bin 0 -> 8270 bytes model/sr3_modules/diffusion.py | 249 ++++++++ model/sr3_modules/unet.py | 259 ++++++++ 
.../__pycache__/diffusion.cpython-36.pyc | Bin 0 -> 9250 bytes .../__pycache__/unet.cpython-36.pyc | Bin 0 -> 7600 bytes model/srddpm_modules/diffusion.py | 297 +++++++++ model/srddpm_modules/unet.py | 243 ++++++++ requirement.txt | 13 + run_metrics.py | 18 + sr.py | 244 ++++++++ sr2.py | 244 ++++++++ 50 files changed, 5234 insertions(+) create mode 100644 cal_complex/Complexity_CDCR.py create mode 100644 cal_complex/Complexity_ESRGAN.py create mode 100644 cal_complex/Complexity_SR3.py create mode 100644 cal_complex/Complexity_SRDDPM.py create mode 100644 cal_complex/Complexity_SRDiff.py create mode 100644 config/sr_lwtdm.json create mode 100644 config/sr_sr3.json create mode 100644 config/sr_srddpm.json create mode 100644 core/__pycache__/logger.cpython-36.pyc create mode 100644 core/__pycache__/metrics.cpython-36.pyc create mode 100644 core/__pycache__/wandb_logger.cpython-36.pyc create mode 100644 core/logger.py create mode 100644 core/metrics.py create mode 100644 core/wandb_logger.py create mode 100644 data/LRHR_dataset.py create mode 100644 data/LRHR_dataset2.py create mode 100644 data/__init__.py create mode 100644 data/__pycache__/LRHR_dataset.cpython-36.pyc create mode 100644 data/__pycache__/LRHR_dataset2.cpython-36.pyc create mode 100644 data/__pycache__/__init__.cpython-36.pyc create mode 100644 data/__pycache__/util.cpython-36.pyc create mode 100644 data/generate_bicubic_img.m create mode 100644 data/prepare_data.py create mode 100644 data/util.py create mode 100644 eval.py create mode 100644 experiments/check_fid.py create mode 100644 img/LWTDM.png create mode 100644 infer.py create mode 100644 model/__init__.py create mode 100644 model/__pycache__/__init__.cpython-36.pyc create mode 100644 model/__pycache__/base_model.cpython-36.pyc create mode 100644 model/__pycache__/model.cpython-36.pyc create mode 100644 model/__pycache__/model2.cpython-36.pyc create mode 100644 model/__pycache__/networks.cpython-36.pyc create mode 100644 model/base_model.py 
# NOTE(review): this span of the original file is a whitespace-mangled
# `git format-patch` blob (trailing "create mode" list and the diff header
# for cal_complex/Complexity_CDCR.py collapsed onto the same physical
# lines as the code).  Below is the same content reconstructed as properly
# formatted Python; the VCS metadata is dropped -- it is redundant with the
# repository history.
#
# --- cal_complex/Complexity_CDCR.py --------------------------------------
import copy
import math
from argparse import Namespace

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

# Residual Dense Network for Image Super-Resolution
# https://arxiv.org/abs/1802.08797
# modified from: https://github.com/thstkdgus35/EDSR-PyTorch

# name -> model-class registry, filled in by the @register decorator below.
models = {}


def register(name):
    """Class decorator: publish the decorated class under *name* in ``models``."""
    def decorator(cls):
        models[name] = cls
        return cls
    return decorator


def make(model_spec, args=None, load_sd=False):
    """Instantiate a registered model from a ``{'name', 'args'[, 'sd']}`` spec.

    *args*, when given, overrides/extends ``model_spec['args']`` (the spec is
    deep-copied first so the caller's dict is not mutated).  With
    ``load_sd=True`` the state dict stored under ``'sd'`` is loaded.
    """
    if args is not None:
        model_args = copy.deepcopy(model_spec['args'])
        model_args.update(args)
    else:
        model_args = model_spec['args']
    model = models[model_spec['name']](**model_args)
    if load_sd:
        model.load_state_dict(model_spec['sd'])
    return model


def make_coord(shape, ranges=None, flatten=True):
    """Make coordinates at grid centers.

    Returns a tensor of cell-center coordinates for a grid of size *shape*,
    each axis spanning ``ranges[i]`` (default ``[-1, 1]``); shape is
    ``(*shape, len(shape))`` or ``(prod(shape), len(shape))`` when *flatten*.
    """
    coord_seqs = []
    for i, n in enumerate(shape):
        if ranges is None:
            v0, v1 = -1, 1
        else:
            v0, v1 = ranges[i]
        r = (v1 - v0) / (2 * n)
        seq = v0 + r + (2 * r) * torch.arange(n).float()
        coord_seqs.append(seq)
    # NOTE(review): torch.meshgrid without indexing= warns on torch>=1.10;
    # kept as-is to preserve the old default ('ij') behavior.
    ret = torch.stack(torch.meshgrid(*coord_seqs), dim=-1)
    if flatten:
        ret = ret.view(-1, ret.shape[-1])
    return ret


class RDB_Conv(nn.Module):
    """Single conv+ReLU layer of an RDB; concatenates its output to the input."""

    def __init__(self, inChannels, growRate, kSize=3):
        super(RDB_Conv, self).__init__()
        Cin = inChannels
        G = growRate
        self.conv = nn.Sequential(*[
            nn.Conv2d(Cin, G, kSize, padding=(kSize - 1) // 2, stride=1),
            nn.ReLU()
        ])

    def forward(self, x):
        out = self.conv(x)
        return torch.cat((x, out), 1)


class RDB(nn.Module):
    """Residual Dense Block: C dense conv layers + 1x1 local feature fusion."""

    def __init__(self, growRate0, growRate, nConvLayers, kSize=3):
        super(RDB, self).__init__()
        G0 = growRate0
        G = growRate
        C = nConvLayers

        convs = []
        for c in range(C):
            convs.append(RDB_Conv(G0 + c * G, G))
        self.convs = nn.Sequential(*convs)

        # Local Feature Fusion
        self.LFF = nn.Conv2d(G0 + C * G, G0, 1, padding=0, stride=1)

    def forward(self, x):
        return self.LFF(self.convs(x)) + x


class RDN(nn.Module):
    """Residual Dense Network backbone (optionally without the upsampling head)."""

    def __init__(self, args):
        super(RDN, self).__init__()
        self.args = args
        r = args.scale[0]
        G0 = args.G0
        kSize = args.RDNkSize

        # number of RDB blocks, conv layers, out channels
        self.D, C, G = {
            'A': (20, 6, 32),
            'B': (16, 8, 64),
        }[args.RDNconfig]

        # Shallow feature extraction net
        self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=(kSize - 1) // 2, stride=1)
        self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)

        # Residual dense blocks and dense feature fusion
        self.RDBs = nn.ModuleList()
        for i in range(self.D):
            self.RDBs.append(
                RDB(growRate0=G0, growRate=G, nConvLayers=C)
            )

        # Global Feature Fusion
        self.GFF = nn.Sequential(*[
            nn.Conv2d(self.D * G0, G0, 1, padding=0, stride=1),
            nn.Conv2d(G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)
        ])

        if args.no_upsampling:
            self.out_dim = G0
        else:
            self.out_dim = args.n_colors
            # Up-sampling net
            if r == 2 or r == 3:
                self.UPNet = nn.Sequential(*[
                    nn.Conv2d(G0, G * r * r, kSize, padding=(kSize - 1) // 2, stride=1),
                    nn.PixelShuffle(r),
                    nn.Conv2d(G, args.n_colors, kSize, padding=(kSize - 1) // 2, stride=1)
                ])
            elif r == 4:
                self.UPNet = nn.Sequential(*[
                    nn.Conv2d(G0, G * 4, kSize, padding=(kSize - 1) // 2, stride=1),
                    nn.PixelShuffle(2),
                    nn.Conv2d(G, G * 4, kSize, padding=(kSize - 1) // 2, stride=1),
                    nn.PixelShuffle(2),
                    nn.Conv2d(G, args.n_colors, kSize, padding=(kSize - 1) // 2, stride=1)
                ])
            else:
                raise ValueError("scale must be 2 or 3 or 4.")

    def forward(self, x):
        f__1 = self.SFENet1(x)
        x = self.SFENet2(f__1)

        RDBs_out = []
        for i in range(self.D):
            x = self.RDBs[i](x)
            RDBs_out.append(x)

        x = self.GFF(torch.cat(RDBs_out, 1))
        x += f__1

        if self.args.no_upsampling:
            return x
        else:
            return self.UPNet(x)


@register('rdn')
def make_rdn(G0=64, RDNkSize=3, RDNconfig='B',
             scale=2, no_upsampling=False):
    """Build an RDN from flat keyword arguments (registered as 'rdn')."""
    args = Namespace()
    args.G0 = G0
    args.RDNkSize = RDNkSize
    args.RDNconfig = RDNconfig

    args.scale = [scale]
    args.no_upsampling = no_upsampling

    args.n_colors = 3
    return RDN(args)


@register('cdcrcal')
class CDCRB(nn.Module):
    """CDCR super-resolution head over a LIIF-style implicit decoder.

    Wraps an encoder (spec -> ``make``) and two implicit MLPs; a pool of
    ``num_experts`` conv kernels is fused per-sample by routing weights
    derived from the cell size (step 1) before a learned 3x9 x 9x1
    combination (step 2) produces the RGB output.
    """

    def __init__(self, encoder_spec, imnet_spec=None, imnet_spec2=None,
                 local_ensemble=True, feat_unfold=True, align_corners=True,
                 dense_predict_with_for=False, num_experts=20, ksize=3,
                 weights_fused_with_mul=False, self_do_step_1=True):
        super().__init__()
        self.local_ensemble = local_ensemble
        self.feat_unfold = feat_unfold
        self.align_corners = align_corners  # False in LIIF
        self.for_flag = dense_predict_with_for  # need to choose outdim: 3 (with True) or 27 (with False)
        self.mul_flag = weights_fused_with_mul
        self.step1 = self_do_step_1

        self.encoder = make(encoder_spec)
        self.num_experts = num_experts
        self.ksize = ksize  # choose 1 or 3

        weight_pool = []
        for i in range(num_experts):
            weight_pool.append(nn.Parameter(torch.Tensor(3, 3, ksize, ksize)))
            nn.init.kaiming_uniform_(weight_pool[i], a=math.sqrt(5))
        self.weight_pool = nn.Parameter(torch.stack(weight_pool, 0))

        # FC layers to generate routing weights
        self.routing = nn.Sequential(nn.Linear(2, 64),
                                     nn.ReLU(True),
                                     nn.Linear(64, num_experts),
                                     nn.Softmax(1))

        if imnet_spec is not None:
            imnet_in_dim = self.encoder.out_dim
            if self.feat_unfold:
                imnet_in_dim *= 9
            imnet_in_dim += 2  # attach coord
            imnet_in_dim += 2  # cell decode
            self.imnet = make(imnet_spec, args={'in_dim': imnet_in_dim})
        else:
            self.imnet = None

        if imnet_spec2 is not None:
            imnet2_in_dim = 8
            imnet2_in_dim += 2  # cell decode
            self.imnet2 = make(imnet_spec2, args={'in_dim': imnet2_in_dim})
        else:
            self.imnet2 = None

    def softmax(self, x):
        """Numerically stable softmax over a 1-D numpy array."""
        row_max = np.max(x)
        x = x - row_max
        x_exp = np.exp(x)
        x_sum = np.sum(x_exp)
        s = x_exp / x_sum
        return s

    def guass_with_central(self, central=1.0, sig=0.5, data_min=1.0, data_max=999.0, sample_numbers=10):
        # (sic: "guass") Gaussian bump centred at *central*, sampled at
        # ``sample_numbers`` points and renormalised with softmax.
        # scale_factor from 1 to 4 (default)
        # o o o o o o o o o o
        #   x1  x2  x3  x4
        data = np.linspace(data_min, data_max, sample_numbers)
        sqrt_2pi = np.power(2 * np.pi, 0.5)
        coef = 1 / (sqrt_2pi * sig)
        powercoef = -1 / (2 * np.power(sig, 2))
        mypow = powercoef * (np.power((data - central), 2))
        return self.softmax(coef * (np.exp(mypow)))

    def gen_feat(self, inp):
        """Run the encoder once and cache the feature map on ``self.feat``."""
        self.feat = self.encoder(inp)
        return self.feat

    def query_rgb(self, coord, cell=None):
        """Decode RGB values at query *coord* (bs, q, 2) given *cell* sizes.

        NOTE(review): requires CUDA (``.cuda()`` below) and a prior
        ``gen_feat`` call.
        """
        feat = self.feat

        if self.imnet is None:
            ret = F.grid_sample(feat, coord.flip(-1).unsqueeze(1),
                                mode='nearest', align_corners=self.align_corners)[:, :, 0, :] \
                .permute(0, 2, 1)
            return ret

        if self.feat_unfold:
            feat = F.unfold(feat, 3, padding=1).view(
                feat.shape[0], feat.shape[1] * 9, feat.shape[2], feat.shape[3])

        if self.local_ensemble:
            vx_lst = [-1, 1]
            vy_lst = [-1, 1]
            eps_shift = 1e-6
        else:
            vx_lst, vy_lst, eps_shift = [0], [0], 0

        # field radius (global: [-1, 1])
        rx = 2 / feat.shape[-2] / 2
        ry = 2 / feat.shape[-1] / 2

        feat_coord = make_coord(feat.shape[-2:], flatten=False).cuda() \
            .permute(2, 0, 1) \
            .unsqueeze(0).expand(feat.shape[0], 2, *feat.shape[-2:])

        rets = []
        conv_pred_inp = []
        if self.for_flag:
            q_lists = [-1, 0, 1]
        else:
            q_lists = [0]
        for q1 in q_lists:
            for q2 in q_lists:
                preds = []
                areas = []
                for vx in vx_lst:
                    for vy in vy_lst:
                        coord_ = coord.clone()
                        coord_[:, :, 0] += vx * rx + q1 * torch.mean(cell, dim=1, keepdim=False)[:, 0] \
                            .reshape(coord.shape[0], 1).contiguous().repeat(1, coord.shape[1]) + eps_shift
                        coord_[:, :, 1] += vy * ry + q2 * torch.mean(cell, dim=1, keepdim=False)[:, 1] \
                            .reshape(coord.shape[0], 1).contiguous().repeat(1, coord.shape[1]) + eps_shift
                        coord_.clamp_(-1 + 1e-6, 1 - 1e-6)
                        q_feat = F.grid_sample(
                            feat, coord_.flip(-1).unsqueeze(1),
                            mode='nearest', align_corners=self.align_corners)[:, :, 0, :] \
                            .permute(0, 2, 1)
                        q_coord = F.grid_sample(
                            feat_coord, coord_.flip(-1).unsqueeze(1),
                            mode='nearest', align_corners=self.align_corners)[:, :, 0, :] \
                            .permute(0, 2, 1)
                        rel_coord = coord - q_coord
                        rel_coord[:, :, 0] *= feat.shape[-2]
                        rel_coord[:, :, 1] *= feat.shape[-1]
                        inp = torch.cat([q_feat, rel_coord], dim=-1)

                        rel_cell = cell.clone()
                        rel_cell[:, :, 0] *= feat.shape[-2]
                        rel_cell[:, :, 1] *= feat.shape[-1]
                        inp = torch.cat([inp, rel_cell], dim=-1)

                        bs, q = coord.shape[:2]
                        pred = self.imnet(inp.view(bs * q, -1)).view(bs, q, -1)
                        preds.append(pred)

                        if q1 == 0 and q2 == 0:  # central
                            conv_pred_inp.append(rel_coord)

                        area = torch.abs(rel_coord[:, :, 0] * rel_coord[:, :, 1])
                        areas.append(area + 1e-9)

                tot_area = torch.stack(areas).sum(dim=0)
                if self.local_ensemble:
                    # swap diagonally-opposite areas (LIIF local ensemble)
                    t = areas[0]; areas[0] = areas[3]; areas[3] = t
                    t = areas[1]; areas[1] = areas[2]; areas[2] = t
                ret = 0
                for pred, area in zip(preds, areas):
                    ret = ret + pred * (area / tot_area).unsqueeze(-1)
                rets.append(ret)

        retsp = torch.stack(rets, dim=-1).squeeze(-1)
        bs, q = retsp.shape[:2]
        retsp = retsp.view(bs, q, 3, 9)  # [batchsize, sample_q, 3, 9]

        # step 1
        # scaling_factors -> torch.round(2 / cell[:, 0, 0]) / 48
        if self.step1:
            _rs = []
            for b in range(bs):
                routing_weights_0 = self.routing(rel_cell[b, 0, :].unsqueeze(0)).view(self.num_experts, 1, 1)
                routing_weights_1 = self.guass_with_central(central=(torch.round(2 / cell[b, 0, 0]) / feat.shape[-1]).item(), sample_numbers=self.num_experts)
                routing_weights_1 = torch.from_numpy(routing_weights_1.astype(np.float32)).unsqueeze(-1).unsqueeze(-1).cuda()
                if self.mul_flag:
                    routing_weights = routing_weights_0 * routing_weights_1 * 10
                else:
                    routing_weights = routing_weights_0 + routing_weights_1
                # fuse experts
                fused_weight = (self.weight_pool.view(self.num_experts, -1, 1) * routing_weights).sum(0)
                fused_weight = fused_weight.view(-1, 3, self.ksize, self.ksize)
                if self.ksize == 3:
                    _rs.append(F.conv2d(retsp[b].view(q, 3, 3, 3), fused_weight, None, stride=1, padding=1))
                elif self.ksize == 1:
                    _rs.append(F.conv2d(retsp[b].view(q, 3, 3, 3), fused_weight, None, stride=1, padding=0))
                else:
                    # BUG FIX: was a bare `raise` (illegal outside an except
                    # block); raise an informative error instead.
                    raise ValueError("ksize must be 1 or 3.")
            retsp_residual = torch.stack(_rs).reshape(bs, q, 3, -1)
            retsp = retsp + retsp_residual

        # step 2
        conv_pred_inp = torch.cat(conv_pred_inp, dim=-1)
        conv_pred_inp = torch.cat((conv_pred_inp, rel_cell), dim=-1)
        conv_pred = self.imnet2(conv_pred_inp.view(bs * q, -1)).view(bs, q, -1)  # [batchsize, sample_q, 9]
        theoutput = torch.bmm(retsp.view(bs * q, retsp.shape[2], -1),
                              conv_pred.view(bs * q, conv_pred.shape[2], -1)).view(bs, q, -1)  # [batchsize, sample_q, 3]
        return theoutput

    def forward(self, inp, coord, cell):
        self.gen_feat(inp)
        return self.query_rgb(coord, cell)


if __name__ == '__main__':
    config = {
        'model': {
            'name': 'haha',
            'args': {
                'encoder_spec': {
                    'name': 'rdn',
                    'args': {
                        'no_upsampling': True
                    }
                },
                'imnet_spec': {
                    'name': 'mlp',
                    'args': {
                        'out_dim': 3,
                        'hidden_list': [256, 256, 256, 256]
                    }
                },
                'imnet_spec2': {
                    'name': 'mlp',
                    'args': {
                        'out_dim': 9,
                        'hidden_list': [16, 16, 16, 16]
                    }
                }
            }
        }
    }

    rdn_model = make_rdn()
    cdcr = CDCRB(config['model']['args']['encoder_spec'])
    from thop import profile
    from thop import clever_format
    x = torch.randn(1, 3, 28, 28)
    coord = make_coord([224, 224]).unsqueeze(0)
    # NOTE(review): `cell[:, 0]` / `cell[:, 1]` address the first two QUERY
    # ROWS of the (1, q, 2) tensor, not the x/y axes (LIIF uses
    # `cell[:, :, 0]`), and `coord.shape[-2]` is the query count, not the
    # output height; the extra unsqueeze then makes cell 4-D.  Kept as-is --
    # this is the complexity-measurement harness -- but confirm intent.
    cell = torch.ones_like(coord)
    cell[:, 0] *= 2 / coord.shape[-2]
    cell[:, 1] *= 2 / coord.shape[-1]
    cell = cell.unsqueeze(0)
    flops, params = profile(cdcr, inputs=(x, coord, cell))
    flops, params = clever_format([flops, params], "%.3f")
    print(flops, params)

    print('done')

# --- cal_complex/Complexity_ESRGAN.py (its diff header, imports and first
# two helpers share this mangled span; reconstructed so nothing is lost) ---
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm


def pixel_unshuffle(x, scale):
    """Pixel unshuffle.

    Args:
        x (Tensor): Input feature with shape (b, c, hh, hw).
        scale (int): Downsample ratio.

    Returns:
        Tensor: the pixel unshuffled feature.
    """
    b, c, hh, hw = x.size()
    out_channel = c * (scale**2)
    assert hh % scale == 0 and hw % scale == 0
    h = hh // scale
    w = hw // scale
    x_view = x.view(b, c, h, scale, w, scale)
    return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)


def make_layer(basic_block, num_basic_block, **kwarg):
    """Make layers by stacking the same blocks.

    Args:
        basic_block (nn.module): nn.module class for basic block.
        num_basic_block (int): number of blocks.

    Returns:
        nn.Sequential: Stacked blocks in nn.Sequential.
    """
    layers = []
    for _ in range(num_basic_block):
        layers.append(basic_block(**kwarg))
    return nn.Sequential(*layers)
+ """ + layers = [] + for _ in range(num_basic_block): + layers.append(basic_block(**kwarg)) + return nn.Sequential(*layers) + + +def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs): + """Initialize network weights. + + Args: + module_list (list[nn.Module] | nn.Module): Modules to be initialized. + scale (float): Scale initialized weights, especially for residual + blocks. Default: 1. + bias_fill (float): The value to fill bias. Default: 0 + kwargs (dict): Other arguments for initialization function. + """ + if not isinstance(module_list, list): + module_list = [module_list] + for module in module_list: + for m in module.modules(): + if isinstance(m, nn.Conv2d): + init.kaiming_normal_(m.weight, **kwargs) + m.weight.data *= scale + if m.bias is not None: + m.bias.data.fill_(bias_fill) + elif isinstance(m, nn.Linear): + init.kaiming_normal_(m.weight, **kwargs) + m.weight.data *= scale + if m.bias is not None: + m.bias.data.fill_(bias_fill) + elif isinstance(m, _BatchNorm): + init.constant_(m.weight, 1) + if m.bias is not None: + m.bias.data.fill_(bias_fill) + +class ResidualDenseBlock(nn.Module): + """Residual Dense Block. + + Used in RRDB block in ESRGAN. + + Args: + num_feat (int): Channel number of intermediate features. + num_grow_ch (int): Channels for each growth. 
+ """ + + def __init__(self, num_feat=64, num_grow_ch=32): + super(ResidualDenseBlock, self).__init__() + self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1) + self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1) + self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1) + self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1) + self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1) + + self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) + + # initialization + default_init_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1) + + def forward(self, x): + x1 = self.lrelu(self.conv1(x)) + x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1))) + x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1))) + x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1))) + x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) + # Emperically, we use 0.2 to scale the residual for better performance + return x5 * 0.2 + x + + +class RRDB(nn.Module): + """Residual in Residual Dense Block. + + Used in RRDB-Net in ESRGAN. + + Args: + num_feat (int): Channel number of intermediate features. + num_grow_ch (int): Channels for each growth. + """ + + def __init__(self, num_feat, num_grow_ch=32): + super(RRDB, self).__init__() + self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch) + self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch) + self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch) + + def forward(self, x): + out = self.rdb1(x) + out = self.rdb2(out) + out = self.rdb3(out) + # Emperically, we use 0.2 to scale the residual for better performance + return out * 0.2 + x + + +class RRDBNet(nn.Module): + """Networks consisting of Residual in Residual Dense Block, which is used + in ESRGAN. + + ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks. + + We extend ESRGAN for scale x2 and scale x1. + Note: This is one option for scale 1, scale 2 in RRDBNet. 
+ We first employ the pixel-unshuffle (an inverse operation of pixelshuffle to reduce the spatial size + and enlarge the channel size before feeding inputs into the main ESRGAN architecture. + + Args: + num_in_ch (int): Channel number of inputs. + num_out_ch (int): Channel number of outputs. + num_feat (int): Channel number of intermediate features. + Default: 64 + num_block (int): Block number in the trunk network. Defaults: 23 + num_grow_ch (int): Channels for each growth. Default: 32. + """ + + def __init__(self, num_in_ch, num_out_ch, scale=4, num_feat=64, num_block=23, num_grow_ch=32): + super(RRDBNet, self).__init__() + self.scale = scale + if scale == 2: + num_in_ch = num_in_ch * 4 + elif scale == 1: + num_in_ch = num_in_ch * 16 + self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1) + self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch) + self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + # upsample + self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + self.conv_up3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) + + self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) + + def forward(self, x): + # import pdb + # pdb.set_trace() + if x.shape[0] != 1 and len(x.shape) == 3: + x = x.unsqueeze(0) + if self.scale == 2: + feat = pixel_unshuffle(x, scale=2) + elif self.scale == 1: + feat = pixel_unshuffle(x, scale=4) + else: + feat = x + feat = self.conv_first(feat) + body_feat = self.conv_body(self.body(feat)) + feat = feat + body_feat + # upsample + feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode='nearest'))) + feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode='nearest'))) + feat = self.lrelu(self.conv_up3(F.interpolate(feat, scale_factor=2, mode='nearest'))) + out = 
self.conv_last(self.lrelu(self.conv_hr(feat))) + return out + + +if __name__ == '__main__': + from thop import profile + from thop import clever_format + x = torch.randn(1,3,28,28) + + rrdb = RRDBNet(3, 3) + flops, params = profile(rrdb, inputs=x) + flops, params = clever_format([flops, params], "%.3f") + print(flops, params) + + + print('done') \ No newline at end of file diff --git a/cal_complex/Complexity_SR3.py b/cal_complex/Complexity_SR3.py new file mode 100644 index 0000000..d6d1263 --- /dev/null +++ b/cal_complex/Complexity_SR3.py @@ -0,0 +1,285 @@ +import math +import torch +from torch import nn +import torch.nn.functional as F +from inspect import isfunction + + +def exists(x): + return x is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + +# PositionalEncoding Source: https://github.com/lmnt-com/wavegrad/blob/master/src/wavegrad/model.py +class PositionalEncoding(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, noise_level): + count = self.dim // 2 + step = torch.arange(count, dtype=noise_level.dtype, + device=noise_level.device) / count + encoding = noise_level.unsqueeze( + 1) * torch.exp(-math.log(1e4) * step.unsqueeze(0)) + encoding = torch.cat( + [torch.sin(encoding), torch.cos(encoding)], dim=-1) + return encoding + + +class FeatureWiseAffine(nn.Module): + def __init__(self, in_channels, out_channels, use_affine_level=False): + super(FeatureWiseAffine, self).__init__() + self.use_affine_level = use_affine_level + self.noise_func = nn.Sequential( + nn.Linear(in_channels, out_channels*(1+self.use_affine_level)) + ) + + def forward(self, x, noise_embed): + batch = x.shape[0] + if self.use_affine_level: + gamma, beta = self.noise_func(noise_embed).view( + batch, -1, 1, 1).chunk(2, dim=1) + x = (1 + gamma) * x + beta + else: + x = x + self.noise_func(noise_embed).view(batch, -1, 1, 1) + return x + + +class Swish(nn.Module): + def forward(self, x): + 
# NOTE(review): reconstructed from the whitespace-mangled patch blob.  This
# span is the middle of cal_complex/Complexity_SR3.py.  Helpers whose text
# begins on the previous physical line (exists/default/PositionalEncoding/
# FeatureWiseAffine/Swish) are repeated here so this edit stands alone.
import math
from inspect import isfunction

import torch
from torch import nn
import torch.nn.functional as F


def exists(x):
    """Return True when *x* is not None."""
    return x is not None


def default(val, d):
    """Return *val* if set, else *d* (calling it when it is a function)."""
    if exists(val):
        return val
    return d() if isfunction(d) else d


class PositionalEncoding(nn.Module):
    """Sinusoidal encoding of a continuous noise level into ``dim`` features."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, noise_level):
        count = self.dim // 2
        step = torch.arange(count, dtype=noise_level.dtype,
                            device=noise_level.device) / count
        encoding = noise_level.unsqueeze(
            1) * torch.exp(-math.log(1e4) * step.unsqueeze(0))
        encoding = torch.cat(
            [torch.sin(encoding), torch.cos(encoding)], dim=-1)
        return encoding


class FeatureWiseAffine(nn.Module):
    """Inject a noise embedding into a feature map, additively or as (1+g)*x+b."""

    def __init__(self, in_channels, out_channels, use_affine_level=False):
        super(FeatureWiseAffine, self).__init__()
        self.use_affine_level = use_affine_level
        self.noise_func = nn.Sequential(
            nn.Linear(in_channels, out_channels * (1 + self.use_affine_level))
        )

    def forward(self, x, noise_embed):
        batch = x.shape[0]
        if self.use_affine_level:
            gamma, beta = self.noise_func(noise_embed).view(
                batch, -1, 1, 1).chunk(2, dim=1)
            x = (1 + gamma) * x + beta
        else:
            x = x + self.noise_func(noise_embed).view(batch, -1, 1, 1)
        return x


class Swish(nn.Module):
    """Swish activation: x * sigmoid(x)."""

    def forward(self, x):
        return x * torch.sigmoid(x)


class Upsample(nn.Module):
    """2x nearest-neighbour upsample followed by a 3x3 conv."""

    def __init__(self, dim):
        super().__init__()
        self.up = nn.Upsample(scale_factor=2, mode="nearest")
        self.conv = nn.Conv2d(dim, dim, 3, padding=1)

    def forward(self, x):
        return self.conv(self.up(x))


class Downsample(nn.Module):
    """2x strided-conv downsample."""

    def __init__(self, dim):
        super().__init__()
        self.conv = nn.Conv2d(dim, dim, 3, 2, 1)

    def forward(self, x):
        return self.conv(x)


# building block modules


class Block(nn.Module):
    """GroupNorm -> Swish -> (Dropout) -> 3x3 conv."""

    def __init__(self, dim, dim_out, groups=32, dropout=0):
        super().__init__()
        self.block = nn.Sequential(
            nn.GroupNorm(groups, dim),
            Swish(),
            nn.Dropout(dropout) if dropout != 0 else nn.Identity(),
            nn.Conv2d(dim, dim_out, 3, padding=1)
        )

    def forward(self, x):
        return self.block(x)


class ResnetBlock(nn.Module):
    """Two Blocks with the noise embedding injected in between, plus a skip."""

    def __init__(self, dim, dim_out, noise_level_emb_dim=None, dropout=0, use_affine_level=False, norm_groups=32):
        super().__init__()
        self.noise_func = FeatureWiseAffine(
            noise_level_emb_dim, dim_out, use_affine_level)

        self.block1 = Block(dim, dim_out, groups=norm_groups)
        self.block2 = Block(dim_out, dim_out, groups=norm_groups, dropout=dropout)
        self.res_conv = nn.Conv2d(
            dim, dim_out, 1) if dim != dim_out else nn.Identity()

    def forward(self, x, time_emb):
        # (dead `b, c, h, w = x.shape` unpack removed -- it shadowed `h` and
        # none of the names were used)
        h = self.block1(x)
        h = self.noise_func(h, time_emb)
        h = self.block2(h)
        return h + self.res_conv(x)


class SelfAttention(nn.Module):
    """Full (H*W x H*W) self-attention over the spatial grid, with residual."""

    def __init__(self, in_channel, n_head=1, norm_groups=32):
        super().__init__()

        self.n_head = n_head

        self.norm = nn.GroupNorm(norm_groups, in_channel)
        self.qkv = nn.Conv2d(in_channel, in_channel * 3, 1, bias=False)
        self.out = nn.Conv2d(in_channel, in_channel, 1)

    def forward(self, input):
        batch, channel, height, width = input.shape
        n_head = self.n_head
        head_dim = channel // n_head

        norm = self.norm(input)
        qkv = self.qkv(norm).view(batch, n_head, head_dim * 3, height, width)
        query, key, value = qkv.chunk(3, dim=2)  # bhdyx

        attn = torch.einsum(
            "bnchw, bncyx -> bnhwyx", query, key
        ).contiguous() / math.sqrt(channel)
        attn = attn.view(batch, n_head, height, width, -1)
        attn = torch.softmax(attn, -1)
        attn = attn.view(batch, n_head, height, width, height, width)

        out = torch.einsum("bnhwyx, bncyx -> bnchw", attn, value).contiguous()
        out = self.out(out.view(batch, channel, height, width))

        return out + input


class ResnetBlocWithAttn(nn.Module):
    """ResnetBlock optionally followed by SelfAttention."""

    def __init__(self, dim, dim_out, *, noise_level_emb_dim=None, norm_groups=32, dropout=0, with_attn=False):
        super().__init__()
        self.with_attn = with_attn
        self.res_block = ResnetBlock(
            dim, dim_out, noise_level_emb_dim, norm_groups=norm_groups, dropout=dropout)
        if with_attn:
            self.attn = SelfAttention(dim_out, norm_groups=norm_groups)

    def forward(self, x, time_emb):
        x = self.res_block(x, time_emb)
        if self.with_attn:
            x = self.attn(x)
        return x


class UNet(nn.Module):
    """SR3 U-Net conditioned on a continuous noise level.

    Down path / middle / up path of ResnetBlocWithAttn; attention is enabled
    at the spatial resolutions listed in *attn_res*.
    """

    def __init__(
        self,
        in_channel=6,
        out_channel=3,
        inner_channel=32,
        norm_groups=32,
        channel_mults=(1, 2, 4, 8, 8),
        # BUG FIX: was `attn_res=(8)` -- that is the int 8, and
        # `now_res in attn_res` raises TypeError whenever the default is
        # used.  A one-element tuple is intended.
        attn_res=(8,),
        res_blocks=3,
        dropout=0,
        with_noise_level_emb=True,
        image_size=128
    ):
        super().__init__()

        if with_noise_level_emb:
            noise_level_channel = inner_channel
            self.noise_level_mlp = nn.Sequential(
                PositionalEncoding(inner_channel),
                nn.Linear(inner_channel, inner_channel * 4),
                Swish(),
                nn.Linear(inner_channel * 4, inner_channel)
            )
        else:
            noise_level_channel = None
            self.noise_level_mlp = None

        num_mults = len(channel_mults)
        pre_channel = inner_channel
        feat_channels = [pre_channel]
        now_res = image_size
        downs = [nn.Conv2d(in_channel, inner_channel,
                           kernel_size=3, padding=1)]
        for ind in range(num_mults):
            is_last = (ind == num_mults - 1)
            use_attn = (now_res in attn_res)
            channel_mult = inner_channel * channel_mults[ind]
            for _ in range(0, res_blocks):
                downs.append(ResnetBlocWithAttn(
                    pre_channel, channel_mult, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups, dropout=dropout, with_attn=use_attn))
                feat_channels.append(channel_mult)
                pre_channel = channel_mult
            if not is_last:
                downs.append(Downsample(pre_channel))
                feat_channels.append(pre_channel)
                now_res = now_res // 2
        self.downs = nn.ModuleList(downs)

        self.mid = nn.ModuleList([
            ResnetBlocWithAttn(pre_channel, pre_channel, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups,
                               dropout=dropout, with_attn=True),
            ResnetBlocWithAttn(pre_channel, pre_channel, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups,
                               dropout=dropout, with_attn=False)
        ])

        ups = []
        for ind in reversed(range(num_mults)):
            is_last = (ind < 1)
            use_attn = (now_res in attn_res)
            channel_mult = inner_channel * channel_mults[ind]
            for _ in range(0, res_blocks + 1):
                ups.append(ResnetBlocWithAttn(
                    pre_channel + feat_channels.pop(), channel_mult, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups,
                    dropout=dropout, with_attn=use_attn))
                pre_channel = channel_mult
            if not is_last:
                ups.append(Upsample(pre_channel))
                now_res = now_res * 2

        self.ups = nn.ModuleList(ups)

        self.final_conv = Block(pre_channel, default(out_channel, in_channel), groups=norm_groups)

    def forward(self, x, time):
        t = self.noise_level_mlp(time) if exists(
            self.noise_level_mlp) else None

        feats = []
        for layer in self.downs:
            if isinstance(layer, ResnetBlocWithAttn):
                x = layer(x, t)
            else:
                x = layer(x)
            feats.append(x)

        for layer in self.mid:
            if isinstance(layer, ResnetBlocWithAttn):
                x = layer(x, t)
            else:
                x = layer(x)

        for layer in self.ups:
            if isinstance(layer, ResnetBlocWithAttn):
                x = layer(torch.cat((x, feats.pop()), dim=1), t)
            else:
                x = layer(x)

        return self.final_conv(x)


if __name__ == '__main__':
    from thop import profile
    from thop import clever_format

    unet = UNet(
        in_channel=6,
        out_channel=3,
        norm_groups=32,
        inner_channel=64,  # 64 or 128
        channel_mults=[1, 2, 4, 8, 8],  # [1,1] or [2,2] or [1] or [2]
        attn_res=[28],  # 6 or 8
        res_blocks=2,
        dropout=0.2,
        image_size=224
    )
    x = torch.randn(1, 6, 224, 224)
    # xe = torch.randn(1, 3, 28, 28)
    t = torch.tensor([1493])
    # out = unet(x, xe, t)
    # out = unet(x, t)
    flops, params = profile(unet, inputs=(x, t))
    flops, params = clever_format([flops, params], "%.3f")
    print(flops, params)
    print('done')
# [patch fragment preserved -- tail of the Complexity_SR3.py __main__
#  profiling harness, whose head sits on the previous physical line:
#    inner_channel=64,  # 64 or 128
#    channel_mults=[1,2,4,8,8],  # [1,1] or [2,2] or [1] or [2]
#    attn_res=[28],  # 6 or 8
#    res_blocks=2, dropout=0.2, image_size=224)
#    x = torch.randn(1,6,224,224); t = torch.tensor([1493])
#    flops, params = profile(unet, inputs=(x, t))
#    flops, params = clever_format([flops, params], "%.3f")
#    print(flops, params); print('done')]
#
# --- cal_complex/Complexity_SRDDPM.py ------------------------------------
# NOTE(review): reconstructed from the whitespace-mangled patch blob.
import math
from inspect import isfunction

import torch
from torch import nn
import torch.nn.functional as F


def exists(x):
    """Return True when *x* is not None."""
    return x is not None


def default(val, d):
    """Return *val* if set, else *d* (calling it when it is a function)."""
    if exists(val):
        return val
    return d() if isfunction(d) else d


# model


class TimeEmbedding(nn.Module):
    """Transformer-style sinusoidal embedding of an integer timestep."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim
        inv_freq = torch.exp(
            torch.arange(0, dim, 2, dtype=torch.float32) *
            (-math.log(10000) / dim)
        )
        self.register_buffer("inv_freq", inv_freq)

    def forward(self, input):
        shape = input.shape
        sinusoid_in = torch.ger(input.view(-1).float(), self.inv_freq)
        pos_emb = torch.cat([sinusoid_in.sin(), sinusoid_in.cos()], dim=-1)
        pos_emb = pos_emb.view(*shape, self.dim)
        return pos_emb


class Swish(nn.Module):
    """Swish activation: x * sigmoid(x)."""

    def forward(self, x):
        return x * torch.sigmoid(x)


class Upsample(nn.Module):
    """2x nearest-neighbour upsample followed by a 3x3 conv."""

    def __init__(self, dim):
        super().__init__()
        self.up = nn.Upsample(scale_factor=2, mode="nearest")
        self.conv = nn.Conv2d(dim, dim, 3, padding=1)

    def forward(self, x):
        return self.conv(self.up(x))


class Downsample(nn.Module):
    """2x strided-conv downsample."""

    def __init__(self, dim):
        super().__init__()
        self.conv = nn.Conv2d(dim, dim, 3, 2, 1)

    def forward(self, x):
        return self.conv(x)


# building block modules


class Block(nn.Module):
    """GroupNorm -> Swish -> (Dropout) -> 3x3 conv."""

    def __init__(self, dim, dim_out, groups=32, dropout=0):
        super().__init__()
        self.block = nn.Sequential(
            nn.GroupNorm(groups, dim),
            Swish(),
            nn.Dropout(dropout) if dropout != 0 else nn.Identity(),
            nn.Conv2d(dim, dim_out, 3, padding=1)
        )

    def forward(self, x):
        return self.block(x)


class ResnetBlock(nn.Module):
    """Two Blocks with an additive time-embedding MLP in between, plus a skip.

    (DDPM variant: the time embedding goes through Swish+Linear and is added
    channel-wise, unlike the SR3 FeatureWiseAffine variant.)
    """

    def __init__(self, dim, dim_out, time_emb_dim=None, dropout=0, norm_groups=32):
        super().__init__()
        self.mlp = nn.Sequential(
            Swish(),
            nn.Linear(time_emb_dim, dim_out)
        ) if exists(time_emb_dim) else None

        self.block1 = Block(dim, dim_out, groups=norm_groups)
        self.block2 = Block(dim_out, dim_out, groups=norm_groups, dropout=dropout)
        self.res_conv = nn.Conv2d(
            dim, dim_out, 1) if dim != dim_out else nn.Identity()

    def forward(self, x, time_emb):
        h = self.block1(x)
        if exists(self.mlp):
            h += self.mlp(time_emb)[:, :, None, None]
        h = self.block2(h)
        return h + self.res_conv(x)


class SelfAttention(nn.Module):
    """Full (H*W x H*W) self-attention over the spatial grid, with residual."""

    def __init__(self, in_channel, n_head=1, norm_groups=32):
        super().__init__()

        self.n_head = n_head

        self.norm = nn.GroupNorm(norm_groups, in_channel)
        self.qkv = nn.Conv2d(in_channel, in_channel * 3, 1, bias=False)
        self.out = nn.Conv2d(in_channel, in_channel, 1)

    def forward(self, input):
        batch, channel, height, width = input.shape
        n_head = self.n_head
        head_dim = channel // n_head

        norm = self.norm(input)
        qkv = self.qkv(norm).view(batch, n_head, head_dim * 3, height, width)
        query, key, value = qkv.chunk(3, dim=2)  # bhdyx

        attn = torch.einsum(
            "bnchw, bncyx -> bnhwyx", query, key
        ).contiguous() / math.sqrt(channel)
        attn = attn.view(batch, n_head, height, width, -1)
        attn = torch.softmax(attn, -1)
        attn = attn.view(batch, n_head, height, width, height, width)

        out = torch.einsum("bnhwyx, bncyx -> bnchw", attn, value).contiguous()
        out = self.out(out.view(batch, channel, height, width))

        return out + input

# [patch fragment preserved -- the original continues with
#    class ResnetBlocWithAttn(nn.Module):
#        def __init__(self, dim, dim_out, *, ...
#  which is truncated at the end of the visible source; left
#  unreconstructed rather than guessed at.]
time_emb_dim=None, norm_groups=32, dropout=0, with_attn=False): + super().__init__() + self.with_attn = with_attn + self.res_block = ResnetBlock( + dim, dim_out, time_emb_dim, norm_groups=norm_groups, dropout=dropout) + if with_attn: + self.attn = SelfAttention(dim_out, norm_groups=norm_groups) + + def forward(self, x, time_emb): + x = self.res_block(x, time_emb) + if(self.with_attn): + x = self.attn(x) + return x + + +class UNet(nn.Module): + def __init__( + self, + in_channel=6, + out_channel=3, + inner_channel=32, + norm_groups=32, + channel_mults=(1, 2, 4, 8, 8), + attn_res=(8), + res_blocks=3, + dropout=0, + with_time_emb=True, + image_size=128 + ): + super().__init__() + + if with_time_emb: + time_dim = inner_channel + self.time_mlp = nn.Sequential( + TimeEmbedding(inner_channel), + nn.Linear(inner_channel, inner_channel * 4), + Swish(), + nn.Linear(inner_channel * 4, inner_channel) + ) + else: + time_dim = None + self.time_mlp = None + + num_mults = len(channel_mults) + pre_channel = inner_channel + feat_channels = [pre_channel] + now_res = image_size + downs = [nn.Conv2d(in_channel, inner_channel, + kernel_size=3, padding=1)] + for ind in range(num_mults): + is_last = (ind == num_mults - 1) + use_attn = (now_res in attn_res) + channel_mult = inner_channel * channel_mults[ind] + for _ in range(0, res_blocks): + downs.append(ResnetBlocWithAttn( + pre_channel, channel_mult, time_emb_dim=time_dim, norm_groups=norm_groups, dropout=dropout, with_attn=use_attn)) + feat_channels.append(channel_mult) + pre_channel = channel_mult + if not is_last: + downs.append(Downsample(pre_channel)) + feat_channels.append(pre_channel) + now_res = now_res//2 + self.downs = nn.ModuleList(downs) + + self.mid = nn.ModuleList([ + ResnetBlocWithAttn(pre_channel, pre_channel, time_emb_dim=time_dim, norm_groups=norm_groups, + dropout=dropout, with_attn=True), + ResnetBlocWithAttn(pre_channel, pre_channel, time_emb_dim=time_dim, norm_groups=norm_groups, + dropout=dropout, with_attn=False) 
+ ]) + + ups = [] + for ind in reversed(range(num_mults)): + is_last = (ind < 1) + use_attn = (now_res in attn_res) + channel_mult = inner_channel * channel_mults[ind] + for _ in range(0, res_blocks+1): + ups.append(ResnetBlocWithAttn( + pre_channel+feat_channels.pop(), channel_mult, time_emb_dim=time_dim, dropout=dropout, norm_groups=norm_groups, with_attn=use_attn)) + pre_channel = channel_mult + if not is_last: + ups.append(Upsample(pre_channel)) + now_res = now_res*2 + + self.ups = nn.ModuleList(ups) + + self.final_conv = Block(pre_channel, default(out_channel, in_channel), groups=norm_groups) + + def forward(self, x, time): + t = self.time_mlp(time) if exists(self.time_mlp) else None + + feats = [] + for layer in self.downs: + if isinstance(layer, ResnetBlocWithAttn): + x = layer(x, t) + else: + x = layer(x) + feats.append(x) + + for layer in self.mid: + if isinstance(layer, ResnetBlocWithAttn): + x = layer(x, t) + else: + x = layer(x) + + for layer in self.ups: + if isinstance(layer, ResnetBlocWithAttn): + x = layer(torch.cat((x, feats.pop()), dim=1), t) + else: + x = layer(x) + + return self.final_conv(x) + +if __name__ == '__main__': + from thop import profile + from thop import clever_format + + unet = UNet( + in_channel=6, + out_channel=3, + norm_groups=32, + inner_channel=64, # 64 or 128 + channel_mults=[1,1,2,2,4,4], # [1,1] or [2,2] or [1] or [2] + attn_res=[28], # 6 or 8 + res_blocks=2, + dropout=0.2, + image_size=224 + ) + x = torch.randn(1,6,224,224) + # xe = torch.randn(1,3,28,28) + t = torch.tensor([1493]) + # out = unet(x, xe, t) + # out = unet(x, t) + flops, params = profile(unet, inputs=(x, t)) + flops, params = clever_format([flops, params], "%.3f") + print(flops, params) + print('done') \ No newline at end of file diff --git a/cal_complex/Complexity_SRDiff.py b/cal_complex/Complexity_SRDiff.py new file mode 100644 index 0000000..a79db9f --- /dev/null +++ b/cal_complex/Complexity_SRDiff.py @@ -0,0 +1,564 @@ +import functools +import torch 
+from torch import nn +import torch.nn.functional as F +import os +import math +import torch +import torch.nn.functional as F +from einops import rearrange +from torch import nn +from torch.nn import Parameter + + +from inspect import isfunction +from torch import nn +from torch.nn import init + + +def exists(x): + return x is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def cycle(dl): + while True: + for data in dl: + yield data + + +def num_to_groups(num, divisor): + groups = num // divisor + remainder = num % divisor + arr = [divisor] * groups + if remainder > 0: + arr.append(remainder) + return arr + + +def initialize_weights(net_l, scale=0.1): + if not isinstance(net_l, list): + net_l = [net_l] + for net in net_l: + for m in net.modules(): + if isinstance(m, nn.Conv2d): + init.kaiming_normal_(m.weight, a=0, mode='fan_in') + m.weight.data *= scale # for residual block + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + init.kaiming_normal_(m.weight, a=0, mode='fan_in') + m.weight.data *= scale + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + init.constant_(m.weight, 1) + init.constant_(m.bias.data, 0.0) + + +def make_layer(block, n_layers, seq=False): + layers = [] + for _ in range(n_layers): + layers.append(block()) + if seq: + return nn.Sequential(*layers) + else: + return nn.ModuleList(layers) + + +class Residual(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + + def forward(self, x, *args, **kwargs): + return self.fn(x, *args, **kwargs) + x + + +class SinusoidalPosEmb(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, x): + device = x.device + half_dim = self.dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, device=device) * -emb) + emb = x[:, None] * emb[None, :] + emb = torch.cat((emb.sin(), emb.cos()), dim=-1) + 
return emb + + +class Mish(nn.Module): + def forward(self, x): + return x * torch.tanh(F.softplus(x)) + + +class Rezero(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + self.g = nn.Parameter(torch.zeros(1)) + + def forward(self, x): + return self.fn(x) * self.g + + +# building block modules + +class Block(nn.Module): + def __init__(self, dim, dim_out, groups=8): + super().__init__() + if groups == 0: + self.block = nn.Sequential( + nn.ReflectionPad2d(1), + nn.Conv2d(dim, dim_out, 3), + Mish() + ) + else: + self.block = nn.Sequential( + nn.ReflectionPad2d(1), + nn.Conv2d(dim, dim_out, 3), + nn.GroupNorm(groups, dim_out), + Mish() + ) + + def forward(self, x): + return self.block(x) + + +class ResnetBlock(nn.Module): + def __init__(self, dim, dim_out, *, time_emb_dim=0, groups=8): + super().__init__() + if time_emb_dim > 0: + self.mlp = nn.Sequential( + Mish(), + nn.Linear(time_emb_dim, dim_out) + ) + + self.block1 = Block(dim, dim_out, groups=groups) + self.block2 = Block(dim_out, dim_out, groups=groups) + self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity() + + def forward(self, x, time_emb=None, cond=None): + h = self.block1(x) + if time_emb is not None: + h += self.mlp(time_emb)[:, :, None, None] + if cond is not None: + h += cond + h = self.block2(h) + return h + self.res_conv(x) + + +class Upsample(nn.Module): + def __init__(self, dim): + super().__init__() + self.conv = nn.Sequential( + nn.ConvTranspose2d(dim, dim, 4, 2, 1), + ) + + def forward(self, x): + return self.conv(x) + + +class Downsample(nn.Module): + def __init__(self, dim): + super().__init__() + self.conv = nn.Sequential( + nn.ReflectionPad2d(1), + nn.Conv2d(dim, dim, 3, 2), + ) + + def forward(self, x): + return self.conv(x) + + +class LinearAttention(nn.Module): + def __init__(self, dim, heads=4, dim_head=32): + super().__init__() + self.heads = heads + hidden_dim = dim_head * heads + self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, 
bias=False) + self.to_out = nn.Conv2d(hidden_dim, dim, 1) + + def forward(self, x): + b, c, h, w = x.shape + qkv = self.to_qkv(x) + q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads=self.heads, qkv=3) + k = k.softmax(dim=-1) + context = torch.einsum('bhdn,bhen->bhde', k, v) + out = torch.einsum('bhde,bhdn->bhen', context, q) + out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) + return self.to_out(out) + + +class MultiheadAttention(nn.Module): + def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True, + add_bias_kv=False, add_zero_attn=False): + super().__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + self.scaling = self.head_dim ** -0.5 + if self.qkv_same_dim: + self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) + else: + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + + if bias: + self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + if add_bias_kv: + self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) + self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self.reset_parameters() + + self.enable_torch_version = False + if hasattr(F, "multi_head_attention_forward"): + self.enable_torch_version = True + else: + 
self.enable_torch_version = False + self.last_attn_probs = None + + def reset_parameters(self): + if self.qkv_same_dim: + nn.init.xavier_uniform_(self.in_proj_weight) + else: + nn.init.xavier_uniform_(self.k_proj_weight) + nn.init.xavier_uniform_(self.v_proj_weight) + nn.init.xavier_uniform_(self.q_proj_weight) + + nn.init.xavier_uniform_(self.out_proj.weight) + if self.in_proj_bias is not None: + nn.init.constant_(self.in_proj_bias, 0.) + nn.init.constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + nn.init.xavier_normal_(self.bias_k) + if self.bias_v is not None: + nn.init.xavier_normal_(self.bias_v) + + def forward( + self, + query, key, value, + key_padding_mask=None, + need_weights=True, + attn_mask=None, + before_softmax=False, + need_head_weights=False, + ): + """Input shape: [B, T, C] + + Args: + key_padding_mask (ByteTensor, optional): mask to exclude + keys that are pads, of shape `(batch, src_len)`, where + padding elements are indicated by 1s. + need_weights (bool, optional): return the attention weights, + averaged over heads (default: False). + attn_mask (ByteTensor, optional): typically used to + implement causal attention, where the mask prevents the + attention from looking forward in time (default: None). + before_softmax (bool, optional): return the raw attention + weights and values before the attention softmax. + need_head_weights (bool, optional): return the attention + weights for each head. Implies *need_weights*. Default: + return the average attention weights over all heads. 
+ """ + if need_head_weights: + need_weights = True + query = query.transpose(0, 1) + key = key.transpose(0, 1) + value = value.transpose(0, 1) + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == self.embed_dim + assert list(query.size()) == [tgt_len, bsz, embed_dim] + attn_output, attn_output_weights = F.multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, self.bias_k, self.bias_v, + self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, + self.training, key_padding_mask, need_weights, attn_mask) + attn_output = attn_output.transpose(0, 1) + return attn_output, attn_output_weights + + def in_proj_qkv(self, query): + return self._in_proj(query).chunk(3, dim=-1) + + def in_proj_q(self, query): + if self.qkv_same_dim: + return self._in_proj(query, end=self.embed_dim) + else: + bias = self.in_proj_bias + if bias is not None: + bias = bias[:self.embed_dim] + return F.linear(query, self.q_proj_weight, bias) + + def in_proj_k(self, key): + if self.qkv_same_dim: + return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim) + else: + weight = self.k_proj_weight + bias = self.in_proj_bias + if bias is not None: + bias = bias[self.embed_dim:2 * self.embed_dim] + return F.linear(key, weight, bias) + + def in_proj_v(self, value): + if self.qkv_same_dim: + return self._in_proj(value, start=2 * self.embed_dim) + else: + weight = self.v_proj_weight + bias = self.in_proj_bias + if bias is not None: + bias = bias[2 * self.embed_dim:] + return F.linear(value, weight, bias) + + def _in_proj(self, input, start=0, end=None): + weight = self.in_proj_weight + bias = self.in_proj_bias + weight = weight[start:end, :] + if bias is not None: + bias = bias[start:end] + return F.linear(input, weight, bias) + + +class ResidualDenseBlock_5C(nn.Module): + def __init__(self, nf=64, gc=32, bias=True): + super(ResidualDenseBlock_5C, self).__init__() + # gc: growth channel, i.e. 
intermediate channels + self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias) + self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias) + self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias) + self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias) + self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias) + self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) + + # initialization + # mutil.initialize_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1) + + def forward(self, x): + x1 = self.lrelu(self.conv1(x)) + x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1))) + x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1))) + x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1))) + x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) + return x5 * 0.2 + x + + +class RRDB(nn.Module): + '''Residual in Residual Dense Block''' + + def __init__(self, nf, gc=32): + super(RRDB, self).__init__() + self.RDB1 = ResidualDenseBlock_5C(nf, gc) + self.RDB2 = ResidualDenseBlock_5C(nf, gc) + self.RDB3 = ResidualDenseBlock_5C(nf, gc) + + def forward(self, x): + out = self.RDB1(x) + out = self.RDB2(out) + out = self.RDB3(out) + return out * 0.2 + x + +class RRDBNet(nn.Module): + def __init__(self, in_nc, out_nc, nf, nb, gc=32): + super(RRDBNet, self).__init__() + RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc) + + self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True) + self.RRDB_trunk = make_layer(RRDB_block_f, nb) + self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) + #### upsampling + self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) + self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) + if hparams['sr_scale'] == 8: + self.upconv3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) + self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) + self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True) + + self.lrelu = nn.LeakyReLU(negative_slope=0.2) + + def forward(self, x, get_fea=False): + feas = [] + x = (x + 1) / 2 + fea_first = fea = 
self.conv_first(x) + for l in self.RRDB_trunk: + fea = l(fea) + feas.append(fea) + trunk = self.trunk_conv(fea) + fea = fea_first + trunk + feas.append(fea) + + fea = self.lrelu(self.upconv1(F.interpolate(fea, scale_factor=2, mode='nearest'))) + fea = self.lrelu(self.upconv2(F.interpolate(fea, scale_factor=2, mode='nearest'))) + if hparams['sr_scale'] == 8: + fea = self.lrelu(self.upconv3(F.interpolate(fea, scale_factor=2, mode='nearest'))) + fea_hr = self.HRconv(fea) + out = self.conv_last(self.lrelu(fea_hr)) + out = out.clamp(0, 1) + out = out * 2 - 1 + if get_fea: + return out, feas + else: + return out + + +class Unet(nn.Module): + def __init__(self, dim, out_dim=None, dim_mults=(1, 2, 4, 8), cond_dim=32): + super().__init__() + dims = [3, *map(lambda m: dim * m, dim_mults)] + in_out = list(zip(dims[:-1], dims[1:])) + groups = 0 + + self.cond_proj = nn.ConvTranspose2d(cond_dim * ((hparams['rrdb_num_block'] + 1) // 3), + dim, hparams['sr_scale'] * 2, hparams['sr_scale'], + hparams['sr_scale'] // 2) + + self.time_pos_emb = SinusoidalPosEmb(dim) + self.mlp = nn.Sequential( + nn.Linear(dim, dim * 4), + Mish(), + nn.Linear(dim * 4, dim) + ) + + self.downs = nn.ModuleList([]) + self.ups = nn.ModuleList([]) + num_resolutions = len(in_out) + + for ind, (dim_in, dim_out) in enumerate(in_out): + is_last = ind >= (num_resolutions - 1) + + self.downs.append(nn.ModuleList([ + ResnetBlock(dim_in, dim_out, time_emb_dim=dim, groups=groups), + ResnetBlock(dim_out, dim_out, time_emb_dim=dim, groups=groups), + Downsample(dim_out) if not is_last else nn.Identity() + ])) + + mid_dim = dims[-1] + self.mid_block1 = ResnetBlock(mid_dim, mid_dim, time_emb_dim=dim, groups=groups) + if hparams['use_attn']: + self.mid_attn = Residual(Rezero(LinearAttention(mid_dim))) + self.mid_block2 = ResnetBlock(mid_dim, mid_dim, time_emb_dim=dim, groups=groups) + + for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])): + is_last = ind >= (num_resolutions - 1) + + 
self.ups.append(nn.ModuleList([ + ResnetBlock(dim_out * 2, dim_in, time_emb_dim=dim, groups=groups), + ResnetBlock(dim_in, dim_in, time_emb_dim=dim, groups=groups), + Upsample(dim_in) if not is_last else nn.Identity() + ])) + + self.final_conv = nn.Sequential( + Block(dim, dim, groups=groups), + nn.Conv2d(dim, out_dim, 1) + ) + + if hparams['res'] and hparams['up_input']: + self.up_proj = nn.Sequential( + nn.ReflectionPad2d(1), nn.Conv2d(3, dim, 3), + ) + if hparams['use_wn']: + self.apply_weight_norm() + if hparams['weight_init']: + self.apply(initialize_weights) + + def apply_weight_norm(self): + def _apply_weight_norm(m): + if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): + torch.nn.utils.weight_norm(m) + # print(f"| Weight norm is applied to {m}.") + + self.apply(_apply_weight_norm) + + def forward(self, x, time, cond, img_lr_up): + t = self.time_pos_emb(time) + t = self.mlp(t) + + h = [] + cond = self.cond_proj(torch.cat(cond[2::3], 1)) + for i, (resnet, resnet2, downsample) in enumerate(self.downs): + x = resnet(x, t) + x = resnet2(x, t) + if i == 0: + x = x + cond + if hparams['res'] and hparams['up_input']: + x = x + self.up_proj(img_lr_up) + h.append(x) + x = downsample(x) + + x = self.mid_block1(x, t) + if hparams['use_attn']: + x = self.mid_attn(x) + x = self.mid_block2(x, t) + + for resnet, resnet2, upsample in self.ups: + x = torch.cat((x, h.pop()), dim=1) + x = resnet(x, t) + x = resnet2(x, t) + x = upsample(x) + + return self.final_conv(x) + + def make_generation_fast_(self): + def remove_weight_norm(m): + try: + nn.utils.remove_weight_norm(m) + except ValueError: # this module didn't have weight norm + return + + self.apply(remove_weight_norm) + +if __name__ == '__main__': + global hparams + hparams = {} + hparams['sr_scale'] = 8 + hparams['use_attn'] = False + hparams['res'] = True + hparams['up_input'] = False + hparams['weight_init'] = False + hparams['use_wn'] = False + hparams['rrdb_num_block'] = 17 + + hidden_size = 64 + 
dim_mults = "1|2|3|4" + dim_mults = [int(x) for x in dim_mults.split('|')] + denoise_fn = Unet( + hidden_size, out_dim=3, cond_dim=64, dim_mults=dim_mults) + + x = torch.randn(1, 3, 224, 224) + img_lr_up = torch.randn(1, 3, 224, 224) + time = torch.tensor([99]) + + cond = [] + for _ in range(18): + cond.append(torch.randn(1, 64, 28, 28)) + + from thop import profile + from thop import clever_format + flops, params = profile(denoise_fn, inputs=(x, time, cond, img_lr_up)) + flops, params = clever_format([flops, params], "%.3f") + print(flops, params) + + print('done') diff --git a/config/sr_lwtdm.json b/config/sr_lwtdm.json new file mode 100644 index 0000000..c7c0c59 --- /dev/null +++ b/config/sr_lwtdm.json @@ -0,0 +1,90 @@ +{ + "name": "sr_lwtdm", + "phase": "train", + "gpu_ids": [ + 0 + ], + "path": { + "log": "logs", + "tb_logger": "tb_logger", + "results": "results", + "checkpoint": "checkpoint", + "resume_state": null + // "resume_state": "experiments/sr_lwtdm_XXXXXX_XXXXXX/checkpoint/IXEX" //pretrain model or training state + }, + "datasets": { + "train": { + "name": "AID", + "mode": "HR", + "dataroot": "dataset/AID_28_224", + "datatype": "img", + "l_resolution": 28, + "r_resolution": 224, + "batch_size": 16, + "num_workers": 8, + "use_shuffle": true, + "data_len": -1 + }, + "val": { + "name": "RSSCN7", + "mode": "LRHR", + "dataroot": "dataset/RSSCN7val_28_224", + "datatype": "img", + "l_resolution": 28, + "r_resolution": 224, + "data_len": 3 + } + }, + "model": { + "which_model_G": "lwtdm", + "finetune_norm": false, + "net": { + "in_channel": 3, + "out_channel": 3, + "inner_channel": 32, // 64 or 128 + "channel_multiplier": [8], // [1,1] or [2,2] or [1] or [2] + "attn_res": 2, // 4 or 6 or 8 + "res_blocks": 2, + "dropout": 0.2 + }, + "beta_schedule": { + "train": { + "schedule": "linear", + "n_timestep": 2000, + "linear_start": 1e-4, + "linear_end": 2e-2 + }, + "val": { + "schedule": "linear", + "n_timestep": 2000, + "linear_start": 1e-4, + "linear_end": 2e-2, 
+ "sampling_timesteps": 2000, + "ddim_sampling_eta": 0.0 + } + }, + "diffusion": { + "image_size": 224, + "channels": 3, + "conditional": true + } + }, + "train": { + "n_iter": 2000000, + "val_freq": 1e5, + "save_checkpoint_freq": 1e5, + "print_freq": 200, + "optimizer": { + "type": "adam", + "lr": 1e-4 + }, + "ema_scheduler": { + "step_start_ema": 5000, + "update_ema_every": 1, + "ema_decay": 0.9999 + } + }, + "wandb": { + "project": "sr_lwtdm" + } +} \ No newline at end of file diff --git a/config/sr_sr3.json b/config/sr_sr3.json new file mode 100644 index 0000000..afbec2c --- /dev/null +++ b/config/sr_sr3.json @@ -0,0 +1,96 @@ +{ + "name": "sr_sr3", + "phase": "train", + "gpu_ids": [ + 0 + ], + "path": { + "log": "logs", + "tb_logger": "tb_logger", + "results": "results", + "checkpoint": "checkpoint", + "resume_state": null + // "resume_state": "experiments/sr_sr3_XXXXXX_XXXXXX/checkpoint/I1000000_E200" //pretrain model or training state + }, + "datasets": { + "train": { + "name": "AID", + "mode": "HR", + "dataroot": "dataset/AID_28_224", + "datatype": "img", + "l_resolution": 28, + "r_resolution": 224, + "batch_size": 8, + "num_workers": 4, + "use_shuffle": true, + "data_len": -1 + }, + "val": { + "name": "RSSCN7", + "mode": "LRHR", + "dataroot": "dataset/RSSCN7val_28_224", + "datatype": "img", + "l_resolution": 28, + "r_resolution": 224, + "data_len": 3 + } + }, + "model": { + "which_model_G": "sr3", + "finetune_norm": false, + "unet": { + "in_channel": 6, + "out_channel": 3, + "inner_channel": 64, + "channel_multiplier": [ + 1, + 2, + 4, + 8, + 8 + ], + "attn_res": [ + 28 + ], + "res_blocks": 1, + "dropout": 0.2 + }, + "beta_schedule": { + "train": { + "schedule": "linear", + "n_timestep": 2000, + "linear_start": 1e-6, + "linear_end": 1e-2 + }, + "val": { + "schedule": "linear", + "n_timestep": 2000, + "linear_start": 1e-6, + "linear_end": 1e-2 + } + }, + "diffusion": { + "image_size": 224, + "channels": 3, + "conditional": true + } + }, + "train": { + 
"n_iter": 1000000, + "val_freq": 1e5, + "save_checkpoint_freq": 1e5, + "print_freq": 200, + "optimizer": { + "type": "adam", + "lr": 1e-4 + }, + "ema_scheduler": { + "step_start_ema": 5000, + "update_ema_every": 1, + "ema_decay": 0.9999 + } + }, + "wandb": { + "project": "sr_sr3" + } +} \ No newline at end of file diff --git a/config/sr_srddpm.json b/config/sr_srddpm.json new file mode 100644 index 0000000..6a240e4 --- /dev/null +++ b/config/sr_srddpm.json @@ -0,0 +1,97 @@ +{ + "name": "sr_srddpm", + "phase": "train", + "gpu_ids": [ + 0 + ], + "path": { + "log": "logs", + "tb_logger": "tb_logger", + "results": "results", + "checkpoint": "checkpoint", + "resume_state": null + // "resume_state": "experiments/sr_srddpm_XXXXXX_XXXXXX/checkpoint/I1000000_E400" //pretrain model or training state + }, + "datasets": { + "train": { + "name": "AID", + "mode": "HR", + "dataroot": "dataset/AID_28_224", + "datatype": "img", + "l_resolution": 28, + "r_resolution": 224, + "batch_size": 16, + "num_workers": 8, + "use_shuffle": true, + "data_len": -1 + }, + "val": { + "name": "RSSCN7", + "mode": "LRHR", + "dataroot": "dataset/RSSCN7val_28_224", + "datatype": "img", + "l_resolution": 28, + "r_resolution": 224, + "data_len": 3 + } + }, + "model": { + "which_model_G": "ddpm", + "finetune_norm": false, + "unet": { + "in_channel": 6, + "out_channel": 3, + "inner_channel": 64, + "channel_multiplier": [ + 1, + 1, + 2, + 2, + 4, + 4 + ], + "attn_res": [ + 28 + ], + "res_blocks": 1, + "dropout": 0.2 + }, + "beta_schedule": { + "train": { + "schedule": "linear", + "n_timestep": 2000, + "linear_start": 1e-4, + "linear_end": 2e-2 + }, + "val": { + "schedule": "linear", + "n_timestep": 2000, + "linear_start": 1e-4, + "linear_end": 2e-2 + } + }, + "diffusion": { + "image_size": 224, + "channels": 3, + "conditional": true + } + }, + "train": { + "n_iter": 1000000, + "val_freq": 1e5, + "save_checkpoint_freq": 1e5, + "print_freq": 200, + "optimizer": { + "type": "adam", + "lr": 1e-4 + }, + 
"ema_scheduler": { + "step_start_ema": 5000, + "update_ema_every": 1, + "ema_decay": 0.9999 + } + }, + "wandb": { + "project": "sr_srddpm" + } +} \ No newline at end of file diff --git a/core/__pycache__/logger.cpython-36.pyc b/core/__pycache__/logger.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..873ca3caa0e5d254fa32bba40ccbb1821e36583e GIT binary patch literal 3972 zcma)9?{XWr5eIO`{p45hr~Vcv%8ruG^f&+(*v0N*vDjT+tyaD5 z@4uJDI%EG~uYES)kI|C9qmxYX5$on7ABwJJR_v~gQH%;>r|a+>TkICGW{upj+jX&K z%ffTkElEcfF?!OKC5+`ynDk`%Ig{nEGPk-_S&>z&*JMMUk@e@idnUApb-A=F*M~C6gAlkV5=)vZ_jb5z6 zjZxh1hiZNDOgrO!Vo)NbU383lye>Shs(^d6u=wbF=4>?pDI4-R8w$ziWG1mSt+t-E z#x3c$Zny5Ww%eAnK&GuIeyUyQyGyn*TD{g)h@{#NQ=hOn-FUKurG!K=uHYi<#Jn`Q zh-Osy8z?e1WHXMQ&jflggNcXM%$9ssNbO8WF&E#g&zz%oGY7_gb5_iX!-BNtT-s|) z7FHNm3R&?tY*si}f5gc2e`KGs&sgdVfk|EId@W`!xR%scvN-fISGsF#&S~thv^=Z~ ztFk1$1tzV{*&I@5rK|*+GNe>G>}V^8hiB$=>DRvyGjCRw)y&&3s_$eeA9HTlSDuTU zKJy@>zJN(CX9wRsVj0P=by$Wsv0wU@tINiNF~{ka=6o~PaG5A(m1tEiLCdqU@tk4b z@)~rQVxlbr$n z%8&7xjrDbC;KKDcwlvdRO~uBBqEMi@ho`#~?+n9U>Q4eZE`KkM_ouE5cMkjhmosfg z!8q)e!=p*4;P+9QObcJmfKeVXsK18rRUD^XM}^7ZIK&&@3z8p3915G;Xh{QIACKa3 z1yKdYne%^%1A9A<1klJDL@C0ASbtxOfjspHUwWMpLQO9oPp*GJ+~@^5Y`QiHR1&Ut zMsY6~CD#Egs9y5-bg@4<^anCId84$8o_#{?JUXaTTd4C8_CF9Nym16n-3km9B(QurmsCy7r1%As$XIj&=uq zbqNzimeh7U0mniz84Xfx50e;^Q5?ub(V5lTX4h`4#zCrzv|{wOhd7=I$ht5{!*OEr zgD!^AlYt@@#xmN1wu&4`U8U|rbnUvf1JzHAK3zAS(4$EAUTM^INeQ2F8sF9SXb>U8 zflQGDX=^`xW>o09l*OD0GZF`A^uqg5$}N`VT*k`|(ft=+9S3aPm05jA}@l8fFzOX!^+za4X6JY_oxN4x4K5T{L0_Wki7 zLBY`X{T+^Fu*YU?@u9>JxMU znvvpiGd~Flq&GElh%W$~R7D-sMo8dCO-kM-H8zkMk;YRiLy|_C6CHMN;So#iUBt70 zqRudT3#h0b3ed(r zT0(UZ6>a3vb&MXbiJa|QK%6)e38tuG?%^K*P$r3^zryy+*!~G@kK!rinVq79QGcNV z3MtlVum%c=6%^Jh45gCD1n$QM1_jhy^TQ7cR_`6M!@|` z(>1b5GmV=Ri=%!M5vZA0sftR%m%ORo!8o<9{n=A=vZ;9NO~tjQI*UoWkeicEsqUjg z@C3gPz%0)*h1{<0Ad<+p{zzNnq_6qD=1)#r_ET&(3Cvg)QDYf4U8T4oE^|fLTRoCP zAH~KK@6Irxx;TSRq69V7u@urm5$UAX&>j|GHGAk}0tJ|u6+wZLR2GiQOn2yJ4i$M> 
zVOXLUU?3Eo2C~%==*xI9C@x>4^s*A+Wf4|%r&VN~=HcYna?=l6mxH87wRJmLZ(Sav zc1YUqUzN=(&DP~n_#`xMyPaIa6!mu!^g%dXYCXHs8eeJ2X6yE~*1c<}=D9A?|DD}& zI(5FBt>a=ee~ML`(%DwKqKkA_xHnxwHGkLqyU?XuCX1w@dX-322S!rAL#NBP2BYvc zlG=!p7Zg7s#~%hTA@#HuNV%Y?Zl@{?#>dmPr^~s6n9!wds+97iS1?D@PSR5$avevS z%o7?^>6+ZR3(;Ea?rD+iow|Rc%HhP%d4B~>lT~yK|A}lkLW5F+`PtmXs0P5Hnjx2L zFPnqvVw|j-97pcawc=!=J_n@By?8W2RXB*F<81p|ESo<{CY_r6YO;s%VG|efr%Ya_ r?K@ky?i+cpseBTWwA;x8_jL|pqciE}B2}STuIt+F-MZseyy|}e*z}T! literal 0 HcmV?d00001 diff --git a/core/__pycache__/metrics.cpython-36.pyc b/core/__pycache__/metrics.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ae29ed6ea1042b4ce44d7db82a300e8a4b9794e GIT binary patch literal 2918 zcmb7G&2Jn@6|d^A=^2kFwll;|vWa?EuF!B&wH7XY8-+(r2Q*iIRQ{BADO<)+uY=;ks{hOkG~YRY=qI=5d1n@qT0AV?cLP8zBOheGdG3# z)R0QqIUDnlrJS6pigNqa+#2&Sr?vmg?AIV`tC@a1w?%DijO_SbXjY*4OU!+lJEOTC zxDL3#`wus6s9E9m8@WkVf68+XYgZugM_8;jS$~dZ$nE}o4n9tye)gxDhi^NHj{~Vv z&-YX!yGP#RAWjqMg|SLJKlZ}N?*-mo77b55UrPT}Ykd$8GqvH}-C5pRS-!Kj-dcHo zE_Ty7e`f)E<@gCjZ^14TU90vn0ks^@V-os33gV2Y@%61F9 z3-uqrz0!W?osU+$c(|HjVc+$ZMR4e61J%;nl5S7Z?VVd7KWG^pE0)PgQH>KB`GfE* zI5&t(B4;}f^o?=NoHBNffL- z&w`J`cSG8O5k}kLrHGZQry%(4(J@dfnG{? 
z!MJ=Kw5!`_{MY&(VSsQ!abN-m8t`eGW6T6P@Fk$xRf>Cq5CYM-p)7$2-Dkp5cHa>; zXeEdKO-MCqU(VizE0Hf8o;NYkg}b?O34OT$QdG1zedjLRCGcZeCB_wE*n_0E{Ls*tLewsKsLxIU4(p`~t4q-v0w6=q&*A_Ximv}VyV7a6FVR~PkYK9kqc(xXTa zjl9u!$8_PZ?e#us{^9;#&R*O*{{rW!ufO-%?U#T1;b*_OcRqum^7h$3?v)rvA=2v| z(r0bwM0*&2LZO>P?Q{@c0~;ZAeXBp4@$kO}|HnT%BV6sgX7n7iHE3Qt3EfRyzL2 z#y-gz`VH`F(x7)j`6B4Ck@4Is@nDC2)I{(YeFOvH25^pZ_H6G7Q%;EwrL#;eSiq9s zDiho)y(@H$70`UKi`Cfl1`rP1t8%tG4R3FM|ASqfG1XDvrG6B6xGf}JWdGFLeedyu z_qN_jwE#iJ3X*@Xz2PDKdX#@PS zp2#Hb;cbCACohXj(tE91VW&s_Feqw2@(0-@y;(72^kpEm(IWz>Z8!{KQG%eYUnSbZ zB&DF1b;-?ay7RQ);g_A^66TS}hfqu@A27R4SxWy6xJZLHQ5t*+*bT0Zp_WmG5~YI{ zm$#^zEsP}9;49}zj_`RBBl=l&9MaoiHB;dr)p=fCBMHiZI>%^y)tfBmxyVhrRVJwK cA#G)Y;9Yv|q%=dBVO?G~=Wf+(x8^SX8xrES+W-In literal 0 HcmV?d00001 diff --git a/core/__pycache__/wandb_logger.cpython-36.pyc b/core/__pycache__/wandb_logger.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..806422b8213152929b95cf2034e3180fa8454e4a GIT binary patch literal 3399 zcmaJ@&5qkf67FvPh@wWa*Pi$%@FqlpB<5mQ6CgPa?7=4McwxY+Lp;bN1Og6+-IA!G zNTo@|9!3KMDZ<;z-?yLq z_8aeCA>_Yg>(PMzE41uQ=s4jtCLZ}a;f!m~3D?%lqnth=uKqumC!-Mi=4scCU_=n$ zBp&0`)2h{;&aZQWo6o6d@D8`06VK#bZo{|3dwd7J7T@Iqeg!zX{0;sGehny__hG*q zW$!3R_{rmRG7++HZ3iCsah6$>Oq}B*5lyCf<^YfLX%u84J02>Ykk#s;b^~aCgO>dY z-PsNY4P$c7j!1GxCpkT}hc+{2CRj?=O#WMqgVks;T01d#{{;m;D=|5&@~xVGp!k2;=AURa zZ@1>{asrrK8A%Fu6vu}tz#yD_Dx6nwK$HM7XAuj4#fg$E58~K)if9f$Unb7+B3isW zeC*_DI}+u?(oo!GXCc#>2=k&p+!N0hLPm3u^3pgAPGV8oxWAt(x~D`{KduJbiFjOU=^|IMe_g}XWpsxQ zu-8p(hnmy|+R|(`pcYX04wza04Op>zvuyTkJ7}Mi(E!w=A;Su^yvLhhjz~BhH_zu$K607tgIc8qED!>yV%e!>%G>a#Ag#KaIN3vw)(n4!flM7u zv$(kp#1_xxP~3r)Effd|TftF{=$Zl~n3`BowCX;F451>=38W0byjM{;6=!}D%*DQg zJpzE|2u7V&ijv?f3Ba|SFU}fq<4Ss!YS0HGKmt*bm_JM75k`Fjj@4}C^|mBlM5&5< zT=#K{QKxw0B1THWO9)~3W6)&pK=-dYSbij9NZ6b`w^r-{X>rm3C%yQgwGeTNqbx`B zmE^|28m~>zG&Z~jE)-Oq@@&7ATEVs@J8L%&F^l1Y@qi(J9a?q^x(Rst>#OIqv1U#G z!@kjrE4Q(VOb%e-A_* z?<(4%1kszSAirSeTCQV}f%(qNz(T~08F+d{*KEzs^;c%ityyEG&AKZc&yAuCMd2Ry 
zO5>dg<(5h_U5FLC_^EXl#tI&hdCEoXgi{foF48E;ogl*m1Cf(Q;~>nlP0)l(DWOvM zVv&Z^eYLtCa@5e~s5P8fE*38{o&>M(M9>JWR@C$%jSZw@7kc*)WXDprtBOMM1b zS4GDhr*aN$W52OZ8C~u)zi5;?+2ryQ9toQ)+?Tb*iD<8I((I(R=qz^z8phV)N_U>Fu4Q&n%u8vHT>Jma>$3SQM}!v*PKrUP&?{%TQwj>VR`oK>Pmz* zja*%!x2atfW~|DkmsN8We47i_93*ClAknM9do|=X%q~52*=Xd_hi(tAQdoI?zqEZk z&&wF;9pC?E8N~HW$M<;}`o6@fAn^(y-^T7;?C|y_Kf(?x=9cr68!-_}#9M=wU4_ol kbZWh4TUHm|UHiJ4g=*q9oR^6xt$USj2Ux9C4YAn&0U~}*C;$Ke literal 0 HcmV?d00001 diff --git a/core/logger.py b/core/logger.py new file mode 100644 index 0000000..e0634ca --- /dev/null +++ b/core/logger.py @@ -0,0 +1,141 @@ +import os +import os.path as osp +import logging +from collections import OrderedDict +import json +from datetime import datetime + + +def mkdirs(paths): + if isinstance(paths, str): + os.makedirs(paths, exist_ok=True) + else: + for path in paths: + os.makedirs(path, exist_ok=True) + + +def get_timestamp(): + return datetime.now().strftime('%y%m%d_%H%M%S') + + +def parse(args): + phase = args.phase + opt_path = args.config + gpu_ids = args.gpu_ids + enable_wandb = args.enable_wandb + # remove comments starting with '//' + json_str = '' + with open(opt_path, 'r') as f: + for line in f: + line = line.split('//')[0] + '\n' + json_str += line + opt = json.loads(json_str, object_pairs_hook=OrderedDict) + + # set log directory + if args.debug: + opt['name'] = 'debug_{}'.format(opt['name']) + experiments_root = os.path.join( + 'experiments', '{}_{}'.format(opt['name'], get_timestamp())) + opt['path']['experiments_root'] = experiments_root + for key, path in opt['path'].items(): + if 'resume' not in key and 'experiments' not in key: + opt['path'][key] = os.path.join(experiments_root, path) + mkdirs(opt['path'][key]) + + # change dataset length limit + opt['phase'] = phase + + # export CUDA_VISIBLE_DEVICES + if gpu_ids is not None: + opt['gpu_ids'] = [int(id) for id in gpu_ids.split(',')] + gpu_list = gpu_ids + else: + gpu_list = ','.join(str(x) for x in opt['gpu_ids']) + 
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list + print('export CUDA_VISIBLE_DEVICES=' + gpu_list) + if len(gpu_list) > 1: + opt['distributed'] = True + else: + opt['distributed'] = False + + # debug + if 'debug' in opt['name']: + opt['train']['val_freq'] = 2 + opt['train']['print_freq'] = 2 + opt['train']['save_checkpoint_freq'] = 3 + opt['datasets']['train']['batch_size'] = 2 + opt['model']['beta_schedule']['train']['n_timestep'] = 10 + opt['model']['beta_schedule']['val']['n_timestep'] = 10 + opt['datasets']['train']['data_len'] = 6 + opt['datasets']['val']['data_len'] = 3 + + # validation in train phase + if phase == 'train': + opt['datasets']['val']['data_len'] = 3 + + # W&B Logging + try: + log_wandb_ckpt = args.log_wandb_ckpt + opt['log_wandb_ckpt'] = log_wandb_ckpt + except: + pass + try: + log_eval = args.log_eval + opt['log_eval'] = log_eval + except: + pass + try: + log_infer = args.log_infer + opt['log_infer'] = log_infer + except: + pass + opt['enable_wandb'] = enable_wandb + + return opt + + +class NoneDict(dict): + def __missing__(self, key): + return None + + +# convert to NoneDict, which return None for missing key. 
+def dict_to_nonedict(opt): + if isinstance(opt, dict): + new_opt = dict() + for key, sub_opt in opt.items(): + new_opt[key] = dict_to_nonedict(sub_opt) + return NoneDict(**new_opt) + elif isinstance(opt, list): + return [dict_to_nonedict(sub_opt) for sub_opt in opt] + else: + return opt + + +def dict2str(opt, indent_l=1): + '''dict to string for logger''' + msg = '' + for k, v in opt.items(): + if isinstance(v, dict): + msg += ' ' * (indent_l * 2) + k + ':[\n' + msg += dict2str(v, indent_l + 1) + msg += ' ' * (indent_l * 2) + ']\n' + else: + msg += ' ' * (indent_l * 2) + k + ': ' + str(v) + '\n' + return msg + + +def setup_logger(logger_name, root, phase, level=logging.INFO, screen=False): + '''set up logger''' + l = logging.getLogger(logger_name) + formatter = logging.Formatter( + '%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s', datefmt='%y-%m-%d %H:%M:%S') + log_file = os.path.join(root, '{}.log'.format(phase)) + fh = logging.FileHandler(log_file, mode='w') + fh.setFormatter(formatter) + l.setLevel(level) + l.addHandler(fh) + if screen: + sh = logging.StreamHandler() + sh.setFormatter(formatter) + l.addHandler(sh) diff --git a/core/metrics.py b/core/metrics.py new file mode 100644 index 0000000..b79e02b --- /dev/null +++ b/core/metrics.py @@ -0,0 +1,93 @@ +import os +import math +import numpy as np +import cv2 +from torchvision.utils import make_grid + + +def tensor2img(tensor, out_type=np.uint8, min_max=(-1, 1)): + ''' + Converts a torch Tensor into an image Numpy array + Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order + Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default) + ''' + tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # clamp + tensor = (tensor - min_max[0]) / \ + (min_max[1] - min_max[0]) # to range [0,1] + n_dim = tensor.dim() + if n_dim == 4: + n_img = len(tensor) + img_np = make_grid(tensor, nrow=int( + math.sqrt(n_img)), normalize=False).numpy() + img_np = np.transpose(img_np, (1, 2, 0)) # HWC, RGB 
+ elif n_dim == 3: + img_np = tensor.numpy() + img_np = np.transpose(img_np, (1, 2, 0)) # HWC, RGB + elif n_dim == 2: + img_np = tensor.numpy() + else: + raise TypeError( + 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim)) + if out_type == np.uint8: + img_np = (img_np * 255.0).round() + # Important. Unlike matlab, numpy.unit8() WILL NOT round by default. + return img_np.astype(out_type) + + +def save_img(img, img_path, mode='RGB'): + cv2.imwrite(img_path, cv2.cvtColor(img, cv2.COLOR_RGB2BGR)) + # cv2.imwrite(img_path, img) + + +def calculate_psnr(img1, img2): + # img1 and img2 have range [0, 255] + img1 = img1.astype(np.float64) + img2 = img2.astype(np.float64) + mse = np.mean((img1 - img2)**2) + if mse == 0: + return float('inf') + return 20 * math.log10(255.0 / math.sqrt(mse)) + + +def ssim(img1, img2): + C1 = (0.01 * 255)**2 + C2 = (0.03 * 255)**2 + + img1 = img1.astype(np.float64) + img2 = img2.astype(np.float64) + kernel = cv2.getGaussianKernel(11, 1.5) + window = np.outer(kernel, kernel.transpose()) + + mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid + mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5] + mu1_sq = mu1**2 + mu2_sq = mu2**2 + mu1_mu2 = mu1 * mu2 + sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq + sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq + sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2 + + ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * + (sigma1_sq + sigma2_sq + C2)) + return ssim_map.mean() + + +def calculate_ssim(img1, img2): + '''calculate SSIM + the same outputs as MATLAB's + img1, img2: [0, 255] + ''' + if not img1.shape == img2.shape: + raise ValueError('Input images must have the same dimensions.') + if img1.ndim == 2: + return ssim(img1, img2) + elif img1.ndim == 3: + if img1.shape[2] == 3: + ssims = [] + for i in range(3): + ssims.append(ssim(img1, img2)) + return np.array(ssims).mean() + elif 
img1.shape[2] == 1: + return ssim(np.squeeze(img1), np.squeeze(img2)) + else: + raise ValueError('Wrong input image dimensions.') diff --git a/core/wandb_logger.py b/core/wandb_logger.py new file mode 100644 index 0000000..20f8d4f --- /dev/null +++ b/core/wandb_logger.py @@ -0,0 +1,116 @@ +import os + +class WandbLogger: + """ + Log using `Weights and Biases`. + """ + def __init__(self, opt): + try: + import wandb + except ImportError: + raise ImportError( + "To use the Weights and Biases Logger please install wandb." + "Run `pip install wandb` to install it." + ) + + self._wandb = wandb + + # Initialize a W&B run + if self._wandb.run is None: + self._wandb.init( + project=opt['wandb']['project'], + config=opt, + dir='./experiments' + ) + + self.config = self._wandb.config + + if self.config.get('log_eval', None): + self.eval_table = self._wandb.Table(columns=['fake_image', + 'sr_image', + 'hr_image', + 'psnr', + 'ssim']) + else: + self.eval_table = None + + if self.config.get('log_infer', None): + self.infer_table = self._wandb.Table(columns=['fake_image', + 'sr_image', + 'hr_image']) + else: + self.infer_table = None + + def log_metrics(self, metrics, commit=True): + """ + Log train/validation metrics onto W&B. + + metrics: dictionary of metrics to be logged + """ + self._wandb.log(metrics, commit=commit) + + def log_image(self, key_name, image_array): + """ + Log image array onto W&B. + + key_name: name of the key + image_array: numpy array of image. 
+ """ + self._wandb.log({key_name: self._wandb.Image(image_array)}) + + def log_images(self, key_name, list_images): + """ + Log list of image array onto W&B + + key_name: name of the key + list_images: list of numpy image arrays + """ + self._wandb.log({key_name: [self._wandb.Image(img) for img in list_images]}) + + def log_checkpoint(self, current_epoch, current_step): + """ + Log the model checkpoint as W&B artifacts + + current_epoch: the current epoch + current_step: the current batch step + """ + model_artifact = self._wandb.Artifact( + self._wandb.run.id + "_model", type="model" + ) + + gen_path = os.path.join( + self.config.path['checkpoint'], 'I{}_E{}_gen.pth'.format(current_step, current_epoch)) + opt_path = os.path.join( + self.config.path['checkpoint'], 'I{}_E{}_opt.pth'.format(current_step, current_epoch)) + + model_artifact.add_file(gen_path) + model_artifact.add_file(opt_path) + self._wandb.log_artifact(model_artifact, aliases=["latest"]) + + def log_eval_data(self, fake_img, sr_img, hr_img, psnr=None, ssim=None): + """ + Add data row-wise to the initialized table. 
+ """ + if psnr is not None and ssim is not None: + self.eval_table.add_data( + self._wandb.Image(fake_img), + self._wandb.Image(sr_img), + self._wandb.Image(hr_img), + psnr, + ssim + ) + else: + self.infer_table.add_data( + self._wandb.Image(fake_img), + self._wandb.Image(sr_img), + self._wandb.Image(hr_img) + ) + + def log_eval_table(self, commit=False): + """ + Log the table + """ + if self.eval_table: + self._wandb.log({'eval_data': self.eval_table}, commit=commit) + elif self.infer_table: + self._wandb.log({'infer_data': self.infer_table}, commit=commit) diff --git a/data/LRHR_dataset.py b/data/LRHR_dataset.py new file mode 100644 index 0000000..9952f56 --- /dev/null +++ b/data/LRHR_dataset.py @@ -0,0 +1,99 @@ +from io import BytesIO +import lmdb +from PIL import Image +from torch.utils.data import Dataset +import random +import data.util as Util + + +class LRHRDataset(Dataset): + def __init__(self, dataroot, datatype, l_resolution=16, r_resolution=128, split='train', data_len=-1, need_LR=False): + self.datatype = datatype + self.l_res = l_resolution + self.r_res = r_resolution + self.data_len = data_len + self.need_LR = need_LR + self.split = split + + if datatype == 'lmdb': + self.env = lmdb.open(dataroot, readonly=True, lock=False, + readahead=False, meminit=False) + # init the datalen + with self.env.begin(write=False) as txn: + self.dataset_len = int(txn.get("length".encode("utf-8"))) + if self.data_len <= 0: + self.data_len = self.dataset_len + else: + self.data_len = min(self.data_len, self.dataset_len) + elif datatype == 'img': + self.sr_path = Util.get_paths_from_images( + '{}/sr_{}_{}'.format(dataroot, l_resolution, r_resolution)) + self.hr_path = Util.get_paths_from_images( + '{}/hr_{}'.format(dataroot, r_resolution)) + if self.need_LR: + self.lr_path = Util.get_paths_from_images( + '{}/lr_{}'.format(dataroot, l_resolution)) + self.dataset_len = len(self.hr_path) + if self.data_len <= 0: + self.data_len = self.dataset_len + else: + self.data_len = 
min(self.data_len, self.dataset_len) + else: + raise NotImplementedError( + 'data_type [{:s}] is not recognized.'.format(datatype)) + + def __len__(self): + return self.data_len + + def __getitem__(self, index): + img_HR = None + img_LR = None + + if self.datatype == 'lmdb': + with self.env.begin(write=False) as txn: + hr_img_bytes = txn.get( + 'hr_{}_{}'.format( + self.r_res, str(index).zfill(5)).encode('utf-8') + ) + sr_img_bytes = txn.get( + 'sr_{}_{}_{}'.format( + self.l_res, self.r_res, str(index).zfill(5)).encode('utf-8') + ) + if self.need_LR: + lr_img_bytes = txn.get( + 'lr_{}_{}'.format( + self.l_res, str(index).zfill(5)).encode('utf-8') + ) + # skip the invalid index + while (hr_img_bytes is None) or (sr_img_bytes is None): + new_index = random.randint(0, self.data_len-1) + hr_img_bytes = txn.get( + 'hr_{}_{}'.format( + self.r_res, str(new_index).zfill(5)).encode('utf-8') + ) + sr_img_bytes = txn.get( + 'sr_{}_{}_{}'.format( + self.l_res, self.r_res, str(new_index).zfill(5)).encode('utf-8') + ) + if self.need_LR: + lr_img_bytes = txn.get( + 'lr_{}_{}'.format( + self.l_res, str(new_index).zfill(5)).encode('utf-8') + ) + img_HR = Image.open(BytesIO(hr_img_bytes)).convert("RGB") + img_SR = Image.open(BytesIO(sr_img_bytes)).convert("RGB") + if self.need_LR: + img_LR = Image.open(BytesIO(lr_img_bytes)).convert("RGB") + else: + img_HR = Image.open(self.hr_path[index]).convert("RGB") + img_SR = Image.open(self.sr_path[index]).convert("RGB") + if self.need_LR: + img_LR = Image.open(self.lr_path[index]).convert("RGB") + if self.need_LR: + [img_LR, img_SR, img_HR] = Util.transform_augment( + [img_LR, img_SR, img_HR], split=self.split, min_max=(-1, 1)) + return {'LR': img_LR, 'HR': img_HR, 'SR': img_SR, 'Index': index} + else: + [img_SR, img_HR] = Util.transform_augment( + [img_SR, img_HR], split=self.split, min_max=(-1, 1)) + return {'HR': img_HR, 'SR': img_SR, 'Index': index} diff --git a/data/LRHR_dataset2.py b/data/LRHR_dataset2.py new file mode 100644 index 
0000000..4a285b1 --- /dev/null +++ b/data/LRHR_dataset2.py @@ -0,0 +1,99 @@ +from io import BytesIO +import lmdb +from PIL import Image +from torch.utils.data import Dataset +import random +import data.util as Util + + +class LRHRDataset2(Dataset): + def __init__(self, dataroot, datatype, l_resolution=16, r_resolution=128, split='train', data_len=-1, need_LR=False): + self.datatype = datatype + self.l_res = l_resolution + self.r_res = r_resolution + self.data_len = data_len + self.need_LR = need_LR + self.split = split + + if datatype == 'lmdb': + self.env = lmdb.open(dataroot, readonly=True, lock=False, + readahead=False, meminit=False) + # init the datalen + with self.env.begin(write=False) as txn: + self.dataset_len = int(txn.get("length".encode("utf-8"))) + if self.data_len <= 0: + self.data_len = self.dataset_len + else: + self.data_len = min(self.data_len, self.dataset_len) + elif datatype == 'img': + # self.sr_path = Util.get_paths_from_images( + # '{}/sr_{}_{}'.format(dataroot, l_resolution, r_resolution)) + self.hr_path = Util.get_paths_from_images( + '{}/hr_{}'.format(dataroot, r_resolution)) + if self.need_LR: + self.lr_path = Util.get_paths_from_images( + '{}/lr_{}'.format(dataroot, l_resolution)) + self.dataset_len = len(self.hr_path) + if self.data_len <= 0: + self.data_len = self.dataset_len + else: + self.data_len = min(self.data_len, self.dataset_len) + else: + raise NotImplementedError( + 'data_type [{:s}] is not recognized.'.format(datatype)) + + def __len__(self): + return self.data_len + + def __getitem__(self, index): + img_HR = None + img_LR = None + + if self.datatype == 'lmdb': + with self.env.begin(write=False) as txn: + hr_img_bytes = txn.get( + 'hr_{}_{}'.format( + self.r_res, str(index).zfill(5)).encode('utf-8') + ) + # sr_img_bytes = txn.get( + # 'sr_{}_{}_{}'.format( + # self.l_res, self.r_res, str(index).zfill(5)).encode('utf-8') + # ) + if self.need_LR: + lr_img_bytes = txn.get( + 'lr_{}_{}'.format( + self.l_res, 
str(index).zfill(5)).encode('utf-8') + ) + # skip the invalid index + while (hr_img_bytes is None): # or (sr_img_bytes is None) + new_index = random.randint(0, self.data_len-1) + hr_img_bytes = txn.get( + 'hr_{}_{}'.format( + self.r_res, str(new_index).zfill(5)).encode('utf-8') + ) + # sr_img_bytes = txn.get( + # 'sr_{}_{}_{}'.format( + # self.l_res, self.r_res, str(new_index).zfill(5)).encode('utf-8') + # ) + if self.need_LR: + lr_img_bytes = txn.get( + 'lr_{}_{}'.format( + self.l_res, str(new_index).zfill(5)).encode('utf-8') + ) + img_HR = Image.open(BytesIO(hr_img_bytes)).convert("RGB") + # img_SR = Image.open(BytesIO(sr_img_bytes)).convert("RGB") + if self.need_LR: + img_LR = Image.open(BytesIO(lr_img_bytes)).convert("RGB") + else: + img_HR = Image.open(self.hr_path[index]).convert("RGB") + # img_SR = Image.open(self.sr_path[index]).convert("RGB") + if self.need_LR: + img_LR = Image.open(self.lr_path[index]).convert("RGB") + if self.need_LR: + [img_LR, img_HR] = Util.transform_augment2( + [img_LR, img_HR], split=self.split, min_max=(-1, 1)) + return {'LR': img_LR, 'HR': img_HR, 'Index': index} + else: + [img_HR] = Util.transform_augment2( + [img_HR], split=self.split, min_max=(-1, 1)) + return {'HR': img_HR, 'Index': index} diff --git a/data/__init__.py b/data/__init__.py new file mode 100644 index 0000000..8ba33d8 --- /dev/null +++ b/data/__init__.py @@ -0,0 +1,56 @@ +'''create dataset and dataloader''' +import logging +from re import split +import torch.utils.data + + +def create_dataloader(dataset, dataset_opt, phase): + '''create dataloader ''' + if phase == 'train': + return torch.utils.data.DataLoader( + dataset, + batch_size=dataset_opt['batch_size'], + shuffle=dataset_opt['use_shuffle'], + num_workers=dataset_opt['num_workers'], + pin_memory=True) + elif phase == 'val': + return torch.utils.data.DataLoader( + dataset, batch_size=1, shuffle=False, num_workers=1, pin_memory=True) + else: + raise NotImplementedError( + 'Dataloader [{:s}] is not 
found.'.format(phase)) + + +def create_dataset(dataset_opt, phase): + '''create dataset''' + mode = dataset_opt['mode'] + from data.LRHR_dataset import LRHRDataset as D + dataset = D(dataroot=dataset_opt['dataroot'], + datatype=dataset_opt['datatype'], + l_resolution=dataset_opt['l_resolution'], + r_resolution=dataset_opt['r_resolution'], + split=phase, + data_len=dataset_opt['data_len'], + need_LR=(mode == 'LRHR') + ) + logger = logging.getLogger('base') + logger.info('Dataset [{:s} - {:s}] is created.'.format(dataset.__class__.__name__, + dataset_opt['name'])) + return dataset + +def create_dataset2(dataset_opt, phase): + '''create dataset''' + mode = dataset_opt['mode'] + from data.LRHR_dataset2 import LRHRDataset2 as D + dataset = D(dataroot=dataset_opt['dataroot'], + datatype=dataset_opt['datatype'], + l_resolution=dataset_opt['l_resolution'], + r_resolution=dataset_opt['r_resolution'], + split=phase, + data_len=dataset_opt['data_len'], + need_LR=True + ) + logger = logging.getLogger('base') + logger.info('Dataset [{:s} - {:s}] is created.'.format(dataset.__class__.__name__, + dataset_opt['name'])) + return dataset \ No newline at end of file diff --git a/data/__pycache__/LRHR_dataset.cpython-36.pyc b/data/__pycache__/LRHR_dataset.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..969f2d29e83b74d14b9e6baae73d9b868da9af1a GIT binary patch literal 2664 zcma)8OK%)S5T5RN?!%ARj-5n77$Sid5*sN2g$PAa2uX~Lfpt*O0va~kV|&)~aC+K_ zy_uCF9C5-*jvS)gx$z_N8#pvaBqaX@F7Q?FCVoI9cDKK-uBxf7>c=;Geq^M&{QSB2 zecZDCunv42$XC#cCO}w%C05E>jFHR}-s0%(#7>=-V{}g9re4b%+WnSq{DN+!RlQ*e zS9p&s;YHq#-Kq(`Y&HBn#I+hs`&YMBR4jbH1le6k+p7^NueDXXh*XE1UVK~#SI~;L z0Fl*V!a^FO!V~@@*0Mz)Dvzv|BdVf?-W7FBGp1{c&6~|(xII*9?L7o&SIKsq?J>yL zI{}bB6|@@K$7sb_*h*enp;g*IRysgldO*AMfllcH-I5Eo&gIX_>-r@VT-fI8?2z7v3`r3j&Dp_apM@S>E+1a=nB`uA2 zC?7j}f;lVe{0Wn%)L3_1RJ+GYhAf>O+r*nN+M3ZiJKX3d@w_AdG?x0ne`uLf$6=cs zWI{91{D@7tzbb=KH7!OE1+jP757-H;_2`mzl2qKeb-m%}Kt^qmXUR5nd1qZ$Xxv@{ zXg`h8IExi+_8UxZ_lsgX*9pJ 
z8QqDq`Ni9}u6;I7#mv)<35R9PZEWix3~9f@kQSx52(Xy9|K$OzvN|7Uv*@dQ0_d;_ zv~lKV!~4OJ+=Bm^+rHn?+@2lDZQpahTL12|+m;&bj;ZmI)`4}8 zi5gac4|QfuhC0f&irgO#ZHE4aHhZ4{vrowm--(i)v<^pvcJh$+Ma?)E*YA*j#D{?!_*55JAt6~=v6gjvtI25ff?p9v<1nKoTL|ywwpJuHn_Q|;Kn!$ z)Akk^l!J`;O|)f@(n2Ppt%fbn;{kbr;C;Z}-}vCs4@g0=jC4@ZIAJJ4o+RTbg3|=E z1XBQQ7fNckw-P6be2uh|1coa-+0I0sYM*-W-hf7IvY#NJ%cT8Ip52S2l5djg4T3WS zuLD}g@RH36V!W`uxk^lCs%%uvE!J*KRkb`YeR#7u=og#v9eT8mwIux=`ghQ(V+{|C zu!t{5x{^ir!A~zEEjN$SLoEzJ#(2@BVfY?`oBuQxsLA1VLsSks%`sxLoCpged+I;+ zvwVt8u>gM4Y#Kc^hn-@hczT-kW)4%%%Q=?@+u};1H0cx2KMte4-tdTDvvwMVp{|Bu znv2bZVP^{ Kho~Q^*#7|fgOV)( literal 0 HcmV?d00001 diff --git a/data/__pycache__/LRHR_dataset2.cpython-36.pyc b/data/__pycache__/LRHR_dataset2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a6879a51e70dcc4ef2df89172c826de4cfc2d0b GIT binary patch literal 2409 zcmZ`*OK;p%6t=IQGjW<^@@R{KI#7Wapb1JVfl!62Qqo2Wp-hX429PzG>rCwUkzU`l z$<%`osn}JpWYHBn{zrcR>$_mXDm%oI4SeU$v`quGL)ICj~vuiz=$0B&$IH8Qhfnq;$-br@PJwKBV7YoDDuncH#4aj)a)JU^**>emhK zaQC6X-N-$(Iuo4TGFsjd`ZZdn@~$4ps9gJO1Gcl4b^8%HuXSa&jAW0Jo?Z@&D|pH` z0g=%$xq)Gv;x6|dnjMS#y!O!O*u2gs&^kOonJE>lw{Nt^-K`u=0Lqb~8|OzR z?A4wRq^pK!0?)^I$~nX;RvDpDSwOS0fvj?YR^Dzup zQNJ=0tO7jFBlE|_p)c!|pG@`IJUk%1J{JmC6yi(f$wxm6t~m57*nuDRP4u`QLm zRfw!Bm4}&7RC$<)HY^&(>_#EivfVVwqFhG&6Cn!GswumS(yh@fL{Ui5pwR_-Dn+^{ z<04mqp3(SCoJN9}Cs?3VOYY~t8scr(f4{Z7U1ZVn{$8{h=gaGNZeRO!ndY!eBnZc| zSy|dWP<|NF)`TG~MtKQfnC|nN`DWb=*tFS1TW1ZkZrSEE;)KD-$Cx>oKhCbDr+xNl zW9g!n9?d5g22L8CAQNoYQ$X_u+eq@wrPUeh3aH zH6e7Sr!(s>$#nPA%HhV2FMgL3U^g*;lGn}{hivGbs08M}q>?;Q>Hjj_yYh}9PbE`w zx}wpV$qcV!tk8y}22Fro=$M|tIA%RpnEVpOk=v-uv&MhEzB!K0z7VB#q`tqEbICmP zkoBqB0-xd&kF2lR&_Z>1p?|?+>z1LjSRdMkoEe=%ZGW9Yxuk;aCm;wlco6t-^Q zYXyJ^9Ff$IMPlGJmR{Q$Z7Sl|C%EWn?b@yAAV&FKt0iLhrJlaRX3X;V| zz|lXr;Gl!9yU^BgN9(j^o;XVxuMjK}GzsPa$||K$&R{D}Q}HT!iG5<0LaylMyvUSC zEmUO0PK)Aa2o>l0|Benpcu#q)5bAqAIifoKl+YuxI!XqHQYVT^x zB{9moSrmq<9)?-L_foQhFuc3hO-B*&E`30HATAS-+#Lf_4MC@djz>vlAYkWxOZ!@Z 
zVsW8&VWj=m&9!ww+oPssA$r?OklD0cB7GF}$49&m=+x+4hm=hTdb@Oo#|rClXwC93 Uk4F9x30_HB6kvG*+!%`WFJJ~wJOBUy literal 0 HcmV?d00001 diff --git a/data/__pycache__/__init__.cpython-36.pyc b/data/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5ce4733bc2490bad0638937c9bd38205489f602 GIT binary patch literal 1672 zcmbu9-EJc_6vxM&&rH&8)8#9`va>8-BS9jSJG4Sb6a-S}MOz6e2&`%BG?UJZqcanr zA-SR4?t}0i`xL(AYF~jX&T&$Rw2MG6k^lDioY=?bcRU~N?ZszLp2%tBIKMeN56AH* z*y=|p(iu6DNnWrK!`>};>5g2sne#@z^rZibGYVuNL+nEt$rkpJ(WsOQ? z)?~G6lB|+;T7kZH%+CJlu(M6I){Z2C}>| z@kQ2*r=rf6YSdoTO4QTEWKt;8t`?>ELBo}T`aJc(!$3{AwSrG?@jS0YsY(sk#{E7k zo-&;Me0l#E)3M#=o1Z?a*WV_2om9F>CVElH{z)3(?i$8Z<1d=LsEtPnG%@{eXj5nI zp6KSwa$cxXRgIFL0cbG6L_?W1scXV*#!Y)W5PIGie?CPFzdCRir`uzB-0eKIE$g8{&qCdYj-A!f$88#sknfetJhes@8oCWqrZuXjpejLq!J;b`c?z0A;HDD2zxGDS6s%A!?o973CQ*h zTl*`2=E)A?^k%^k}GfNTv#kMPnh3iUBv RySjtoVdY@;xKTIi{sF{akih@| literal 0 HcmV?d00001 diff --git a/data/__pycache__/util.cpython-36.pyc b/data/__pycache__/util.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8b51c88ff4602ce9ef69b5b0341bd711e6812b3 GIT binary patch literal 3400 zcmc&$&2k&Z5uTad1(qNQQ4}qS5*5Lg>;(BCRZ>;rB#a`Fge5Xo3W);O)yBa$a6K;`J%HAm2hyN#2xKb)-M8|#ptcYBZz*EgD+jNnIa7mtHSUxrB8E4Ixgm*U7z`6D*q1KQQ8jhBew^n0}A zDn!Pf&-M8+YqJcKKZDH|2oo0ZT9q3=jHUh}N_VQlh~+zGD%?-QSVu`rgWFLjjDv3Y z4SRiaWhd!|EBkxl=TW@!>9egz>nk!ygO$BB>MZvTUQeyI!Z_UTsrxguHJ!s{hm}Y- zqHfR%8)R#FwUac1j=sMnr!LN~K# z3<4ziR*Wms$6=tkWZ*-Za14>MHgAgz5!2UFNAk8y`hfq9w^*O=*HRBN3VGin_I&kE zPG9t$zPo=X^)h#WSUPL z?gA^LTpix{%dd1Fk*UN|dVfchLWM z*#CH`~Zvza4{!%8x*F36K5^ z;+UyQMx?{OB&uQ3{gS8#t!-zK_1Upd5At#8jYiIlbM-WXJ5<2Z8904ZCyr34ozPZ_ z#0eG7$-nVa6=Fd#S3>cug|yZ>XfEjtw0;q^C;aXJsq7T~x{x>YxaADK|Mu-rKC+jzRy ziPC&?H{Xy3)ol{*lCS_R4)-9~X}Uz1xXSStb5rW4XpJ2nW&1;#_-68`|#Ae;A<%s((d7y4$J2>nH z`;k7OI97{B#7J9%P01@Y6Tmc zi-B$?aT>Mul09t$qX^%iD2hr|SkFzd8^w)ouzx-TbEhGo8!l1rW3BcfnDF_6SmKJ@ zYf|*-M}`KZ0V(pwj0V=s>bXFr(yo#&hio%LxL9Bsk>>CkvCO23UVh 
zgJYc9b3aAb@Z42b{Q@muI}MtbWVtj$fs;8>7_9rCBig7`WxWg3gBWO7<3^%Yu zaPf^8e2QhmDxmvmW_U*_!ulB;P@L(}#4TrWV6%&x{#TGSP zB`DvB2~y!|?R=N8v()4N(*J~PT~hSvnnP~dlF-YDK7|z_-gu`}r05>8A5`{h#@-`2 zcM^q|()jG!_*==Ar5l@Cv+&ejX&)sjIs}&moyT-@-y(B_I_uL!&c~Hu{MBwE_d4Ny eDo;%{ 1: + # prepare data subsets + multi_env = None + if lmdb_save: + multi_env = env + + file_subsets = np.array_split(files, n_worker) + worker_threads = [] + wctx = WorkingContext(resize_fn, lmdb_save, out_path, multi_env, sizes) + + # start worker processes, monitor results + for i in range(n_worker): + proc = Process(target=prepare_process_worker, args=(wctx, file_subsets[i])) + proc.start() + worker_threads.append(proc) + + total_count = str(len(files)) + while not all_threads_inactive(worker_threads): + print("\r{}/{} images processed".format(wctx.value(), total_count), end=" ") + time.sleep(0.1) + + else: + total = 0 + for file in tqdm(files): + i, imgs = resize_fn(file) + lr_img, hr_img, sr_img = imgs + if not lmdb_save: + lr_img.save( + '{}/lr_{}/{}.png'.format(out_path, sizes[0], i.zfill(5))) + hr_img.save( + '{}/hr_{}/{}.png'.format(out_path, sizes[1], i.zfill(5))) + sr_img.save( + '{}/sr_{}_{}/{}.png'.format(out_path, sizes[0], sizes[1], i.zfill(5))) + else: + with env.begin(write=True) as txn: + txn.put('lr_{}_{}'.format( + sizes[0], i.zfill(5)).encode('utf-8'), lr_img) + txn.put('hr_{}_{}'.format( + sizes[1], i.zfill(5)).encode('utf-8'), hr_img) + txn.put('sr_{}_{}_{}'.format( + sizes[0], sizes[1], i.zfill(5)).encode('utf-8'), sr_img) + total += 1 + if lmdb_save: + with env.begin(write=True) as txn: + txn.put('length'.encode('utf-8'), str(total).encode('utf-8')) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--path', '-p', type=str, + default='{}/Dataset/celebahq_256'.format(Path.home())) + parser.add_argument('--out', '-o', type=str, + default='./dataset/celebahq') + + parser.add_argument('--size', type=str, default='64,512') + parser.add_argument('--n_worker', 
type=int, default=3) + parser.add_argument('--resample', type=str, default='bicubic') + # default save in png format + parser.add_argument('--lmdb', '-l', action='store_true') + + args = parser.parse_args() + + resample_map = {'bilinear': Image.BILINEAR, 'bicubic': Image.BICUBIC} + resample = resample_map[args.resample] + sizes = [int(s.strip()) for s in args.size.split(',')] + + args.out = '{}_{}_{}'.format(args.out, sizes[0], sizes[1]) + prepare(args.path, args.out, args.n_worker, + sizes=sizes, resample=resample, lmdb_save=args.lmdb) diff --git a/data/util.py b/data/util.py new file mode 100644 index 0000000..6d1816f --- /dev/null +++ b/data/util.py @@ -0,0 +1,92 @@ +import os +import torch +import torchvision +import random +import numpy as np + +IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', + '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP'] + + +def is_image_file(filename): + return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) + + +def get_paths_from_images(path): + assert os.path.isdir(path), '{:s} is not a valid directory'.format(path) + images = [] + for dirpath, _, fnames in sorted(os.walk(path)): + for fname in sorted(fnames): + if is_image_file(fname): + img_path = os.path.join(dirpath, fname) + images.append(img_path) + assert images, '{:s} has no valid image file'.format(path) + return sorted(images) + + +def augment(img_list, hflip=True, rot=True, split='val'): + # horizontal flip OR rotate + hflip = hflip and (split == 'train' and random.random() < 0.5) + vflip = rot and (split == 'train' and random.random() < 0.5) + rot90 = rot and (split == 'train' and random.random() < 0.5) + + def _augment(img): + if hflip: + img = img[:, ::-1, :] + if vflip: + img = img[::-1, :, :] + if rot90: + img = img.transpose(1, 0, 2) + return img + + return [_augment(img) for img in img_list] + + +def transform2numpy(img): + img = np.array(img) + img = img.astype(np.float32) / 255. 
+ if img.ndim == 2: + img = np.expand_dims(img, axis=2) + # some images have 4 channels + if img.shape[2] > 3: + img = img[:, :, :3] + return img + + +def transform2tensor(img, min_max=(0, 1)): + # HWC to CHW + img = torch.from_numpy(np.ascontiguousarray( + np.transpose(img, (2, 0, 1)))).float() + # to range min_max + img = img*(min_max[1] - min_max[0]) + min_max[0] + return img + + +# implementation by numpy and torch +# def transform_augment(img_list, split='val', min_max=(0, 1)): +# imgs = [transform2numpy(img) for img in img_list] +# imgs = augment(imgs, split=split) +# ret_img = [transform2tensor(img, min_max) for img in imgs] +# return ret_img + + +# implementation by torchvision, detail in https://github.com/Janspiry/Image-Super-Resolution-via-Iterative-Refinement/issues/14 +totensor = torchvision.transforms.ToTensor() +hflip = torchvision.transforms.RandomHorizontalFlip() +def transform_augment(img_list, split='val', min_max=(0, 1)): + imgs = [totensor(img) for img in img_list] + if split == 'train': + imgs = torch.stack(imgs, 0) + imgs = hflip(imgs) + imgs = torch.unbind(imgs, dim=0) + ret_img = [img * (min_max[1] - min_max[0]) + min_max[0] for img in imgs] + return ret_img + +def transform_augment2(img_list, split='val', min_max=(0, 1)): + imgs = [totensor(img) for img in img_list] + # if split == 'train': + # imgs = torch.stack(imgs, 0) + # imgs = hflip(imgs) + # imgs = torch.unbind(imgs, dim=0) + ret_img = [img * (min_max[1] - min_max[0]) + min_max[0] for img in imgs] + return ret_img \ No newline at end of file diff --git a/eval.py b/eval.py new file mode 100644 index 0000000..a79789f --- /dev/null +++ b/eval.py @@ -0,0 +1,45 @@ +import argparse +import core.metrics as Metrics +from PIL import Image +import numpy as np +import glob + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('-p', '--path', type=str, + default='experiments/basic_sr_ffhq_210809_142238/results') + args = parser.parse_args() + real_names = 
list(glob.glob('{}/*_hr.png'.format(args.path))) + fake_names = list(glob.glob('{}/*_sr.png'.format(args.path))) + + real_names.sort() + fake_names.sort() + + avg_psnr = 0.0 + avg_ssim = 0.0 + idx = 0 + for rname, fname in zip(real_names, fake_names): + + # import pdb + # pdb.set_trace() + idx += 1 + ridx = rname.rsplit("_hr")[0] + fidx = fname.rsplit("_sr")[0] + assert ridx == fidx, 'Image ridx:{ridx}!=fidx:{fidx}'.format( + ridx, fidx) + + hr_img = np.array(Image.open(rname)) + sr_img = np.array(Image.open(fname)) + psnr = Metrics.calculate_psnr(sr_img, hr_img) + ssim = Metrics.calculate_ssim(sr_img, hr_img) + avg_psnr += psnr + avg_ssim += ssim + if idx % 20 == 0: + print('Image:{}, PSNR:{:.4f}, SSIM:{:.4f}'.format(idx, psnr, ssim)) + + avg_psnr = avg_psnr / idx + avg_ssim = avg_ssim / idx + + # log + print('# Validation # PSNR: {:.2f}'.format(avg_psnr)) + print('# Validation # SSIM: {:.3f}'.format(avg_ssim)) diff --git a/experiments/check_fid.py b/experiments/check_fid.py new file mode 100644 index 0000000..a12d762 --- /dev/null +++ b/experiments/check_fid.py @@ -0,0 +1,25 @@ +import os +import sys +import shutil + +def copy_files(source_dir, destination_dir, file_extension): + if not os.path.exists(destination_dir): + os.makedirs(destination_dir) + + files = os.listdir(source_dir) + for file in files: + if file.endswith(file_extension): + file_path = os.path.join(source_dir, file) + shutil.copy(file_path, destination_dir) + +# 获取命令行参数 +folder_A = os.path.join('experiments', sys.argv[1], 'results') # 文件夹A的路径 +folder_B = os.path.join('experiments', sys.argv[1], 'results1') # 新文件夹B的路径 +folder_C = os.path.join('experiments', sys.argv[1], 'results2') # 新文件夹C的路径 + +# 移动以"_hr.png"结尾的文件到文件夹B +copy_files(folder_A, folder_B, "_hr.png") + +# 移动以"_sr.png"结尾的文件到文件夹C +copy_files(folder_A, folder_C, "_sr.png") + diff --git a/img/LWTDM.png b/img/LWTDM.png new file mode 100644 index 0000000000000000000000000000000000000000..605c84190890b06f8e08bbe0c59cd61a1924d4fe GIT binary 
patch literal 128607 zcmZ_03p~^N{|Bz4a=MX(Bpj8HP*id+LZT^Rxs2qN+{)dCb#>1*B$v5_s(@Crra4@bK4l(|1+KXe*gdP@4v_6oHqM>-k~y(u;|dp7dECBD`(422JrA#0 z`fzdaHgf*k(dzZik&BC+Z+P{}?Lgawt$p<;{X(d)IOfUTBajvwq$r|maBXNkb zaPko1T-!Oy&Kql|PMtgCka7QO{D+q!$iJhMh+*o{Opm*@y#F*ll-PZ&jGKFZUZ7;j zKc_{+uITNwJo~rIY0AUUVln|9N~R+zW$*y=0Q1$Et&N7j(z*9#8({MQKQMgmqmU*a zu)|-k?DhMs-|x8|U(8jAcA7(PGxe|;%l^bv8Dn)Y-W`FTZT&+oO*o_T{kcO-W$X*SdB ze^&&5tX*T`b`)-su`Fb~AK9ziOg&BUuT_>Ta6)55!tYhM8gti+Z`&|K7>~_Z1kSa# zz{7+S{*Q;vPY#A^ZxIk-Hua%NnZs@SuxnX0FpXwR{ zoqhN`{6GQz+V2NHw(s3E&oNk}C&fn-#@q|9$EwX`;Nd?MV9^S4ySgNPU&nRx4RDek zmb5k<8+zyvA=Dttw|l1HVQLfUU9{1aWu8A5zRCkuo8YYG2&|?tw}21%QE9obh;9NN z{pYI7d%WGSh|QH&NjGHF1FQhPfxL`3P@d$Q15k#G%j^MpuyiB3Gdf{?HEa8(Y^V*2 z`+i3KF!e2Pn}KA|$1%%*5SH>q|MvYSuDOIe0d=|=(}|@2Vuyi=cCpKq9D1?l zDkX&>etDUvRmYnuwV3Or3jVtxzIy?t`?La`GqQUx%<~MF0sVE}vkIJNMF9u?9+ecj zLgdp`K+o@1H}_`jaS5IF31}%_p6vtB-oaHs@Y)*mnp^$+k5VD&7Y$HABiBYYQ1Y02GhhAw zy(fe|3P$qnPEcLd+NQu=n_6$4n?9Nkur4}Uegmui?Sa@v_#X|163=aHzmoi?% z?0@faL46$KgD)B5UVt}-sUHL^#)1f9TMjdX3;Dk`cq9Gd|DNK39CA(lN4R#7+DYKe zM0cI1#1WV<#PS!}Hd<5~atQxlvb`$=$o45pAoOQap^ifRk{Up^Z+l$aQRM35<@CN) z&hs7zD=RBU3o8z)#6=6qTUQ%PXG22QT;LXQkGz!Px({UUuJ#&h@SQUa301JFa%@X{ zDXKDGbxI~+^TwFyH6@mc^kSfTQ#$(A!^9n2Uw42hM-u9}zw)%_>MeEOBuUKgynAQ; z<5(P4!uyAy=;G}*`&LB5RAu_U7Sb&+AisQEON-Sbo*S{*M!rQ zoQ?7s%Zih*DjXlcfc<;kwWP>fS75}>lPgMzG-1nOB9XW>)uFpw?mO2vG-R0>rZ2)y zPx7x98^19s^Cq-?*QXJ#DsIk>w63Ekw{k#SVTsmE&E*MdxXry=SwZg}>(LX1+*^t#?Z z{095uXMramN0<9)7r7QcC9CVdjZ0K`qaAY`0Em?)p)HaKW|pESDr-(PlOW)Xr*7ie zFPvRm8f#4Uj+eq?_#z9M!nIZ;H9uk6G}KhA9(a0oZ_USzWA`br&g!`jRp?0*Uc37R z=Ze1L%|F$JFn4rC@mLjJ7>W{<^L5xCWKIKn=s;zp1eFV)x%1TaqX{7d1o~L0}zESFY z@UG6TuGMvSDc4)(qW>?%^vL-v*>?ZUx$8kkhrwkmc88T5aDcrxfCGFQRy;NHgcyBu zC%OpH(eXa<9f9Y+v zoe9nLMzv9)p4IwAF0=dZ%ZPrku;btYjaHvI-uVaPZ4ybzV5S&-OF~n;;?(;s3-N&A zX*^NGcOlpJplt2*4l9|4{YS2mL}oL41*!U(kNwws<3iv5p)_L0o=gjINLh}+DaX5y z5{r3L6Mm6cCB$ogDUQ1qhsf^?*-%$=`)}%+!4j{SHSygFYA07|6fG0Xf~}Gk`&=&@ z0c=p`iT@y^XloOW?Y31x2cX{sF17BQB81|*UzDIWkE)wjEY+H+cwt;$eaoM_Wcsk{ 
zSz@VWr9S-2WK-%68w8&u>809^(&Z>{y-wUC@wE5LIT&y44T9@b@6=H>d!6y|x3a?8 z|JkE_C|9f9s9c3yilSYaPrzg-+r*{x2@R|2+Bzah`ug(T0~4kDQ4Gmp_LlMd=IUX@ zpktly(H`4=pQ*BR$J!aj+Tw_cCsBs#0Nc|0WVMw)|E34hKhTtx>XMbMDx51Pr8F6_2rys%?1TD8km><^81JoZN7U0htI7WEkS zjYH$%ry#DlLlz}>>|4|1-{KIH33QcWFfxTXjGvx#LFsv#Y>d5e?4*S~mujjzSBHsn zf3l|7(vkkb{cht{fnSica7wlNkW#3Ft{&-Un2MviYO3+TdXMeWI2GcGJ*WtIDjjPWmnZD$?bLqO>?+FnJU zieO8lOFr#NPHoL>Qo4+jSNMzBEp<`BuwIuLrl>1RI*79T>U9Ivy2EBR7Lj>1l@G8*4sY@VsXEu#3CwiTm-DJQgX+Kq=6 z8vjWewat_qoQNzfUf(Zv%+#vZF59`hm-fN7ZPQXd<%TNZ|bYs2}%}6@THaHf0yjQWmM!3 z9h!6{xuk{-ybOejY`9)n5lh1LIZA)SoUZ74HNx~NAcCnqQuIF8Ak&$e>p?B5jhgqJ z_781dK^a^%>=}i+#Tu$!(ntsND;%Tz9RPxFN+86x>*L}oa zb*Zwx@U-`Xy@-QoGdF+lYe&O673q6B5?a{qGf^zXc)Hz%FdMBM?gENW- z$r;;LH@c;79wZSv$dCLTj88c&uSj~WO#HC)hky_&=PeQlptLKt?^k&f+yj~?Iw|Q! zq@tBEg`ithpTmxcA#RBfV!L|jLdxBUiHPIGxsHS)HHl#(g=;H;qH3jFFs(->{y}Qw z=T|0UhkVA$XToi~mqaM=eeqJ{<7mfQYNcmId}=>cj))^ZD=EccjOTqWrl`U^z`^wu zt1Z@taRq{?lG9FJ9mRK$FK`Wsb@Zd~GSd`iz9d{o(W0NsRN(5-PoFTS&78zD>s}v6 z@H&n+%GWee1vVrUhtx8Wa&*;VpP2-Yl96g!(HV3?2&cqUvU${&E-(6gc&g>)rc60_i95WPP0_hn&o;aK3f6vFXYfdw z5_Aqret!vI52INh*^>)*l~B6LQ%$gc(#H~quB&49Z&kgYUBBNp>V()ItPv5YJRuOM z=>2!tYwc6Ii>q86INrI{7&-D4Yeze$1!Y_rWCb1?Ijv>b3;m@qR^Uk)%%zifv&;;a z?}j9;AZGlNRc(hARTbW2&txv`w|u49YgzGpAvm8+Ni=t%cU|~t{EdD7{+LV{{Tr>W zb)2B}`W125tF>}$pDK0f0N|E>p8%np$OrP96fAgN<&Gtb4o^oTixglDQ^G562lC~b zPc|uL=-HkULq(HNx-40*CKx7m_g05?kQ8NAf2>#iNHbM#a#t-ubl)vU-N|7k(n~1& z8m;4+WeLeevPL0j5f7O@*vhYWENC(T9leaZoyoT-2rrTKwb}sVVp&JZ*`G=6?nXSGcae)xajEqiP0}6)g99@H`4wwxpTIj& zFdDl&uOz=2=Kft{7hnED)dJ<*Q2rJ&#YYgHA1(38JK83ctc`|ZmYj)K@YPmqdq?Hg z`St0m(nr)JO&wkJrrTmz{_noC7U0G{&DN56Vzmls22|`#nta!aRrjYEurrLrK5U9N zv*6!Vqt;Ju&s|wI=WnJ5tsPB`3?Ft=g@%_^fE0DLOW(t^b~gvO>N}^@&1NoZX)iiN zte5s89_4G?cWP71;bLm|xe144HFt$1-aBfH2P{>hy`7DBGu8hp^1*vXdKznH{b@%V z=r2&8==+6Tx2YnsOK${`3IVs%#$9x_shLTXs+m;q`EK9r2$(eOOBttP0;~O@V+yF{EePtKYAByo5tcP~izBoCm zazgkMh9q1;fz!U+?Lc#2S z*^1PI6q{j$`f9WEvex9=1m@gB3EH;}o6VJR|I9NvI44v8D21hpMD@2{1-uTj0y^%< 
z!qn$ZsLIh-Y{&b?H7`EQdKzvz$!mK!c;x$vm6h@ISEjEM-A^%l3GdE&ee7yxkzAcG z0e)m?6XY?LF}qV&`s#Q%26Q&6^MolP8RZr3$ex{S@HW}EC8haUUbP+l_X$7OrAv`< zFq%o7{ZSVk65p{8n?29gE4@$3g4*FSI00XfjK;QANghU~gaaoffj<+3}|;xQ?w z!&axsE_t>w>zFB0xw8nuZp4bj4b=dYW)54669Z zl_oBF$%LjW>ob2mhH=l9B<_fK-VSG0*`VryTk?;cJod{v{{tj*OPvbL(SQYjk%(LE#2a;#@eNNc$@^Rb?^562tR zW{Q$mp58xj=9a#OonUWqS%(wdgqAZec3pE@N3N_N+5c$1(%|eI{PmH0tn636FsW|9 zPu!f;iWc2E+5GDDw#n4ZvQR6nVK}G+T>(Nbr;sQqlg+nNv!iNZEO?yv*Z|+7l^kh(+r=6J;0q1x86z&$W{PTVeqZdoK{G^n0^ z+eCx*oHDMVWIm${n`*!kTzAcip(jO31)!VARMg}GKgbeAa9FFCo-tNUn%#LqYi^Yv z@p%EaZ{ElCFsKw`dq&ASv9sF9xqI{-<14`>U(TWg=l^t5g|4iHI%j?D<{+tCEZHWs zV^PU`@VIE7oAzO1%c0biDzMYEyMt2}ksT*3nmpwkEn(#y4YHQ8y{)Kp8e;;kzC|%ILOxo+%^n?w^t9B^wKpN_P9ZDxF3-9C4{G zr+E`!PZ$(wI4wMDrJSR`jW?fEfL9cjJn;^fulbgDCOsbZmZ+;Y$*UzGB(r>2!|uSz zeP7i@*FI11rj2Tcgw4wU^_ z&4SJEBiz&3?LO1y*}0;Y^jrmk0hKC!a1YBSpp2@Ir%ML-D4`VXY2PL(6$dR7jFU@~ zLIjcLN)&qbJbTzu%*na5nusqm_!>mLDNbY{=W(Sq%Xc~DWqxa!4o~Xwnf3#vf~)=~ z3QT?}1EO%Rmbaa&_t-g{_43ctSqaSf^CxWYZf0rG9WDjQCXY<%-DZ#SV+&fSs?Y0{ zd)mV47kk>olmavF;!@@(%ZLF-bfiUT-@d3P|3iNn0SJH}&JpXh?-jeu7Ncf+seNfF z6>Af)q2pcG{lua}GkLD+uN7VhD)k_6o0{Yywl)DZ)5*9hhUnSAY2xV#bw7y^X=%-X zhD0~X%4{Gwala6o%lHCT2AeFw-SC z4V@Il8uBqQl;Q(gpRZ2p*5%6smkco@$_ax)zgoemtZPnoZHuTnG&WG~YA`EOLhyL#!}i_; zj|6s0T(L2w(y-94Nx4?qO1~Raahy)7%L3SG+`c1mqBSzYZr5|sJE)oBCzI#011O0} z#*|*Xf8A&qF~`{vfzx@e@pIAzqgzy%nuD;=9Xl84UgutN<|LhlcyxlF#vc>lO?mw6 z?Rp2aWlHOieo#*ftN`yBLl9T2?LwL|eO0a@R)7{{_&i`Cjo8u2xSa$uKZ4ktD4%K=Z)K|5z5+$y$j)nn zAfe-hb%8%#*KT|bJya(oW*3zqr&48cd-P>u?V3-`?DcbdfJ6ZtMJ=7u2iyh4O-f$4 z@#fxnTc;^owa+-n?K7;u3kb>85v^yG$yKoo%_r%SxY2Nc$HlFYd@%3Xhtdb z`(<_PvO0Q9IB~>}x5{dh1ex7t3rtqM+7Jhd`Zq9AlIW)mlD4C?w|gn>M*S^T%;*+49)GdL?AXh zorm|)z=g~Yskb1N8ZjPnthEb5+GHPgT$FKenGd@)a??!OcZ#Jbi;^y5OiIMb)0?Em zpUgd1!1zgad#$E|nO&V*-+#pWDAcusl8*&0M5=hK`|6d;^+^`Y28l%oMGe>GBv~`t z7B}K}w;i`#^NRG3(xGY8)u+PvIiVw1>Dgs8r3pEXA*DtTqiz-Y9k#QgjQo{3CgEqJpOk8Z4zYzwSeZQP8kRz}^ z^I#+*S?%+YOK~w4(!Q%|uV~zj4PsH?!`j!wg%s-v>(Zfw)4o;N1Le3VlJ(X==~~h^zd0GR{XQbk3ZnMb|L7|QN=aDt 
zZI4YU8%x}WVmOr$M4twNYCb(tUg7sLLfSiygG1EeeSPC;emaCtB2O8n?Tb$WZ-tOH zviI_c2WC??UQGIDa+eC(?f!e)Z!^m3&eFg{x#hB0wFsPwCAGw$a~ z7R3J(&94q~)J`lUvI#VhzCW|aeDiaL$jra!$W~N6Sw3uet$;m}!^o47tn-A>`HEw5 z8UIX-Eu9QIa+!UhC5|1F;kZGE(~^n_%eP||UtPG_H{bLrZas=m)^DJkKlR`k2G4$f z-0_E+UFw-Dg;R5IPCCSVZuy)cfRIBbG$smf=eT%ZBUVC%=^mNF2}o7}PG9Uhe*K8Y%XJkX9Lea+#-x5&?1 zQw=&D(P$85epN>tKSvsjThx3qi!EcYUs0F)L=~?~`!<7zn-386)OJi%ICmW#UESBV zsK#S;19TXQJMcfa6-S&zzxr#uG5}6#N{$SGP~3AFZcjiLeRaUJJyjpMc zEs@|G_rEhbAa7*cvWztfVV$j|`Zmt)F`kDxcOkMM$6CUsTo zL{@*Pg18A%ubIOovpH<^$|+sx$wIM_mc9DOLG zRdlL1|L{TNI|7aw5s;~l-L-|sr~HPDm2NpmicRfzxyIdUN9nt2x$HQuI$;pYyd%ky zAEVIDHaQ^Kwo!_sjXXPOE<1~bS_}OhUrk8T@p*Y4* zhH_3w!pvoTX{s?~>>Vy;w_3P;99TcB{9bd^6RGfg`a+`A3bsz;-ozN!xrf55g+@i+ zPxjk!gzoP~Z9KN=gIf@>wY`1#Pa6m{6`;6fYX+ow|}PGXtS2#7A96?t$Mo-ng}r7J2ck;OIe-J4{1go&Uiu zK+)lfMxixCBg-~AYEBGb4_&t39AGnW0vw5(E7cXLjrRSkWlDW)J@&VrOkpb6ZRa5p zU8(>x26(9UIgGb&uBMt^uFthSK|Mk8qihPTXlq+@+!RPN`1FRCvn5R37KD&Ia14$z z5^(BDikyj&P(_W&43pMRr*EpT1WSMlVh808@e}5-_GE>)o_;Qy(!N`0wiIBfKI;RV zo$+8vfRJd=aybA3=4@MG&X(#%N;uH${eYSSk!*cED?G^l`dPgQX5ymctHVcgupWE|Bn)mm z1OPOp9`SPT^a!OO1s4@bPl^msj7@I(WjOj(?0DSsr;T#nv(KKm#nn_1ekyVESZ=KS z_4$NXkDR98kgvVlOzZ#yC5a)Mhb%2`$!5ZC$)E#0G+ zSl+3p+TTbE;M15mS(X?tz2oA(qqV8yV|^*fGL&cw8lB${0O^miME>5>-R*2r)ESTO z8c;VhWAUcGKd9c+i1L~#n6DM(?waj$WG%4mefYbr=OnnK)85+LRg5+fD*na;SEjhdy26b^cxp_}Z8 z!~E1wftam0Z`%4qo?V8hOMRa$2f*mKXyUCxz1?M+q(0lmj{V0R=SKG3>3=v?ChEKr z6w5;!@uENPv+hULcyP4HZjo_WCn-U4cWONeRpZvr~Yg z+UEOsPY!n2^>l0z3~K~bx9`^x1J<`)$yALzzb`r*A=`IAqbNnn_7*aM&QC?#<}JcQ zt9>f~`bQyeH>JC#X9Ye$=&kZAwuYFs{ug*6+TPRC(|a$%8$(_gHNn~r+-b=cKs!Qc zJY6)xf&VXndG)IN+cDsABhgE7qzALldvSR>;%k`G=ji*acurU3Kk2sr7Jh&8VB(c_ z2fVmhLY00{N&CbHf|-5_a(Q4Ca8~9&@MJkg@wtA_-F;oGJ_$|t1q{W?LG3=rmum7# zEB)`ki%k&++_FCLDrQ+;beU**xpwT1BToUxv5X5`DLpQlD0`{?t*-XN#Ug99ZHssJ zMk8SHUIP{{Dj=)k%wc~T_GMF-VO}Mu?tu$UWK4Ujw^Urt!J6qOsE zQ_L2n+tBLRwp8Waw$e@5uf>d+3$$JK>>70%`-O+1*hJjzjWhB<5i>{5)(R0fzad(y 
z5c8Q*M=kq}9|O|x%lJVtr38-YyAPZbhz0OG6?qYOlCDiWNBZPI|8X==H<6K$*~b2S-)8REu_v{3!6iJyO+l^e-?(jj*ZlKq3Hb*y3Y%(l z _;-ar3S$?E>xw#{Pg_pQ$>LLZ!Cs3HP+dL zj*&zBCA@_(KH%S4aeuSo>j{s!{-Z5a$;m|^?;_LSGP$U?X`x@}*KD*8(eXA4a8b_c zGZ!l3C|e3By~3QuaXvAilz$m07UJ(`euxL$k1sm3yJ1XYk8B_U;AqCYML*k@CcYS3 zi)|4bPhJxV3RbEaqj5aS4t_QHLu|_vHT~_7Hxa^@h#F}?EW#e;d0JsKrLfc|5&MEz z0d$3+Zc)!37O}0a`bYfXX7Af1rbrn`8E{TF@0Q`y1^5-u(v>rxBI=ur`u9L+`*pEm z^HQt2xpy>hwtQH|gcm(0jx-G9Edj|VdWAH-^b5S!gl|yGsMs9^p2^w;s^=k^ow2=( zlx+4{Q**?;5dnibn040V<(T$H9T8?Tv9DFzC3dtmDqP6eQVFgytm47oQpj{ko$)D-I zg6$Ip$x4?85>Tm+|IzAQ9hTWPbi|3|Iudcb_2WO#;&VjOpt@-Fz5U+dhxu%!kK~;x z#o3<@m6=c-J<*CeMz`vYa4bhZEh7pxo2Ml+?ZdYLte@Wi^d!wkp453Or9c2ZPUCxY zO630cvm4P=1-=*}J-1@nFIFL-er2$X_%d0lwWHT{*9u%|{)WC7$dY?b^CfX0lEJ13 zQ|ck(>ks}>eFHVgW6Zp54u2DgxS5w>yCDjcx5t;y33$0uPN=dvRXCoBs%vOwBTj7Q zaISe5ts1+#@b^kW9P~Pr|y@ZpEAix&p^S=AXnXE226<$tAB<{Unz!pMp>jeP4p?CBga)o=e-qQkFpB|9F6vYc~NYzD`k3eEt>KYMA4wJ z-+U!|B!MXHV%ErgZ&~BI1%NoJkOK4mM;r&p9vIL?V&Tavn{d;P{M(f9um38`!)!-s z070lFC#^Euu4(%v^NEjkaGbV|JRp#oQ6(Mn()2fGP3on@d_h&?WDC#qnNp$!r$? z5{|H^xyf2`+yzQJ_jipv)D65Tljt;E29gfIC0)BH3Dk8MlJ+wtoLDdLTg7DHHx$ey z?0r(*b)eXC3n(p5erxj|YBi?iIJawJa4k`iSwL>H{L+vN1`ZA;#aoXX+^ts`&m@vp zvJBr~OI&Q`_^eR3qytt}X+oB;JjC1A7k{}axFo$oKb)*p_c~B+h`=X_1euac^ew$c zyzQQOkA1@#>gwM!sYH3z5C@&cLvPhCn^H6?($rA8ltb88(F}KixEuaRYpW|zwnDf2 zh29dN57QubB>wo!Ja4I>pdUubW>LBq^((=GVO*w-Y%uPfp(o#I@@erR`)HRXJ_-Zh z3?dv?EMM~cC*6@C=vg&cs#}ZPJQ#On()P<#gB(x=Pgf*&h3Lk&0o9uBGY&~LSN!4a ze@_XCIQrior<>Cx`}Z8lLxwE6q3Ly#LYZOJNiz-Nt6vs&JtlnxU=ImyiC3+R8>sNP zd&z6k_Ilf#(R4XEdEIH!J1@gwDkc@yM?OLCRE>wpM+87N55-5C_Z02cFAvmAT*6!PP6xQ^@tP$BDbX@0=Md_FM$>p)_p7u@-IUWL zZWMR7W<#2woW-p1zBxx-=_5;AJ+AsUNEwq9xQI2oQ2&(mQ>kzuwLQ1O zknl3z8}JI&CJ!W1Q{r#P%aEdCVr;@rDz896&D8jbXM(YHow6tJA?^_il(Y{DX_n&Q zKkSp_C2e0E`{=&=7T#>|as>p~W0+X%$4&4>d3yQDWygJc#w%}fba{bR;8n_Zr&MP? 
z@dUBjpJywd0c!nnN=fC>ANHpv!-;tU5>A=%tg62B_d#0D;|AFw8n`HLq=a5Q_Y~j` z#JgDBc~v5puAFopZi%n$lc=JFk5-tXkj9ChaamQ3fXYBcgOuMaooud33j=blAMD%A zF2JT+yb;abo~hEcnCJ}MNui)=XQU^s-=tm&Bl<%D|jR3kFL&tI9SoX{K0kXnJGbyH6NN1sn-MP=-KF>P8{Z$v#gLsCO0m+E)?KCaS z9yDd!GN}ia_C<+24eVxkgdkJbY!}epiZxLXp0)Qeg*d+DR!=boBdUG9=e2g+E5Z!FD%-X?a;F4BttMN|*0*kMW*-eaMb)E@ zCHOW??9B-)EIA$~>Q6Cx5IIx)G_l*&|7>&k(N}@c-aXCV3X@5S*$rZYYg&Avcz~MetdAxBe3aKPG~_EnI$6 z)*+fN4FtqwZx|Mo|Y(lj$?sHXr z=<*?uu&TDzB8|3Hl-tfNc;{W?ZA8VX|ut?D|m5I3H6JQl-BXbTuTNF;)^gG~K;xe82O2RI>y3ZRTrCT!;6xo9Rl^&o)hb z*v^a(+5rvU`AN&xl*+!-Wb=aWt{Qptxp;n|U}fjFIUOoCKBLYz_6jkKu2z?#bTPB1 zInAR2XlAhEv&2vi0WFndy~+g72KVuD-M*KG2|ha>4@O0)rSiK08P-x=aFn48<5Ap% z?Yl`o2$1=QCcm6n0GOOH63s|oGW5CuJ@Aj&WTL)>w9mXYx}vVtbnIetWAxbBxt%e; zI`((msk#lb(@}am#b&dKWMm10eV2P$qBkf7Gh~nUx)mmWtU4ZE#K?&OGC|8l$e1)i z&Co4pg_?m}!eb}lxnugsqjxw6MV$0-M*eV$(hggdfDDa_1xZ|Z=sHC3UEzEx9rwCr z%7-42+@W=lNmmiJaDL>A5`vZfQVy>U4^T^RsF;3a5Z&3~yJcT(^>8^Qs2Zslu_0q$=A)Tc@iOv)VwzQ(iAu zv>=AN(%m7O+B%kcws}tC0bl0)szq`)lcdO*b-p@4_HIvm>6Z8gi=f*b@c?TguIbY? 
z-7$Yq{q$(o;dNoI`-Sh`Rf=E$-%68Xhj#|CGdHw{JIL?qtLL`VC!GcquG+3aG2!t8 zL&AFND8jDCK!>*1vw0qlMb=P`QG}PbS`Cu-r;E_Ju|&ETl?u!%nYB+63@F*a%fq zMQO*gYiGdm>d)H#UKCnY4Ht%6COGheBA1?Vs&H*$i@6nI3My+u8CpD2GHzb!{A81cTVoTS|pnudVlkPrLZ??>dRu`oMQuvaTDmSiW29 zuhQWL)w#;^D9r^qn2b+q%8HN^peI9iAHxkZkJV~9%K7qVcHy%wg~USG;YlC%`F{C_ zNgdHLS&WG(#iw1tS$s2JA^ zkti)&U_eF^qw5P33*W~RGPUrtTKYjFLO8R)@76~}-e!2O^6Dor_oC`DOjoopt3$_B z%N-|Dr*L?v+nC%{5o?4BEVe{NOJZ0#y|c#!^6oZhv4+aer@8(yK4X~rE)R)X?R(S2 zYpf!>Yqj%yWE48ylv6c9>4NuOofN!PraRd@(r~&%drAt6V+sbTrpg@)hy?m|@qS(k zmamAngnQRt83>t9%IhgAg^Hv@ob^5Vm#!EX`{mp5)EfiD5yv^boxxQAq(D}hD{8D*pMU9iBlFOZ^F$Vfl z4*hMB?GUShNpn&|gA4J#Q?#SRF!@JzPMqV=WmWwP!9L?Dr?lgKBq+C!NTQJLNDNXV zV-n%*KylGOk!X)gY(5k(Cwn6`_?+`@vcsQ4OPC}e@aw$li#$=T~aA#BcmcJ z{zCgF$LDLllHBpD?bC;eYlaD$0umR7o;1f<9kN`A*lrMTfN7y2r$y!X;w1eGh*B^c zrw;z1oP^f&?(&=So4J5RL^=BTQ=MAxugrLH-00mlsMP#dCgqM{M8ZB4^Alm8Dx%3N z{L3g$*>HRJ{Fq&}HVp&w7;F@ke=Ky#P4W>ZgBweF zNX}o46ifFs-#ee?dPvaEvi&_^8QFkkXCR#P~ny8%lg}$0$Bxla8H%D&Q#~o=a;GwMul2UR-@)~wxyufTq;=`T9Sm7T{lH|^ zV{u@vs>cXu-D0Q&5G5*3zu#KZoPW1kV`J0!?aKUges)&cseRUHpuZ_n;T^!ITrZb8 zu$vv;yOY@S*d1K5<6~>6K-xW2Z)loHXo@uuUpO=R>A=b|kL3cm%7Np9buA$EgP`OE z*w+NIm!&oS+oO4*0?xVxxy76qvjRwjbM?ru7qMF^s?XZst7BY`5BvD|Jjw?*_2nC_ zFEcpHJq4BnbpYZb_TJNx14-ZF46xgi=-O>W+QnfDk}hZ0kt|WJ`1_-1zoEL%+0^NRF8NL!M?D7!bGRyIxu5*`A`{g$IjA{t&{$r&_%NETnY@E zId~4|$jOfYV+fT~RNxk<0M{#*#o>?MVF}v=YkZTLnVFWBKzBDvRp=sNoq+YusFd9) zzsJcM%5nC41lX?<05i8quG}7`wZX@SX!jjV{5edY~BW(lG<) z6mw6fn93$orqgTFpda25B!>{sv}@sk!xQ zbo479nDXg7P~@uCX645dhp9{HiuZ3WeD|y5oN(7-J9-=0r@WAWB47qBMLu}0FP}rn z#bGz(0Al-ym$!gD8J7ZQ#7GvFL4Fo1aRhABxjELZMq^jk*lpZ!MW=PBSMR8cxu6jN zk~lOpbdx!iyYKh3Y<4Lyg^9#(`QT)QGLokZ;XSB8ox58-}5=3tSbDITEQyj#~ise*N%IY+%tKEC`6s@X4 znEn{S?d<~ps{-3Fw?&76n7M-Ew_%A2Rp@ zjrMk!Lw5k9f=Tj0wDD%(@c+K>GdYzQ2+DI%F$lOCCHsJN z(Th}1E~|4M=ryw2=yQlw)t5GY;$E!$dtG33u{)(6y7~a~XAHQi*LM)0gC1JZ)L3u1 zCTMwDbM4y+xtAqi%)GfU#5bY)>jU)X-^4jtSy_oD>O}*6VE-Wv$a9gd7&j{BZF-v- zvhp*T?5R<=t6Vg{?u1@PI+im}fyAs0yB20vDYY*HvzN3%Yp((Is#l&5!pZaPFZ0O? 
zZNR`?Me~xupNZ@H<(tD)nB9KWIkSDNBUzJq;S%+fu*zZnItbdpw5ocYxjCb){b9M1 zv|1K03TJOYX9jy;Eha8a`gL#+;`&9Q3|-o4M%6xZ>t@snV%j^j+PlX5R~NN-A7yqbY9C z7TVP}7MjT#30y6<3t68^C0D#Lsd?Y%=Qq{T=a*^^XdrY?1@`*izt=sb0Lc8B15e0Q zFaAZw!yl?ZqxFe!vB2I{mnubnQ9tV2*SXWw)X=$9K=d`X3|jL{HfM9FTR)#rS>NAH z;2CkqZb1aB@d8l#q;n=JJ`~Odc($`6C9cJQW!I<2Q&_SkArVI zK%o7z&c45C&6$R^b|!9?TGxO)Gt$%h;Rc-9T%eZ#n5uL(x>FxWt8nT7CQSX5epBo< z;OKcFo;7UZAego-4pNb}{7P@!B7N0M(=#2b7`z&ZC(;>d z!eg6gZH7Z;8)M3jHKp8rNf+EBgWmCnT((`%kYIMIbX~omltNtyup$wP{+o>aJD@) zFO=oPF6?%xpOW|5?$DPj))e#oE&q)G6uvq8;PKR>hZxGi^$ zEKoOq#+n*~ms3M(yqYCjv75qh!$J}47JHL5=T_$5HI`hXT}7GZA+2R$Y*%uH=Ym^+ zk!OiehCRgT<=n$RXXk*P(~>A<1RV$sna2QXLOy+4+y7CDF-A5_j0=JryE3Y-8;kvJ ztZu%P1DM$lv{$l`qYp=&WDCmI)3=6`0!HZ=%`tW)Vm%Eppcx1!X?VwO6A1dexJH+VZog-JqB#V+1VMom7|3U0j$N4JJgH- z?7LE`1Yq6&=&uQ29D9_>*$*&$-#r%CzS87bNBMX-Sp_Z!E+ZDZwZ@7;-Bv4}N(-pK zhrCwn$EOvy*6p@3yQ_NaHdkSa4J@sZO()gOs$w`ffRwE)f?blsdXs`?=FnsVgQ0)T zR;%ag{J;>`7rUKA02E+)xhCN#Q$l;>&Z$UYd4mdWCVc0q;V7z{Wy7^Mt#C&0z z9BXvTL$fk;Yop{DKj2MNlGKFIgR!< zqzk2flHjW~v5JIm9Z(@=pLi!AX7?|J#Or=CHMmiFot>R48;jY$AD?XsNGFMBbGgx4 zCsuQe5GtYJh`oT&3L3sge@v-6JTRyXP~b>9W{qIG zr4Hqpn8Gq7#^~h^Hp{6~R4w0GESpaoXesc5Ge@X^Ef+J;oW=dCqv+q}=B0l*XPiCU zU46Gj`CD~u6PR6jYsSnx|Ds&|N`PI|gOP;H0FtHAYrSZTGMl2B*cW9V^R98&m8#1` zEOj6Q^GRKSQ^mFzD*w#zuwqAH89TM6equ|lUkA4M5scKnsuHSJ@Y`+i9~urA@lg5K zD+HeVI-_} z%kjoHyG?$RzUA4l;st;3EXeqly=EnoQs*LO>R-EJDz zn4Xg9PqR?>NqJ^czd*2C1{yk^X;yku)m z*x@Sd6#_hZ&Qxf4E0c=-iDr&&Ch}3?4=B`qArqowf#lMf`R^?tp58wk@9ca^^b-)R z=&sj1nrPwZgSrGN@FN+&UN?cCLV*!TOm=_3#qiDyI6Cum5L*&)X=Zq$P{Tibw#@jJ zp=KrEIA!`<{TxvW81*BpYTjOfT!uVq?dh^hIbq=u*hOXKD0DCa6i4U0#^CYksvULg zM{M@i7z+kyXhI#ykG(}FA8fq_9{aN^Pft(4NT@};0A`kNA+#HQYvjw;Qv;!LfL^Hy z>&(Cc{nPdA_z*m5?b1DZB&F+c{(v$a2oEBqvueGk+BvjwOSv})UZ%5s=fqB2gj=p@ zjuXKR^8@Is6RCEzcD_Y;RJx@%>4M!xf6bOjrfUxH`yKSTLd4(`fPriO#mM9f@Bl;6|a?9l#Aq04hiZrE_o18J55z|QEJGjAp$nkYh}IkMM_U5Iw8a%HTs z^~#{J*4UtRMV0}ijOK=9Wpc+t$j&Lp;G_Zq=0_L68x^UTg~=q7!Ey@|!tlRj`2C1a zUvU5%gwCdLB8~;OSSibetlp3}xm8f-srF>p5i6GOLRCKq>gEXentjaF8-&qY_W;Qj 
zgHf1(!iiKEWsGVw`W^77-mWu4>~9~D8!yKO(_+Bbje%6VP*iN~b#%kO+<)LVw-<1K z9GfjyHwbWxx;~!l8 z2{Dy6B$di3%TyGyO!h2e>IkJQA!0%zDqCb1(CJ;U*ZQSSc?^jm?(2_)aW2-g+~irWHl``p zA;B+iMeZlb{^$EU0XY|XZ}VK4Pmzbvv0GPBMJ{FeS>rMP!Nw)GYvm*(FWnD8i6Yu; zj&Uq?@Zo(%sVys9-+jmzQ&qQ|+g!maYaVN02I{Y}r)jmNb{C+SPf_tSVK{5|bb%fB zVr22jF;h?YX@Jq?%+g0}HE3oaVC@b-h17?l@}|w|2K_SC5%~LoE^pXG!YGwQy}G5^ z!RZ(YEXMd%EgV~c5HBT%81ZgbluIly4E;EFq$rQD-*hY3+P$+1Oso7cMk6dMi6UDB zg|{r9z542+-Jkv|U3%w!@SuqM$Cxj(Fmsh$h>rk|iV1KU!bJSm+4LJ#v*E|%y8w$O zcIkBcvj!Sc)e$lm#Poqs8_h~`DNK2$sb|jOa%xhB?0QZ*mP=XV%wR0Q-HUCb1SS)g z2o{Z%w%4>@G(vU%jV{Cs%h~eJ3Ex)fq*Qq5HmlS-cNe^{zhOaeavuqbV%9HMy?w<@AKIHDBPGBrx#5{&F7Shefe|&>t^WQY)ZM`pc=s%YYe4V&G>a+I`>D2s2BBt}j*#j_4g2a%8-75BbVR35)Q_&-AL*0^ z>XiJ#HOhVT50lgVhiY0Q?svv_`H8Be(|a4vg&NwwZ0Mb>ysm+D4p)Kcnqz?*P(&xh zT<0fPt(s1Qo#?j6=IEel_sSy+V0uo$_EfzKh&(_1T}bVCwuLD{FDjyJOm(yK;H^Jj z9DAFE93(bG#6klcHA3_tjBi3Cbo5!05Kp;H7As%XJChxS(< zX{HPzXl|C;hu=hPZrfu&f&AMr{z-m`&~xO+PWjJ^+E~e2ebN8-IdM85|N41ZSU(Yaiet%u^!az+b)X_(5c2xe`Zv`i_0�_oNipQaZ+S* z#+3YKEhdUx_Im%n9}`5Z3Fh9D{60;PAQ^+Ri-rX!M_*Y2N@xgu|1XCC*xhYXFIxqWIEt< zi?%fM{`twhl&WI=X0~4DGGa*5R{;;)W2nDdHD~cb@0KPE)q~^(K4!d;!))O-j*5N1 z9XLX%sb=3I@Whb!6FZ3Hf0WO&P6=5?@qV@Yufi9JYEGe4YY7;}wV-guX8486^&Kzc z=gQ+|w}|ihdqZUmJ#Ss-2Rof7NB|ilAuQvl)3*ir4O@RzdS!XMF?(WE`OISbMWV+Q zakHW>_NEXhWA^9X+yIqI7{77GPqWv4=g%0dNhXEP9FGvNoUPPx%Gkts$gJdr*DVOD0;~@udaL&AyWSe*vi4{&BfSNtv1HJCFTK0MdddYsesg$k z-=E5l%{b*JQA|l3V=VV?Pb0=#y*ArYX=Oi`%3GfTBtK$AvlM6(^+maj>4)OvJ4Z3t zl!%SdbSVkP(7qSE>%~VnR+q<%XEB;njrY^Fc99ZK#wZ>a7g3KJp$z>40WLL`8f9Ln z+bFs#qG$YP43bWwa{8xd{=MRks=#=qhTh5p`}|uITs9v)*1#%6_s`3S3J@EqzhGif z65PPbA2Kd_#uDPj$dE3TXc?tds4;uK^s0VH%zR6({ASlxA6^6{Z@(P8IL~_NIDj;m?RUn~bv2^3wFkR>`d3-Bry{;w=Qd8Xp!^czVC059~RNZK!XJ+-D zqG}d>!cG77gSDuW5zE)Lxb}3rvh_yxu+Y+krx^N z{2b`Vx3{G-=+_ajh@h)-Jyk=CqD4fqC#jXdR3WE~p?l6j^e)Ua@==2MY*Ck=2{}ED zT~miP8nAudDD|rL5H0KP;M-4(C4f%=Oc)T{*~xi>kGph=Wb*pc0FTdFwVZn6N^+if z27}o=CN?r;^q7FJbpf6k{^*Z0_$qAlBa6~hO4p$dskENsJ{4Gq+|kvK;D$PS;-%m1 
z9*~Fjn&B(6R%Lfby1t?1sTwz_IV{0&)Y7>d2rM7HzSdb|FUv+y(zqv+8O+}|NS!It9623(r;jq^gC~r1<3^pp+eX5(p28vOl(|5FK zcUID`oY+S;qAxAVf+N!!`iTe!jcHl$13J*08-&=JWv5{DRE-WQ>4w=KtFg#K2!Q1_dk&#Xmr>QYeTa*}beT%#U6+^8NVfAmDYzE*FX<4?eb z8A4^pAq|CT+J5SlbSWokPQ%V7 z8HtGKJd9)D+nRJ>WGO!ommWG01zGE8MuNQR;oWRU0mDjh$h z-<{i3>yWLMa{3{A8B8$chw~B9%W2ZtP17@+G$Ai)Y&V*nB(-nMUlUEI{ioj|Ib@%} zz;D_9rx#13UMw`?LWVcyznsv5)SjEK7EP!Az>)ZlFK6hKKDt+E=$$a;;@OlU_5;`s z7Ci{EwDPz*iT=N?ue!Nphjjekj!vSXmYn50Nks;C~v%bNf>!2Oj!EBSS7ci>!^tYZ2zldJx&Vgnc+~4+7Azn`rwoi zb(NBmJBi2sj_*2FxhmyV26pvc3qqFkrLxgEFrv72;rD)RQ>pvAp)d6g?-a+4yro!a z!A6K)n|9AJd)iC@Xq7Goi4YWS2^1U~0Cd<4+_>Ql(xr&OznCDCM_|@GIwuvr^ z6m*Y+LBmb$-}(8wTs?J|65rEc(BT-29kqGsjK^>fj!XXN_EK(5vNqqnwR?*3c+-$K zYC*{(wjT;p8WBZy6G1+)oX?B%|AddM&aI}QA(TtM9ExRLtaJqGkv?{}+H`kIWL3j! zMttw$-^+g)&tCjJ7cka9;uE2t6Ce7ed@v`x{?bnIbmENy&$;#PI^1_T0e1Dsi0F=r zd~9j=ZtKQp3I`#u@gFGJG%7s9f)MZi!JxPiGEB}{R*bvEbs)J9<-U&}r0b2Rqu+nW z{|;!qy$F#7rd{ym$R^s5FvIv(9JRVQp)q|I=2GowG#x6ZRTZQZg5roAh?`94rXpBqKj`W8nxJm!&Q z+F^@de1RGmQcG{3-jR%)&_L$a+x9!-t@9F2TO)(Rm4Ef( z#3*gzs3KW7WG4_UL!CD#|K2Eo)v4g%@x<72b4RmSu-Tw6!H63Px%Qoy$G&_7T{={sNmONY1jxxiVYYF3 zes6un!ceL;BH=5taRRvT6>HKKAzwFJ8BX{Vd)@bB>_x&$$-c$;8Qc0BzIoxs^AO(5 z5>uk2O5#@9x*TJQx|QW9woNofKfML=+qd(O=AiuT6-relb-WT0`GrLERG1-l4yP~L z*y4_%8r{FWV`P(ZOVbl$yRVchB_#urVxp*pN^BNn%R?&s0(}(`b*OrCJOfqm0hzgf zFZv%sXusQ-rMwneY50CCV%^b6iz>5^(Ikc*M?$rg2J*P6XlCHLi%HF|gY=mB$2keF zk`}1!{z|u+aZas=ikdb*G0#NUS>WNP)a|9OCg-At?Fe~t5P0RzlV}3HNQhT{B`@^F zH4Q#)YaD?>O*qtM@mPk4#Uw!7MI&t!gyKm=ti@kp2mEDu*-|t4&!pE7%pSD*yH^*Q z?Xxrfn7Qt2%R@;y4nag!F*Qn+r2CRUT_*9a9XqZf__hUxCoID6EpTq_$dZknF|g)g z3Oa;8ufqhvD9;>y_~w}+%2&#lh;}{rY!W`?$GU$$k@A-(nrXeJB#GVW=z3#e*0=m{ zk702gQj52Wa7%2PR-l=pmqwIGt*kIxJK`vEO*suTvtu4_4@9^hC1oncTm5_H75o9H zBU$Vk!}psW;ZS9WNMpqN&wqdF@)v&}4S$b}FLTY~`}fb_OnLs?^}rbmw@=*lRm8>l zSRyo9vGE@^n(+l2ExM5TF&;M{2^%T+5$P%9o#e<7uG>1(6{0eAC1@FBZGEY?Uev+a zQiv?&t{?u)ouXfS_B1}z=KN{+tgZvU&w3#9pb+Gwy@2(ZKNo(eQ0^QI-XMJ9#vp&@gOH05yMAZ5@-A+vftuz{XIs)|A^#tXY2hH5bS$R 
zJgqB|Y$>7+0AR6-KVSOA-fbFEmm@p(HwNgI!(}=9BkTrz^%Ke`e34hx!xntv`}CPS zzK`?WVbSu3*qX6{WUYkX^fHe}yoXlz@;F4QwtXX2;qjT1w!TlR{Mca2ZO7c;8njn7O{=83bi)yH0jm(*5~Im>T^oz8Yyu zA9_+F)xUQd(8n1`A_kLYY^S)pUg?F*W8rDeSubdAOIitsa>6K!fbi1AaGO@n<}2 zKq<=-W1l(TKH$WT=%~0&LO$yI6%-}ct^hqV%6u7h@tl5+@B`pZ6~6D2i;VOc?GaFa z--IEER}@T4s7MA%Ar8*hkP35Q!N&~3#N@qS--Jmc{aC)@k^~cr{aJ8jyZcQ;*Pf;n z43=wj`B)%deDf7Z!-Pv<2U0?KZ!IMi1JJl(4%P4H?IF8{wl{Gdw-FI`6pqqLva2YHf0XA$v2q4kJk3qxx zjxgDOcE;MJ@Y+Ca>L;j~2e*Zy?ev!mKma4;A}PyvA#o~e92;CKJSj>BFW|XGdCwIS zkMuJO-RJjQee0o8mEs{Mpf>CgAQ%G~`u%qu>KydfMV10Z(*5%atnAG=Q)hn=Th3EU zid}O-3X*x05!oVv)G)8)OBodgbWGwFfsqk)uRku5Rl7RW3JHWL4^CuOMKi=Fixh$b zm17X)A~qAg`eyR065xz1&Rfs-tFpdeszHoS6&sSbDa+#~zY=its3}iIUp z9zOy%59O;sF02JceO@+Qfct0XYdbgSU3pcJJ6xmR7o?Q}Uf_?;xF-s}&g6al)dL@d zt`GUr6{#$K^9nUvX>|%c?|qH_h1*&`Cg`lHZV(0z!VArd9z6tJILB8QP8BDx{Wx!4 zRb0J$v0Q;y0oQsY%fNP&vUxWcI_dUgjEHDBYO?a^)`H&ETDwlgdQM#kThrv3D$)63d1yh zh5-KrinzLG`L%H(Om~AAlV6qRvYJ%-!W`fcn~L~8qWZf6I_at`YmQ##Qk^=`voZ$W z`XY{&U%BAW^KcY8TfkAeyv&yG2^VF@|0P&}9Pq?uUljx5+OG&|M! 
zDH*lehBh`J$lkHqEdN6W!OM-O$q&2%B-X!?pAsgUKn5J`+5MdLtiivut<#Fla12RfIR_XX6FCjIf^(;?&v{;lr1AAy`+Ibd84(7o<%xB=P`rmlk zbE>HPwpIRC4hXqkJU=0S6F*c_18 zR-&CW|0D7OB=RC2X?(j@zcWK$0EyciyF z0^_{8!l}KD*AJq}wl3M`3+LDR?)(=LDtHWh zae~Ld8Gl>!22QzGNyecLmI{b$?BFXK@-_ommW(4~svp<^F)>Y;6c0W0r7p_0a|`&3 z%83-75{Vw=1s*JBkgAvJynqMkuFCLhMb~8j(N_2J6BXM|k~?|ay|~J>6KWkTtxwh) z?554S=g=~0WgU(zvzs>U`OZNXNkxYcCQsA)wD5i@^P;rjizN_9cid)3ZKy+J@d)1^ zSbUi2$zO9s6!IQI{4JZO_~yZxH74D!Q$-d-9(s*i4)7T1x&7M76WsZ=~u% z;Z$H)CMd>#{~}B`C%UsskaG5CrPdLFceUZr*r`M3q@JH&zTJcdn!4)c3|iDIZbrkfPsN&vg8g^7r~n;!y- zg_>O&*b-ZlV5cs5tgqLV)l0pR*RvM+HDW5QT!renM~Fz?UVYeF>cZ06%ND(snlNC- zaSkwZnuXJCeeJXRA9^Hz9~betPnVhmcrseHL5EVx7epN1z%#%1Ab`3q$jes$NW#6D z23kT#Pk0RkMF5+->ASP4C(SpB(7;$xnnn zKo**Y##o4o=Q---)U9>As@(H^v=;LOnvwki#7`u__(|wR4%tw4(gHJA1wxOYe*d8o z5|x1%^Nesp$v=;v0f~>bjz@k*mYFb8iQqy$!cDi@4+#S7hs$#1Ih`TBWnn@{?SQD$ z$w0ili-UE~RZaNc@H0s}X^LU;!OcFWW+=Qn#p~lY4F8#UxP$h}%Nm%iv;)#8&>0Z7 zEdjx~{&|~95D{0j-~E6GPA;Fg$G{(4@z!c@(%&ndEHzw;I5>w%%KID#l5G<-_=`4L z2mCWa|7_?YgNzmT=2KGZR^mxE^0-O={t~-UdPm@`OKX4f)}j7QnCwZd{%_<6xNSG- zMe;OK-rJ;gFhpS&>3au22~p;pQon##p!TLSqipJP^^1i;mci+plw}x=bClkJNr2MD@!GyDj8`(LOedyg)n`lW*7N0b+PlJs#}nSi`NVp%Idp$!+Kr5Z4*QBDi-+ivx(%(rw5OX|Ec#LX`=ZFtOr|6*pg_!7l3 z&<@B4cYpQ4gfeamv*E+s0|%^P{PiI3<|dB|(%`|C%}3~GUmzX$$p`*b7z%3@a7r0u zSASL+*55mEMG#B$CKSMt)(SJ zwv9`SX~IwU1Zm@0<-#&)E3-O{UhHYSG2@rGu8wttc^ras5PJF2Y!c=kX5Lz1x(nt@ z?w1DZR$z|dWLnjPX?=u7!W2WQ-Yq|6LG{q8aK>n2yMm*)KPZ|c^vpk0=-8%Crl z-g>X(OdLHkgjl;dUopjP-zNXs?FA5&I5*j27esc+f5obY-7iU)!}d9Ca^MCTHRVd(PK1*X+7LK)E%m zky+&}0+H+us|71OLosOr<%cLYc4GVgH|;VdhEWq%x|_D2rg*zw!UJjXGQ9P#`U&aP z2MZxf^h7K~%bufl)y}X4TO1VWjU86#(TTB@wyYU0g8%!P8j{KERhP^Ld&M}x0oS|# zE*%D7tmgLHw4JmnR&Uw4XhsYJuSp36E)V2u_69Hb0!gI zj0v`E>I@}If#F`Gn8|P*IYPMMbxq$r<@Ejj!oPpzk+vx!tkS1gFk~pEw#9^$HDD31 z*~YwnTrvtLD6A|Aa0HuEZ}t8oSmI;8g+!3 zf0<5>r1@p8W2ytO3W|Y$*s+vIo`#yVS+5Pow!Prh34HUABD-Lv!t=4naSrbshTRrW zeV}GqL&-~PXdbQlOdEOJ^qi#P1u*$1O=uo6t;TV0?%=;U8F4v`W>pp|4>xg!boy31 
z1QtfR`?541z?!(K)UP?4{KQ~j%!|;{jV}0@pCgGoSp3?qJQv^Zp=@sAWNWYJL>%V)S#BQB7Y~`Hp)&u)0QcVHkup8hI zuuoE>-}hV5*Xq1LFeK+zgc3MC;{M!gp#KsdLR!4@BvOsB-EkGx){1j6B$z9=jJ&V0inpXXnB1{(mN4KWKi#ZF?l5I9a* zhGNsKc$30grdH3KEv)-3C*J+?Q-zv@8amz*n#~xhWJi6ItkA7twKATR?q4WJBlr>OjK&p zv`u7|R?dkDk~^C6nn*TOVLI=Q`^$jYnm2H3Xa{}MYC&j{b%MGKCe$(r*dQ1S5VH10 zo-g(zs1f>l8!oJtQ}J~?+qp!s$|!2{n}9Xovy|Waq$|l6 zg_Wb1I1Tjc=&E1Fy!ZZ3{8&&2YjHLds?dd@xlIqBNsSZ_fDX`6{a{wUWbZ|m|8F;g_Zu8 z;cA)BUFml1AXV{v)!7wDA^TZ3y!hL~F?rGRNqSzU~M z>e+(LY75*@9e?%*R$r@{z$hIV;(k*HcqfE1^jM{!<1C(lat9`kv;!vIpY@0#DDG~p zmPEd@OAMYuMg!rVN1wUJ2)JicjN1TaqYfINgAUqx{KXLGa=>-dx)}w~jT>!hlwqUw z0_sU#eYyaGX9dns>sg*xo0&-LsFU(ymIGz=)s(Ru&m|CC|Do{h4JRB-`Hs)c%{j@> z5UQs^6ZA^^{zk=sD!Y+BM+F>|2SMs&_4|oTpFm#l`LmAoy&Wy6DL=w3*o=*r)nl?b z=zeygEQ;mpjoS(Q-p7EmH>@@_`q(R7XgrkQe`(U&QKl_|Q0I?7C$7<~c`(!3+KkT`KeBTK-h~Jv9>F1Y`g=`xXr5CGaD_u!hN1u z6cROSv22c$6Ijg}-7_Yo{tENkKv1Us)APP+do4=%syUbYmR!>>$_fKTZ>$B8HMC)> z%ZSA?68@C7bg)mM?SZb!Bw=Hjzi$I6}SxoW%MBSH3HW3N;)TaEi*$c-Is3i#{L z$qoww_C#_)f4Is~V_YUl6^^=0xnIa5QO zeHFJ%(~}v3>TcHMLYztuYV%uSB`|iI)@L2#3pys}D2V1cL^KBq#<~keDoF)wc~H^F z*uxLPq1M^hYYm<~>QIu$bJT(mhxFm`CO?Yojc?td4pXCHF{6o6F4ywQvWeXqFmh5X zoRC&sPQhG5vOcJhcN3Y5Gu|K(e@61?r;uHsn6CAWXcT#byiFyGJ3jPq90 z0*^ZK!OpJWgLi=ed7H~*e&DmdAqGbaK0e%OfkVz|K}dQ^=DhXLLD1zQm_e%a0fsi^ zTD@+V(T9w)1(KmpMY2fyC6NIS-Qo7>I)o2zi7`box=~P=h#-M#D6V%g2>|Hy&R zcm2Lx*7ysR0{yq(cJU~O$D_7PQf?PePE!-f*eZRd9HOZ=R&y4fA(=Q} z#!34&%D5Jckx>Nt&DWr7Za9Ha$+~?T8b_z|N(44vqc(dT$^ovG(fq7i@_}m|CRo6= z7ZGk~9^z2)o1+lqA`?&7ci?nog)Gf8!K$5>hhxJ;#CkiYGcKmnrfKe`jj`gi#WQqY z$k!gTmU1~asSby>`oyM8%M>M|mwgj&4UNUoNCaBxcXg7JN@tj&Gs0-OE6a+WM#@?c zicHZp4FAjtLBI)pBNI*JC3g#;Qj8`8009U+eIi1l_lwmH!g;$$E6U(vLHbL(wuNYz>xw~^C}i39!`GiF9pJc zyjwqH6DFRZ#!F_ee?DrZ9y?j>_Zf#=0332Ohs)gEtb-`X{4BSTdRaGGWQwW=a4k{@ zak}uaoos$aOG6j+>Hl8Y5}I`rjq*>$J4owi9*mcc4f<```Vu&2b7pT@3@EBJ^Put_ z%Hb%F@0B?MY72iTT@Z5GGsx4Kh1mtt3QLg^zk{FAzZanY+dc$3iCzPPbX$iRv$*EX z3qf6l$)PWig1gvKCnD@hq>-iXQOlRVEi1bjnUWU*6X-*brL}oO^m#);{Pb&J14TCDNAJINDG@!0luwBH*kJXKB7HuNsh=g!Dim}U 
zg)`=PBB}r?GP}j{yX*wo*s>~M$j>7cEba;AMowBwQ!T2xXI8`eOZ#y$(NO#bD{Cvw zAKw@(nlZ2KerfF^+*R-;YY zb%n`VQL7IKaXnGdHESS;b$QFXENoxd{R2S|r`=4?!Y(r(=C7dnq=BJR_T0Hs;ix~< z6ual)4zy2Xt`Sy8@|NEmtdYd=QpOtEjwVg$`a<)9Zxr@}9oqSChz}IkUp&_l<4!}htE`SoQNSe>3K^85r;^xa{CT9KiO0Mr0FP`kOO8~*9B zib5I=y(-i$lJr(dn!JaA5F~6UkBvzWD+Jdf1BxRb`6Ba^CHEuAV&lyaKWNIB$RNN} z5l*x>t9HA9+ICd@B}!N)VZaoLYP#&Dn`DMYx>f7=*p0PT0L);O4WlP*Hy;4>BhveX zqijg!kH~k-Uwcy?22-0SOf7+-llOkMB3+91FE8!LRD-&5SFvZ0IsSrZX1;R@HB;?F z7!$dRlH2VRc3a+%dA#8z8nS#>XX)A%Iad?pk@3eLR#rjpfooHw0w#kU1P=(M(hJHb6SDO;(vR8+owI@;xHQGtTtT63w8ddVx zlS3pUB4=xIje>yzpLi=+K!Z$T$5yfO0}#bb#pV~U4SUggt_Q)wR~mDVb8ntaHjRqF z{6pC z@2@{5OI)&8upT)?g?lqn&2;niWQS_V2{mVt)tJk{{ew=LHs20^Q4ck{fQNcC5|v~N zfyrraE>kdS*n&PGn9jENEddgRRg%y~{SJ~vi;vd)2jK!PL&WN47`54Q9TGfrarlO1 z|K0~IYnRn?O7d-;JArBHYKUznDUxZbK*!thmPo9QJYe_sV|Ux;TMcJ=f@bxO9L%qL z<16AYyc?#L)+x=8m^p1HOeUb<+D#stMt+>9p^X6F+ev^v-|rjJZRY30e&4T!VBMvF z>3HmFht)4OK@eodX9QV_>A89)=V|5o5>=0q0s1mSYG-!S5Q%fTB7b52MS?M`?0N6p zj9O(3_E(jz)|G8%edk#Jff0s>ea^WynEmXKmI~Vk5ZGg625hUGV+@0#m4BOd89HYv zDw6xHorg^= ztlQ{YDGDnDL8Ipyykuy3s#z64R2W##yHBG`9}Ko9r}uhlBr}5aTVzw`+bz6>WXwgf zLi^Ev?-@bC1=bo;uQuFTbTq4h zQjxD4exSc$WW&|yP3QuH3KCX&pc1z{H@Fy9c_8P8l^i1{pxq*jPO0D+X^u6o>!9eI z#i6^F)Prkf`_u|wzRAR?QNxBNnTlr4Q(SoKKXB2xw|K%#EYLjloIa9F5%v0(iHM7Y zF98!h;*s3Bd024vBOP}bLK;O$Nt$4-tJrxypb-`VD6+GnwXR#jMw9O43{PkXI1UE; z>8R1TYShQr!^2%|+xPGi7?$~CmFlXJo`JGsZ&v>&OKwLaB?=I?Y=|~bI1czu{Bzb2 zg8PA@eyZ^_+(jDbbOB9+Bs6NidTXqjD*3eNb8JB%sxS-RDuEToo2EGsjSD-Il8 zH*MP7dr%$VX#rUCED*9LzN9pw)|33eAC7Ts^2p${$ ztVP2Sg=wb0EPM1{Fh<-{hQ)ht%AMe@3?y!`N>Q2hAwOVy40t=rzBtd zz0NNX+}c>l+O<{zcO!4Xdr-(cdc7NhVurJ)X{Ox2Bc8~`$00HCH-itZ5Xo`c6BT7k z+t@5d8aD`LO}<@c!xdzTI4kq z*n1l+kCPs+T-jmj&ynK36DQD;X&08N>=4~Dl?k1~gLjRDCqP8Ts=p%SY;2^0qGEi| zf{4sXT^-jQRS^NF^`;Cl@`fTYv1tvOLl81hdhxWPoa5HEAju)4)1WyDu z;M`J7fq0w|GWFfL3)W-71o#W68q4Lps-;F1cP&Y9C@$K{y+x@0@GGi2i`Ldm?B=0> zTx9V^xbvKV#?HM#!{7)nBBCo24OO_F(Jfa z_k>tM?nL5>wEg4^Ylmy1k$%~i`z;ngq$nh+>(V(`)}^4Lf^U6rYcf5Oyv6?Ila@I} 
zalv4yWOjY2#4zCts#p*)^BmS`W63kS7Ejx3^tqO@$$BoHkXz^ea^ahwFsiu)jZ`2& zv?$(&D-DsZ(HAEQ14ex}SB@u<7v(0Ll zX?krf+o~`VpGisa%7}W>Ve{ku2VQjRR3j^6Se>>a4T6nXYzl?)UQ@%;ycq5O^fMhV z1^9|6%D|!`wZCj*LuApBqMO2Yov)cg>_Y0fWL&4|Loi|RYS4q%2r2h}NlNHKxLGt6 zw|#I4W-Wrtnox{*GsVREqzcs7-Ex%j$>2?uNoy3X6ps_psusYVoj>Ew6ov&AxN}K3 z0Z>Gr_PiYQtZ?(nFVDPuviuFZ2nloyUKQUk2GVomDPd*dxn}g_1fbI#6 z`%3proA$SHm{J6J>m&MqofE(acKC`hUeCX!9G$8&Xp)Dlq zyn#6H=qEF=yK3xs%)~4NW=b=~Z$x-$Cm&u?wmWIYfsqAU77)=yc!VO#&mJL;VZrvM z5YxZ~#M-4F9*mlvDD668NIl`*(B6BExdN6zu(z4YO}gytv*2rb>T2%=onRRErA~nS z>~eu{R7B1uc^nKgG{e&LxJ^0@?KwYgmYB?b!alrqb=rAYqLx#jv^9Lx6LrSFz@in_ z;O-E$L1!{RGlW^3(M$X*x+WK5^ji^oB{JDfF}Olqa=w&G#W7ew+)&u}CHe#J^&^6i z3p1@~`G$8*Bddh>4DN88$%u!l-bpY|8|#Ug=XmT|%c{uNBATw&V2f4QY!)Gc9arq+mGQuAd!F8^ej~hQpl% zx)sQ64Ig(EANXuy2c@7+`GF93&EcHA@e0Ab+R|1P2*aGHLtZqS-;17vuyH2{)!t8M zYtG1ESR4l4-u^Frb3ESOr-;)Xjds>WTr2W2VX*4f?}vQTKTB11N@Cj$|3YLR-zyWu zmGAVj{j4rU;&cu>6bhc*|K5l^C(P1}E~(E8ljXVdEk z15G5BLNZ;@F&&zq)Hy>QWNZFCZaf{oqKT#5?oIIxV0se9Q{F0gs!k+3B&hd)uz%eP zP$9bLQ9?Aja%9plszLfw*>!yF(ufyp9i?*zA?EPO4|xUCd*iA(n8z0gu(jnd4w=MM z9}U7XRimpLn=Z0l=ay%7x?yQJVO>2K$%YJTL4U|x@oi?~+45*2dbwSQ`FQ2CKSE$r z-_Z4&-yHEe;IYv^g^8+0^Coo()}-gEJ^cflPBrRi3k_Y4WK|)skWCufZjr)$``fIQ z7mM5FSY;K)cNc3J%d5zpluh+RYSc8i;_1;w zA-7RHlwM>e65467KND^il-4y5tvWjLv^?iSn!13>8s;#pJE{=E&aKmlqR7g*a22~U zqAEqJ@PpK*?4J8K>`kU!-XV`ue%f=qNp<33`3GnZft+T%rWwc5C)jbUHD2x-@Z4n8&d2&$q7EZl6v;MO zQs-wsl80qidgv?$^2EbE#wjh?h54l`l zw^~+c@FG@b>;AFU^Yl~CdajCP%NmMV+Y^S#R199#f+!(`c8=+MSVPKFJoTzj4L!2q7-|NdaPR`x6v~g1w{r`; z)yVG+E|uiuK7uCJOdKbjn%N)YPXc07cR)r&q`VIP3#n;93<`w7 zIY@#>d@HO-#dzLqkwpOhXxJ&mf1tRK9QX&KGZLx@p$guU>iQ(8Yg2(Jm4RShtK6iL zsgZpq!@S9COHrl6zzc`IUQ>-UO|K}1^L$jrO4p+;tcXj)8#TC5{z`+djr~!Kx4!ym zCIZG|`z*2>x^Jj?_DMcslrBL;Qt^0NlRt8AOSrcNa>v?@R-_*AZ{ZU7cP`mm;$a}+ zlbFftBnIn$dmZEGSXq`;OZFE01w4wOl(!`&%Mqez^$STtxhm%^?nXoyW?G(fyfOcv z`>Kfq?=SA!LC0TIIg#CLm_v1Z)YkMM%Q0K!Evjpua~Z^AckHk?)8`|x z5{OvB;TR8I3|*lB9?W<*V?nrH@9 z3^cvsGTXXC=NZFY%Fe5ERPirPXhRI&akYyDslUG;GHFjWlJ5noG!bO8u}pf)i)-yZ 
z5?`?S?5+d}m6MWAoy*eG1l0Aq!%>a2_Tstu8CoHkNa8hc(;iNk{?aj*viT*v>UzJr z$~nFL?RN1Iybi-iUXWELXCRrmw$bUycvw^si{=?>WQr&BGu>buU_xv~g<>1sb9yeW zP4;zuY2Q*+kAqoiZ1f42>*@>j8z)v?x@X-VC(E0*TK@$J7FcnbjsXc)C;N)%uQ&xH zIL{-&KAolCk>KV)a~2hU&$H7p;?)!KLVMB3Vr!uZjQEK2$NZ};ceLTr!#3d{ew4N* z2kYp#?OqK+wx_P9^ur=fl-L@UFHX z0!E*Gdo^j~YP(>NmyM{zgyK>57k;eX_yt3o2Z}h@x}1J^+wsm-ug^?EK)&~oNWQ+> zqeHB%CwnCfH0s|%3&`_h2z>S4 zNehKep$qa485S0B?0q@sldIG3!LeDB#}0k`{joh)na#HGju(7u&Dpc7O^E_Vh0wJLbs;j?O=;dUFco1)z=K>;$5BT9B+$>XRs zMD&I3(5m+q6XAsOb+BKg0vVFy{IZ~LbV#Xld>$k^uECA690ypk|3})J$3xwIf51wj z6w#ux6-g*Vl4O|_31tdVwy7i}gzU_ON(jjw8j(<;M0R7}_ej>sHui1CHfDRS8Pjs# z-M`oKeO}M~-`(qebFc3jaj=yZ^y8FM)@+UCLN^itzDseNJ5_;Hetq z62_&L69>z`a=32WX=|JD*fh1kp-&N=qzN?pZ_zIA)=dMuzPu}Tlj83QQ%*a=)=_Tr zf1z9yAK;^)YC7|cx*!7$p<48k`W?omE#|rrm7(elGhT>hN;5`FSh1QlF5g9ltcgjF@(Uq4nore53AGm=AO4 z?Y>`Q-Vix)>AdCkexQ`lHC^76&T61Qyg)usmb~WAf!L9iYhr)1+O!I3ltPJ7(K6lClTn2L{mIDagpE%+3PP6iA0A zi5Cl9{||4aFQzyfHdgSIta zFDasvV4l4xR=MdN08a3V+KN6QlY@r|jB#4I;WSM0qRU@`dx~Oi^C~8J-@i1$z(s)f zZg>~4`mqQ=L})JGF;HW=A+KJ=cYOnqALKx*PwZ}hZR!82Pr4d)VMhc>w_}AW~`SmPmZeYNoJ3K1W2IY-12qMvtmbo(!KMPT5dcZR|!t{ zoW~)@J8;!7D#wb#q2Cf-hUuy;Dnd4EEu=XL($B%!gnBO0*xY<4w}BJ%C&G9~YoG8o z4APIkcaxUro6mkN@y_e31ODjX+|R;h_BBJ2l)i^sfirtsM3@2>WFO=)?RZVlss#nx zNaGP?6z$Sm_zMeE=i|UD2*10%o#5sb*u4s-P_<%muSI~?BlsZwlJq!Vv(Wi>oUhb6 zW4uSanGtSz%Q`~4x2&2N=S9U_Ew64Sw5{J1w z3?_tzOL}=;LSx_vj-}V6kD%5)h^Vu;@mm{S%7WaYwKIC_vivGg2935`vUsWRWr|Wk z)?^7@Ey8i`)V_W%>XD$?XC)TTlvBc9&j!~_JSYej?#EG!|fP`LNg!cI}# z=VvpgBFnBuTWnBU5Z_Vtndx<+E8B*R0XkPVnC~+)3f*@6`|*w5EayLtzW5ZUnK;`) zo+(Cnbi&ZGT|HCzW5ym{&|!re)=l}?xT#Nqj>x;|GlQ|v(`T{+(5F?jeBMg2c4SvZ zkIW~q7(eH8TkUAjE(ztg$9$^8TR+veTVT5-Erpzje)X!kpWya!>w9kV4uon%6a3L~ z()|4mOb3`6;UBhf3Zmjo#!}g$(a&OD8oF#3*?{NK7Hfpp3?s7=k61-1#ix_m@~t$9 zS8h;*Ze1aANg4%#1UVi@mEOV{;6#tizd&BtbBo6)4 zjs?l_*GZ*Fi*v!M#c1KD{iUQ0&x7k$g&Y@>a! zm_gX~?OChPrCx+6$}?-BTujooG;mY)*7Vw{?Z&H3Xw_2E71zp)7d<35>*NLGUBg=! 
z9f}q%?T3jms-om>x2L z2T~_reqQ~MG5MA|`AurN)OoNQR!97Zt{F3aY$L8BLdWI04$F*s?RLGMkd1hYXHGNf zrKC<~e3ZcrN+Rw5Aya|cze$XQ@n&Xq9o-Cm>v5h>3~SPUqoK*ba@hVcpW{q;^+B#eO-XEva0XTPE4SH;3f~>~RUAeJ zbs-9b^UjrCHHR%JitPlbJogeM_<%Drp7qS>cXud4*@9}4V}l!+&M-woGvc1+%lB)y zum4UNWJ5zi{F`6Db0fs$T@v`6G4$W5$Jd6~)CgCSLb|bFA*X2{LWY!f@X;xX-P}O2 zabekDsU1-xPlws`_F1ngn8|(bijKQEp`ly?_6A+prdaTfT;5U6kv4;GO^<5_7oNu7 z_Vg@DNvTmdzxa?c;-kN}38ijLO?fL)eaNqn=U=8YlH3k0{qu|0lhmxC0Y`^uraXKX zi>^}3;6pu&dg~@8c2@!f;!yoUo$HhyB%Y%Y#a_#4P(PS_Avs&{V6L7ObZaB(nLf$ASxY6*PEry9b8ZTcYei1tA(4}_UDI!*6&z=Xu8eb<$ zN%;>+_mfYUw(DI2A!!yt3rT{#=6`w7-o0day8iIcX7D2NA)ZI2OZ(>&cchACbR8Jt zNZhR=;AP%#{FF~1B=57O6-BX;0=uO3OoPKk?1#@tZAFTyGcIWQDKYpGAF!;3S{ItS z;Oc|A;LFP5^@h5EsYr$>0ZxmNgNS3z*} z{PS0{;CXbxujZ%!>W2ia9d3dR&$?10K%92VoOCWv$@nnd{yS2SLlE!MKm^p$Bj5>7we7QkT$FEeLgvN`|-9Tu%AE*9Up{az|GmJqrCkYm^FZ z#|>jhu}D04U%9XYhbHGzBYB*hY89sS7ACWrf+@RzKmPv5B8$ zp!rbi^-9Q*sn9=hbhrubIinuhNeBHOT{kJ*Rp|3JHkWurG&Qamg=&q!j%i;UZwHWX zdAbqSc39AxV66ZE73U_cdmR93x;_n1$KKL`-@cx-5X)pKOo=&j1@1UT``k0!ejNNm zqOeoOIU68b{q~Q%yFs>k`yrM+v|^ac7kA(nd=|Q%y!ZW#j0obom5_Sk0EVm5_6CLN zf;!OnR6#gc;~050>%)ip!>TN@5j_FMSszP-b~-v1oIsrBkB0I*7t9E64-6BxhMu*} zYe$3!#6m4{1UKLfI2++r3oV-h)?O;m!p`pUzTaW0;`aQPZ}dxs|1-<@Hoz!itOPiB3mLPrJOt-}hBh53@<+aa1$ zXJiW|*JE4E841V0o+RU)Sb}Y)r3_xZ%|Fjm@z!O5-yoeEwVn6mMHnpCGf;g;pf`c} z0Dt^lijE~T;6qCV%cTVJj$FJcK@^n(ehH66MMaH;MVGwzMl8ha?b~lA=QKbrrYF9t z>;A>#X8;=D0UGFiBgtow0UdF_K@kuB!x??+4}_Ft*-EN%gs5+;_x%k|C&Ap<+qSm0 zQw+uf@~&_b{51F?@8|EUFk3<^XFhyI>{7O8Px^nS3nix{a(TEcp`7`wfGy~KCFN)S zfBG?B4#cDU`25|G>N^V8zla4z2SB}XpdUNFZshyYS6t zr2Z1s59wF0ysh3$1DwFm?H)J1m+_$rDppWUmL|AZHqA#ubg_25$u5yckRFsv$KlFj zYe+omqz3_tm{MAN{Z;Jkx%uB7G5paW;jS2<`@wzXYR<{AEUv*JlfNy$VttbW4EUt0eU zaOQO<561I`NR5ph{1XI3W6;k42y~4O>c=K5dyJ+uP^VB79-qADp@oe-A>|hLZMRz z7Muo?lkQ(*2P(BH2HydOb*kfl!OUc0u&dS8P<8lqIUXUu8sXWaL$PS5=9Y6HJAMlI z9X8)9<&bUQkN?c-!P3a?96_~==8ANwJ2NXGRCX2P+S>Tn;Kz&F-_+|T9Xn<}dWug$ zpGVKktRQsPI$Mb!t+&5s^j^@XPNV-qz=P9LNXZ}Q@k6TV(bC{V)UI`8uxmxam)PHa^(HuOAPjxbLv71Oapj+* 
zAwnf7Li@kS3e&N8REq+JplzD+eu_MYghIA^Ps#pJJ9t^wIP-mA!H?wK=l@_VPfsFW z|HqlA#-dmMbtW8uvjTR0?~$L820)M~vkn9%h7@dEp5j0|k#1TB9MoG+Es*B=Wdiu< z=Vo^T_qi&C1Qp-kv{z*ZhTt~cUl`SOZ%47D!QIlpi>aBbY3!1zQh&0OXoB@1Nf9u@ zz3=~|Q_%#iwHKrRaQyBjcrf_hy@P}LA;9qoqM7MrhwZ?1Y>3&*3Ui55uTJ4MsY#A} z&X>ADnSKLGpZnrJQ;-O$=nPTqfJ;M(sLMN2x)jom9LQ;lAy#oT^ipwpNMtq4fTmoEiR4oHrC>1FP*BRatp*B51RGV7Lp|4PN7WHcF8YfxRufyFkFq})D)nYIRS@{I zoHLBKK9NwJpV0GX0JM`t?jr<1@%ilP?<8Dv{w4Yu!gS63}T&CA;W;cgWNg!8kpzS|!kpS^`7;{5k-ua-f& z72p0z&AS>g-?ZjlwD2IdZulV}B8yZ5RN~%qKcjsYgB=CUM0l-ph zaQEMPs}P&eYcO@{77#V_Hiu_i=6l7nHln&8Uva|cW|c=c-geMg;l2oB{pQ8l;}Hob zuR_XxdfjXC3n7;_6PU^C<9Wo3=*4uhV1XqD^WeLL1X>DQFn$r6HQq|5+LBR z`I8fFR3e5t5(aY!+JRT!ow^@lbN12|Ut{JE(57&PSo0$ZRJFgeeP6B!BUXYGu#i-; zvi+JGY1hV5B~1%T0H9gjM>%sh?#;nKDbs==$@iTmeN=>sjlFW28QvMfDm*n`XR?Zwe0Mg|G* z3i4hy$mZw#I2JJ5$6@KMc$ zXPsfyRL^gOgX|_`gdKD~ZAdI$34|ySC!vy5VV}GmpO+Qp^Q5!(qcHSSC~Aywd^1EB zoW|H11shSkwLBKIfZ=yqnT`;L7cM^{5GyFUkb#v5ofI=VUjv-p)SD0(3<^m*3;`4kEA|B+2G5_5MN$THkY_*^n@Tq% z#yd98+Rae)uid_+Tt!c0w-Rk5_VBnx81EJMR=i0URI+=|#li~1v%XYYn_Jw~1TK`a z&bjyXK*Sv2LCvPgxE$-;wPfUXoF}j z8Z*WV%@Oho4AGV_HItf?vdI#wG109-yfkZ6Z>#j-9c`LE)>!)M`Rc*1KED#gyRIsD z+Ut8Sg9k88R!BeYM#y}NMQhEj-qrYe^WQF1*fdb=fdM)Rjy3E;>i((odQVLgKcvYxo$H%> zARG=MLF5}3MjNej3E+6BgIcqDkD57jAk;$yn(#BHfY|q^5_|me`_AR*ZoD_A=Z&v# zFHt9;6`_(do|_1S}cz zY-j!+)GQ4MqV%u??LdLlL{&Qd)ey(02E!s7C_gEfX##mjc%`=(Hm5I-FK~!O8)svh z;}w$eVXZi+PwLigV9<2U^-9J+rQL?=P6PN%3>p+4rkjQi{w{Sr|HZ(*{7m(qZqXou z^#_C(vAa9p8lj zX|R9cz5-i1c}*KRm==X3gu#WLvgZ@3A1?R0H-&x@UJ&jY4J!eRCGR+wx^5luTDA1! 
zoAd(+Jsjek2`n+i^Nj6yl*}OQ+h(G6<*&?oU}>g6nlS2;#rVdo5fedDG}J+JGS+j= zfY`?DZp~4mptZLDUSD1)b#}5R-%&I-0%-St2A0kse16Qkfx`X29E#Ovg6}djzi%wI zB>)_XfLmU?j|x$VfP=tEA^_}S?WC;G9>IqKjN`#*%R|C&ePxefpkT6*@yJU7DP;8p zyt*p9T28jAQ}G6+9Mm7)pPi@ODl}5sB|w(kI%3pIZZXcxT3Y)|Zof1~tTlJ>D&FPR z<>$~!gM}r20i{fG;O%jhIQ9+x;Qo{A`&}FUA$u1prm|BMNd_lcx&H?oxTFAzQNi&; zd?%1!XqZ=DF~jE5V|Npq-1+z_a50YzBq-s-?c(GYzxRiT1l{w7feoHO+jtpp&A5YN z=b9&(`bK!Mv|l+WCT^r&xuIc+fA5FGnagM!nG7)d6xxjT2+%@U^20te>- zmibab^u=Zmqm=*YSubuB{b)&QMaH&j{O0-0g0jb?tHpmemX7_bwR+5G0Y5evPWb$u zd?d?Sz_zZfXD<9Jidt9&S6pGIA4q4<+y|Kq#A37hSG6w{u!rTVOF&`NoWE6asVj)|P(#fFd|qf)3F$ zXy}i4#iZ*%d0^|jkVs?03DdQ?Us#dZ>Z2ccoAFw_*tw$?x{sC}WAq{1A=|S{C%y!c7z>tl)c4kMKqJT>I9BNZpL)wK~kKo>ymIT^o2;4EMotz z%^)T#Dz;>hj7*y+Q;*P8hX;VRrALC^AV&XUC}=vYyH;z+a_SMh%VS@?{6{~qYOONJBepvU8# zC5-72SZiw%x9GoL0$=;Gf?TX`vpW?HJ)Psw6LP_fa?++pwZUcc_m4N3yV%ZT+V|b! z4fWIY5bDTs2xgULd`x_gvz zz5$T&^UpslDN)YoFdgB#3a`I>gA%~Do+oqh*&y(lqsTd`{{vIeeGXE}=UNhp1WwEC z)@M-6@qMzU#I+-K$#VBln*xuYb3Ryj6Vvj-BU3e)tq`gnr&m8SzPP;t|a_*DQP#?`dENn zuD{zlAph39{%y)jnB_Th_1|^zrCFLMay^S8myt%F9>b3%0Bl&8zu(~d4S`&HY#|E27Gt6U zO1NRkbn+KUQ^q+t!FuT|dxSo~1yu9*Yvl9c6fZHcMbTq=oyXq-wd`rkkL`LXZ-ZeM z$M5c=RXg5G`^j{eQBX6}L-~&#UEw4R=d&fl2 zg9D4Not|FkylN`Pw)+Z8j^-q-eSb_w-p$i3rfa*y^tJwt*d=QuC>8{dC;(b8)Xf#) zXlrOec`_^C@|g5VX~t!Be((ox_U(!%+j-?|`#R2idoxNvbfLFy z>E3~m3=kpfnQ2c~$%&A2iR&1@>9@#Ps{T_HuM&;g1TQ;(p3?yn`P?^usx4|qWjsLM zwj9pn<}VGiPnI27qtS_0Xo90YaLrWLJo5voWCZDZdE1)eSX(=(q<*{f3+kQcyNkug1|AIU zywOhzinmkRlS8W-ge8}Hv#q`kR`~h4hYXZhsrgWw{r6x)+>_f}=M$Iifi^6wzyVC} zi??S4ZUD`f?zk;`SJpq zsxGB)NedH>nXx_KZ~A%%@3QvI1V>P_2kr8aQrF66vsOfiBwxIIzyZn14`ka@*Hmvq zNQ(er(C$K#OYB)+B7I4;UZW7VE|GBY(!`z@G!>oBF!eDnKzwG8DWPaZVy^~ zg`NZFMjlgUlqUu5BB798u%cI1hVlaY^G(86=eh;IP?+#%HTo* ze{}+x145dfL(tUewT6v7bBhw98L>wsvml5UG?LLTFJ@U+Utcg<7PwJ{f`!v%hhaNn zo1~KSGr^sU*JcRGBK_Klm3QdlEPmQcv$L~hWo3rx)hw5=V`Cm39wB`4<;cPf>(Mw9 zN>I4k-21r#;x)#q{E0ZgZZq01_~k9FUa!c;fEyt;URPfYkkpKezjt-jM0^km$^Q8P 
zM0G)v`b`Nv@9hh<@cG%w{axQa)u~x9kdpuS$*$ztf!R06VnX4_6SBX1(ZNm{2PDud0%}PEM)C=Ck6v+xJhBE#u4?q*n(^~t2CSF~b<7;%br@Q;hsq1P2_kM*a%q<6Az3$RG+>Re%%!K>~q?{ur zeMputvqBdfutv0=G*fgFNs4nE9Y;Q1b_8{I;wN)TxP#*%hewK*qJTB~wm^_B`v7ZJ zA=>2QN@ZCCf~P}K;0Vi4?%+UupyD#I5P`z zExj!h+{|m6sw~`?)|jv+B)JnORFSp=McqbK{0!z3qU;h?Pv{|;bWn&gD(O>zK}{-(IIb zETLn*f4f1j&YNACHym77yryWD&k-Bl@h-!ZXn6~sY}D6EX=LTvZymV6s9R%`y}6Hb)KT!tc0kc|3p`p#dVFRkS;o`S@~J>eOA99l zhx?f44pyjb8H3<@%-Tk-OJs|`rtvnl)a;{Iwb_4k#%6o6E?pa3f_P2PFc|k)r z5(Gy%QIIO;leg2HB3ZmJcuL--OQ{tRyz?uGt1L6Ips=u}rY3*yBPP74FfAyy`7V2` z=J(`UvkjeX?3sJ#*SoVD0=>Bv3_`#rm8i<7G9fr_AJ5whw7$Z#4v)aV*J>prGLjcV z^`AX^);ggiB~^-)lcG3EysN8{+`!W7r49MPlNN_pxho|b8D64>OesIpwyn>%!5EN zvi19hu<)G4kqeh|k7ZdSngI?98XOL<^Chs;1`F82ERF`GqK>5R6%&UfDJA?i4Rv2YcrUOc)orMI^>5+9M&y$M zh=)h3wFfw4Gl`q(nKRc7)axTTeO$K?w(V~VC97chz`|34tIHAq>IbMXTFHb$&z6cd zdBL8{LzO3xIfm&eDSm;41@0am#e%Ok9P=iGg$j?TDEI;~=>)YW*OZpK`l~j~taLZ5 z!1K}Zp~d*GZ`b9q8j{9H$VR%zeDmU?I+;CnWJNibM2GfYC9<6|^lcUy+dyhBAQVzx zELVc5Q@cXa2abHfL+$ zMUiE}&m+QU>{#mjY;q*l?v@XAG4+Re_$LP4xFp$^;xKqDikLjNRw`v!h zsk)ce{l$h?E=sPh5&pe1P<4;ISf+5}LKNrdAj33qYYJ^(m}Xjv-`}Ri550K?Ov~o< z8ruy3O7NN;c*cZq=Gqsy(ibk8ani_h=gt|ocILR@?^x1?hwEy&a0&dS3FK;&*HZ0! 
z5RKb<=Dq^My3fso@G(Q(Xoj`8?taN*MTw03CV$q463#I0j>8Lyo!Ok`p?$r2svb@@ zLDWOzpFYH$_{BtwAbpv@PJ>loX9H~D6vC=ao*>MO2-#)S6Ou4?9Qt17c^4Vtdq@h& z#ql{aGeja?-L`0rWWP9m#n1t8X%L+EgAVgD(~!wP%SVx!J|?|8^zKWvIBm)XB8=O1 z_F0Nz8~#GY;kQ>uIiIeSj3fW844YU=)g z*4mt%S*ZnVD+gw>{G*vQU6sQZ1*L0j3JnokMDTA2>@P+qPtZmhZZ^_-3gL}mSjf}k znHoFmz!WZl6udpXT;eAixdg@{P9TwsQK%v-Qr20#fJL?-m#46{=4&YH-Y!yGir+_~ zXFCpfaYn8{{=y`_20$@SB@;IDXj%f{{ZmGOa{nEm=hae*zzH{UB3-tq^wq%;w(*2s zrv~pEl6?DnSG6tCFSu?}z{30v7ad<;-vPq|K-i-vp!Y{+R^Zeh!(+%9Y(vX{AMoRy zf*8pbn5GHQ1ecIr8#=F>N>4!jheP;{40he595zSw38m&M+Ej*(n^2TXu{OglmZ{G?R%!7m*}TjJQcQTn#0)r+rcd{ zt(_Td4^Iyz`#i|yju`V^XJ9(AcmsY~PtRQSmFVB}GbZ3d8laya1t|mE^!$=hF#2$7 zHe5scC7LZ5kU;Chuff2xQGbrzhaM)z;R!)ltQ_tV~7H`Yh_L zapCZ32Gv~P5xkr~{!N114qa{T;`F z5z_qgTMx5BrI*CPx!X{lh?0!6d3!P|!A!f0`9P?!I-nJ9p$TX-#z^jP2S;XiXEH^} zWB0@LjMRIMW3vk1GtXYHABn6V)PsXiYP50BmxF_AOLt$Dyo_%eOgs@ot9#WfVeW*O+9}we5R7jQ^a-T*x>OfhOJ5qL+8ZnN&TeK-T7m%zORT!MXxSN1MMQ>LwgU2 zHgW97cVN1ps=|+x73}{6rWT!$!muA4-SV1|b}LC&FR$aa=(E3ZJJV<48K6*nU8R^Y{I{4_uXFPuffxaUR=Jc!p*Z>CKBd?a;ADjHqS78-V|w z9ayiZGxQ@gfc&k@7@r z9;qdrQSo3*6cYd>q;r}^7#M#;2;7Wnl*h%pvEy^Cz8)5^yYoyRM&Y9^LvXXwu9Z2r zUgu2Q!yC|tsR7u^*B4*Ec$s*0@JTV}e% z0C%{#LfS?-x}9y~uhV+=LXz#45*X)Gv3TwS;=tZ<;R>GLEa05eF8(^|ACs| z&h*_^btT?vf#lhC{PX9Us3`Eq*4AL=*5UKJEz*b~2g2|k2Do|z?i7#O#z{zyr8@lh z#+unIGHVU>dDTGrULeV zD*+H(Fy&cqd|8^oiOx-Db?i7`!Y#1>Un=zjPX(D_-i8o7NjBCv#jcn~%-QA||k$ z9NVBL!j+I~Z7r)dI(zjKWP9niTWMx~DdIiNvyP<(%X@bz(8`*^Flg$sj>=y9zgsecX z5VVkw3&9SG4E6|GqHi-MA+m%3vk44$QQ1+rMFvdfn2VMW6dXp_KsgNxUF+x{ng?z4 zH~bDS(;}kUCVO!dCHgpW_?s{d_v(E+fF0=RGmOZ(cFS}orFK=uiPp%v78#j`5W->~ zDEM>78m52i?{5-y38N)==iSjzx<-#FesFEHz1NwTj3kwL_X-po*3x`KRy?n+PVnbX z$jpSS?(w9tQ_$)?w%CkVoxf>nxb0*U28ZZxeO+=CLv9n}%Y zX!2gxdbS>8CnRuN56j)frR}FdJ+h3}X*~Uq)?>(JE6*4i9%51t<{R8Ul?H~7g^RRn znEw@RozloO2vN(eOz`!(_ti#sU(evXZJzu}3X)g!;pd?GCu#79PB|r76_TvRMqj}= z|Gmwutb|6}_Wu4$sHD8_R~b0+6{3`wH9+#3{6a3&9fLvFuA{{Qh&?Um20D*}yRp0{ zPrj}x%2ruh8F&4D0*Ov@bC&`8L&GDYvb*L*CQBe}Rj3xDpF1cR%I#sKw}r4fF9B>&Al<6aPY-0Pti 
ztv|B+q76UD!_+A#)f@OU#|^L*Ptk0PiV2}ct76MQ%U8L;;^N|eSef*O-RBw%5I~w~ zCKnSJr@40mAI$%rdM5tefuxI97hcxOgIT%HA4?;Y672%toj92OUc3^f=p$I?;{w0j z&&4uiq)^;I{%lG^s*zBoIz1s;Gjak>@!j9OZU--n*;wKZ#V zL|YLPi~&Y#HPf<=%|cIOy_cu9`M_nwI&!s(PQ)b;@OC1T6wc0FtiSoFX2F4{OGof> zh=shR+LUe3K%8)!IWk6aRk*{mCVJ>H|32X++K|@vJ2pWQJLy9wxnMla_qr*zUbPHs z=jj>Oa<8nhaT|}^{8Vkr9e`&g@4>|r1Ojp6MyZ65tJTiCK%S_PNIXB@@&mSwyJKVK z4o1Ibh6q(k1pT){aOS;&C6ZQ@X|v=mgfi$JE^acI>l)Y&C9bUj;(ivw7BdhZ2EIg{ z$r**mQ72!Ud<6G+K$8xKsSCAu#_;=nwD<-sfb%2!lH-hdfirAbcZOQJGYI>f+G`2j zAxI5;v#?-qs;Bfjm0W3OQTdQ^7YhNlUq>okSjVbRY23RPG2fY(md|OZs^}drIdvHJ z@@zEJY{cBt{^F|>kHO<`hUr+XQ>X+7yzHwrgXdQD3}cgZK8dlq7m&5YOFmF3Hn56$3jHeC-CZVO(@iG*ehP|(%*K4 zJay}ae`BYx$zr76p3+mVzHn*x*h{ooO&+f2r6l7T4M3-hmHAj$eb=~=@A;J7lLN7; zE-txiug%wXh|E>gY4P5cL#obKzoIDd0$I)V3L7JOQ%qy@@^8&;M@)c|obckr?0}}6 zA@26Sga?Jzc|_SxK~2E;3cw@2V6d>A_2p5Z47nvSc-}*TZdnu-)WkpFqF&52)ncoGDyL-#S&(@%5B;3HUPDt**(ZpbQn#ULeI)WZvowQAJzMXq7$Qh z;*deEwj0x-_pX?{fpG#Ew+_9h>rL-}*ua#qYt`lLIz}66eWbQll#_FrAa~MvXuqaV0}C?z0!eFX zD6Ei>#9CyjWGcpW@A^*`;5BL_Zjd2|bm7aB)=nUUKbFgb6^LIhKipskdDiWj#@he67!NrPJW1N3l-)_I_|sRsh1`_+$Y%uHw87O*Oh zNGCPP4=(*toMImr!q4nT(YXW$>x`G9T3C>eekQlWhbLEN-dtdh2t5_W90_WH>Q<~r z;RC0LuKMPV@L7>)A0QEx+B(Y)4 z2YU7=P&G7wDfuDeBz@o_dy_Ik8XuoRzq=-71EynKY=4%__(>PNi~_F^xI4~I*|Dp# ziEDzmx(9t6e9_B%$C3HMFyY@&Q*wfnnMRdP=q5^H&JaiG6HXC~0mC8MH=Kw@p~mbU z?#G<(yqg0!o4X?D0LY5}A$HJ{3%I1oRoy^wDEuQx>$e&-hX$-)!w{B?LDTTK;}8VT z^B{nmksxI<5eB30dhM3AV3byixtDn&M_Um#o3Zr!2|7Bo4>-&OI1V*URvO$%=c z=<^U%_K0TJ=x$4N;$?7^0Q`uDdL4IVsqASK3I)8xCYcJ*n=bwR`YG4(EA3hk6PJD` z>Li>jfvCDSk0#qudt))AJMI#%h2E83)lK@zDf?tQdtT0#M+1SRJQ{#Pobb+_1yMJ) zR`a+{zS_FD??Y~Gmqa3`S&`n4%nL9H(?j#=Hyey(8Oj1;u_wH~lW4m@8+H43BY_Mc zk9LpPR4j+U+y;EH5_$J>_@@SuRs>5T2z~JLnGS%t@qhkc6}(Hub~)0(99aN;`TW_= z)Z>|v8yQ(eo3^OpzBr38WSV#1zJ)#QzOP;hP~q@4GrN4C;q>?Sm%U#ra!RvAAYSo2 zW?IKtTC=?g5cfVvY)>#y8#5sEn1+^e%tdu>yEu% z8&u*nOG*89N_24|?t88sW{%W6H?MVj{rwz>mB;1B8C<+XTn^&CKm)vOn?Hf zIY+>TB&_6bgC6_(Rhi#E zFt=f^CA$2-e&D+9@RUFtLsG^#k*gJ?q3ErK@%sm{w-Oo0n?_R@w3A)$=JbHsEf071 
zA`4X{k_t4nA|X5fklfY^Ib^Jy-P^0A&Mk)ACmw_G82V@r!Z`NIz8;seSw&+VE&Yy` zl`SYF=>hub#+j}=zXsp=w*gv2O4+QIR(KTLd|(B{VH@oug7q5o?w@v5kla9?dBYxx z@G%Fe^|&n~DUlby+PO6ttwvnA->xAV51u>=QN*!P`yW!?Jl3nE-JrP}z#HVt0Lk?) zyPL7GCG98d6q39mtY`C@AYLtLC|-pVLo7WlVE&RZHqEgapbSOVH*wlz@(eMD#@GOt zIb#SAomS&o{r0~P_ac6R0f&iaLbR+uZoKbtJ$onrvgxL#`J{;yzvN%Z43o}o&G z(fPe0z4NQOQ%$H`PtO{1_^yy*)_wG1TC)LyBs9pCECi7@+0GNuv>REJmHfOdQQ$J; zrPSl;<>uQ{RqjRxCebvXjeN?U(WZA_At`PY1=)8=)as?x?gwYf|XN>ev6s0Z=g&8PSfgc;$8&>O|0ue@oKj<8C& zd(g_51#AiB1j878Ew6OCy~a-CR|a67ZPc)hGZtVPk#ReL6)e%ArD?plpKw1I& z@edpW%W!s$QN`br*#=lL7=oMj<2I68 zrn@C(7nv|5w>K-ucS#?QN`iSJFzkE&1+9b0*3hx5E2O2z@S6lR?&oHiu!#-iI+nVh z>g>Ik#)ty4zOSk*e+HcY@^s{c{L8h-_KJ{&}ID9c4^T))mwO$%&%nE|NsW*&cBR?(?UPVzf63-eV9 za$n-wgvqYn59@OoX`Qq)10Lqx`=Keu4V zEPO3cUXkn#?c(D>iSvgoa2_MOi^THo6cixT5_r!b1Ou>$92} z~B@kO6AO;=KmS6gZVnuI;Uo(BXiXjOac=snrqEF>A*VcbuKJb8z6=cjTFZ zRSD%?CCjoIU7czs^X*df@Z?(G-Ne7|Gq{|pw5L3CBsT7YmWsyuF>DIgn(k^9)qJ?&;mPqLYt`?GcMHeCL}y|_D7%NSWr>g z_L-ITJ_Lms3p)V4EH;Y0KpOx_yKtML=tJ{iD`aWo6>D zV=@7sF;@7akNrqnNHI_KC!=a`rGmk#@-KU;)B}PK5uKj0tJ^G>J+WeQpM9tjm1HCt&Jg-O7gV5l5SDSnaT* z)oYJEt*!T2LS25XYI2eSwaWr(|1Pk2WphucAvETvvNA`S)_3FGYhXG;cldmVkeXLjqmz4I$7BHzIpC|0Q(b6_Y(;PxGpWgMhK{ zi>6pP$C;&8hm66bFXel!C_~XjU^msp+fqoyadBE72-p?k&yUoIC-?FB)8+y;zoLfuUg-$| z0V=yPBQIjM7BU(gYz zf$2k4Y)i#7$PYB(qh>@yH+rgH*-dL>cnhh`cF`Z0cd+(;E==G-1RyP=XG?NGx!3|K zUBli-A)dV6{bz% z^bW%*Q`8@v!090Ml?N^;gn@@N1YZpRpm9X7&MA%;Ym56uNfMMDVPF%qO*-ARXbU== zw#`sG?ltUnV{VCbA@cDKzpShe02d~iined4x1b!Ma0-1X>J_a*_ll0+DWhl#Clw2|S>Bu!CulzArT{^YIQ%>V$^3C{_nV8e%Wm_{zP&BoB}hvC_$t2+QQ0FDoZ67=v)-<7y+O_`0nX)(h9pz72fLiCr z)_8FncuGg740HOyRw<MKG(pqAo8g@ zMHm8a;iy1;nbH44*_Vey-M0OYA|gqWq$rXt6be~}3YF}nMO28WL}WKY5lZ&0kjPF8 z*-a>9&sO%GBJ0?OF@EQFX4La^-|zb#zklxMxR2w0%=Nvlb3Z@l=R7#s={R+}(|fAd z+GxQ+W2YPU1Kk!QmwFiujXZyL4jJnwyRU^{%FD;8`v>APpGiWWS;tl+Lpe$HszZR$W(3o)R3co?dzvB-uK-N8Lt6 z`#_N8$bvloW0=iIT{J*42IKk8{^Ee##igIrK_^8O=SJk!tS(!Wu|ogHZq~|FsS?YR z=q(s}(Xj0a2aCHmXI%P-m4m|z!mCOPl`q=d7ZeXxXb3jRsMGWp)1pw&UneDb{~5aB 
zQb34RehcxXH2-A#JN^Ei*r`v z9ji(Yb>UnNuS+w0&D6_r(c(Mf?3b{ycPcf?eU^_&UQ6XrS5}& z;9_E}-{Z~Qs)wl@HX{0`rfxI5Co;OZm>PntIMhFNE=3;9A{#l-1hM^gVuQ$=QjklB zZ8JVU39{dv^jZns(qcox`yB!xOeZ0Uj*n?%1P#ys)T-D1uFJ!|&i6 zw3gxO&6&DpN-W}YG@9vXr`Qdpu5xkF0Rr_^RF6MuSRA8N)gpza0yFk+hi{jM5acP@RVk$OWj&-e zxazqyA{vOpO_y}?A{Wk&=F9Jry7%Jvj>bisu4-1qnHh*NeU zv|~F&>UvjYCS@?4`0Os6BqRc} zh?#^I2sm(Owm88oyavDmfLssC6IKH zaD=vkWwl1GBQXh^4CKNH=x^G~R{b zdB&Z*|8ru0qqAkqN5 zS0Jfxh^Y~HoPZw!c_62|bWzw-K<~441n(T9vMnjTu9tW5Vt`Ba^m~5k*+;RT-Z#kC z&>SSuPw<})ho*GH~ za}eMzloEY{w0k#iyFuJ8g=T6E)FDlj(cgU>_| zv7HQPR+o9gu;K0#;Dt?&>a%h>I(%U&2r1LyM+=WS^k+_vFHbba z_J}0X63cwtx4g-z0rZMi9urDIno=MANJ#YTz0uqhg9%0QYDf3D)w7h$B(ZE|S)+^w zb>Rpwp%edn!=-PQCKkTo4xeUH_H888T|IWLW2$)0XmTS;Q~kHg z`PpaRp_@HnG>jDH_ws<-`S#oG=(}$aL4AbSYW;R)F%9?V26W};-})iP)j+S#UGACP z>1Mj)xoy*#(HQ4Ro&ec0jAR`8=6=?h-VGLBQ3J|@>mB8YP*ynWmoxWxh_gOO;jiM* zp}WI7mckcPlm`qJXvIDnZN0JY+`nAqt>ET*(p3h|d;dHIm+4MU^AbIuHj>m3&r^v3nG+P(SDC9H9O<0#O+{QAuiuu*30yo_XP5jnzzaGJot%( zv@}>&&ft#wZ91Atr*b#n#9^zEGJ@1Oz6}%lTh~2=tQE_DpI`+4c68gP6qm6K1FkGMJxI*t8^6rb~_0*HuN1 zir`JB_|?oehg0SLr_9*|HxkJ*xAtS{!GaJYx#k%{ACtWqh-d+DM?zWywqd>NA6v;N z*){EqU34a#VBf6ws=DYBJ*9fcE`C%GdtFp1EL=lE3>4$`6PYWQj=z;9j{+GRI~jcr z5LNT`4#>*1Nt%oE#2o+1xOBN}XjcxGaJXt-z%BoPfW^7FX$BpAeJd*~V5)t$%_FUf zOOgg@X%zjr-TvaA6_wTG+m%(=VU891?JmwlyECBPQ2*7t2SR4YqPZs3Z&&QEK)LvG z?{eGP64%<`S`w8UN%pj8GNX3?cZD8QKG8EL(x(^cTbJdIRp#yr=;VzY9 zSBcdSZT!d2c=lc~-Zryvh|Q)jAG)W3>pT{aBnol;@4prxhFOHk9Wr>~tS?2ZDW&LV zR8vBwOQ(Q7b*D8~yE|S;<}hX$rb@_#`|z!9_P3Sh;Opv1Unlabcyt?j1Mi7-*Ask< ziVbyj^B;T@sf-Vxwr&5o&lgu^FIk{Zgu#9O+s`~n818(3XyE=ms6gtnt%oQY9n=OW zciz7`sb1ieMAtnh)^@tkQp&m~##A6T z8fePn3J~-b;vgKFAOu1k`UB-8g98s^Xx5t!q@XG!Cugv{99YoF>K4&gEfDd%CF3(w z6$-=m-8TYAU+qAIKN3R0N6x}L2brl#l@?q0Xd7~Zcym`a zct!yGrWoVa$0M0$kE0QTveMgEcA0R`r7L@G9{v9foh0IyWdn6ez+a zi=utP`dT8@LX4Ti$VyRwlVqg%T&VF}*sKrqYJeL$t!AD8aO&?VP0ra+?v+i?>FF`l z;MUe@*(b`l?FPT6tCpp<-dXc4757_u4wgOLCmdcT>VuVzllxDjlE3N9+`^$WYec$yXGoF7IY%zbU=c7wTwX#ZplM#PmKm#(R4H*Jrq-7LYfG&6c_&u(6b5Fcq 
z%0Ngt1HCZM&$ijkhDv<54W7=;&E2Kl(5_!we5$o2(YI9XytgOJgN?r1MO}!Z`JG-P z+CZ;eBv(`2bqDC09*q4?`Ywk5o81Ni1fO*u{K$J;a^hBGN=m_t*_XVjFA|WWmT}u( zIdxt?{~~vdt9&uZ1W-Z5c_OpS)#Mf9NCUFuwP)T0hSEx%jRpH9p!rV2IP`=+Ww+fC zDXH(x%_mnrUH-AZa3M1uhv95kDOP}~iSp^QuKptd$7e-8yi#uYc%UWf+g?T<^I4B? zP3$Z2tT&(cWF9u8`L2DxX}gG}f?A3)p>DtV$CkL%xPt%uR+ZHZy=RjX7u!bv*8Kec z!=jnie)_Z-Z~yDU>$GMPmq|sVe8{Y5O}CB!qWyhNK!AKA^D}(nZCGq_)!i?ex;VUO zgGT6wJ)xBZf(y)Y&JVNa?HikXp&Q8%MXl1>*_oF4K%96froiQJ9*f6u;qPouR5q7g zl^L5Yy-Jgl^&`9AV^elP#~zPO4WAix+2Z#75v}PS137K1T#eq}Dy<{(e~4DQxJ->O z3wxFpGQj@M?3VxKgV3V58BcmAk3*miLf{I_1MuQRf1~N0Ax&Eag?6^w_(}Kl2}4w} zTVY|L=k_Ccv>t}RnjUxW-hBq20#hAk1fRYDy6XKjxu^GT%_$HIk+;ZR%EA7r<{?sc zd?Cwb$A6jm65jtd^R@RD>v?>1Hb-EOkhAa59TbO^KlJ^`o#*R(WHw)#wo9t}Kl$0T zM8)0*g$9!rH!p+ml`3WDTB8fm23;l~V+MB#4%7GJnDu8r^GO{OF^|9Xd{2-@E#s|r z-RC7fKY&Fl0!F}9qj!0qO)mA)+bI3VL3l5YM9oZG%&bD(D_63}Z%=OZZxL94<%rc% zQ8B51A4G9b{NaC-qeB5e!VN+|nbJ)_KfGUmRQ}zyZv=tPQ+E~nh{5vU8LsJPoUzE+ zt;exzSDZ*oz;^+MhX<{18LXf9s=X~;UV7_QkHjN^SKK{*r|5~EJs01DEZF^RW^ls) zCJVp}&lsD#SQjn7pETYfKr!|2%p8_JX{<5_2N#?mAd!9ksp^(Pi<&gZ1>b9O^ST8t z{L%Z7fL*(G^(9ilyi9qEZpL{X^avYcR@;H6PyF&92uJ%ivoZhLI+7($IUa#1U^Klb zSZm~B-1g za=p0KMNMLI01lQfXIzt33PWZm7cmb5sC|a!G75*>(x#3}67`-!C5RO7ycQz|SEvy7 zn16qLo{_ALIe!4ufOfxa5g8h4r@!LZcPF+2)-4p9)y~d7=2Dw0?Uoll5|XySVrgrqvdo>QpfJ7(gKJ0v4>W$%C-2qu5ol8S(yCN;^sn`&sT z2(SGbbvd9Q#q)}G7&*Jyf1Z7}gXl#osh=$nmi8;5+-=JK-8@*A66|1Yzl>aJs|&%nIK&phK*;NyKB&R01n?W?bWF6z?MF<3uz^5|MK^i~S|aF@=j40kGdzXH*H%>pGfjR%K*+;P2VWvJLv` z)lES_X{K;#w!kmq)Q5_06CU$Nn>KCAJ4Bt>+oETG>RhL8T5De3nTX|}M)kf9H3F5; zNkH?dAOoWMTm^8}t+)QkcSusp4dTBzACP zY_0n(-^&fi3;?P8NV}_Jxnf{;61~cU=hkT)--3#}J)I3}F#cIP_!Wg!Z*yA*`xrL? 
z&-zne1oE>hxSo)a*=EI(8g+KZGv7T^+y?_0Z8Rc(PEL*l*sFPz)$%z!>Fev;7k+ee zL2}&Q10HH$qAw`Nor@5yZMl8=3AZri1(a%z#fGV+&;zxARQP5O{o^)@o?)I^m68rc z?!qMBqPo`{>t9t|xk*&`oxmMA;Ga~mLn;Y-6!Hu9u(Y`Cm}G^zgTtkNjm~%08F_;b zTz`u*Qwxs2F0`es7Lm#X2Z}pKJ1$+>c?15+GkW~;$uCMP%!tb>VO*zs~bag#D_N^NYf*aVkhi+jgn*otP-7%`qY{3x(PnJ=bt zsp)bkHQs@3Ov1D*vL&euR~bmsShja8_J49ip6reDh-|8@2Zvd5HBWo#T!kju8__ zqq*iBdqjuu1GfR9yRtA*YEi*Z1+mMt8fVT;$La0N$Q1d-^XSo|B5jK> zwxcu7>8%#o(xo#jJq7Z@c6+~Se{jscbyjaWPSFWHsA3G8O-{_W+8i#Wr`{{|b1hjk zRk`~s=PRnE!W}yx!G)5t0N}2mrV%SpM5-OoROlw|awy4Alq^53Gm z3r8@ajlVy5JqxAB3x4X|GVpJ@SU8zBmJn)&P%xe zXf*KOex>GkEFRHkiuwAfXhN_G@a%A?GaDzu*=!{L-s>U9kP@%pOGIL$#%ydU-pV9;67N$Yq z@!G}#W>ocx$_^9arAgk@KBI>}SwI^R;$l!3MD@DgOL%JK$+m) zo6X$O18u((_X?_zf|~8G7G4e8Y+j^?0=De%_Wv$=k;`uPVGk@s>l8H(e@G)Tf8k=v zWuZ&zYcSeeQgmx+CLg*7L3=QPPZ9Iv5E*s6_jyWk{Y!rjz@UI>M(7|GPqS-gQnfpWdCZ2#DN4MV>KB%Y!s0aQ^4NZn@ z?!8Z6zI^$EXM+tH4hguEn!WjGeBH6XEnjNkq0K<~o&U_41{J~*^Wd)tP#iP$9?GfN z_(I$CxGpER&)MstfREVG&q_hYm1Eo%cA;x?7Yx$B#YU%vpA8Ee3pI`YiQJv0xz%YP zpFE<~tG`|2ox1wi6}6gsZ&TsESRZ5A&#B7f7>RgAqT>!np*4Y=1`aJ%&VL*usvRiXFeJL7@1HJM9+i?Bm3 zLY*C19>m5Hf%V-;s^Zwjt@@svyx{ij@85prE20GTEZ;-J|Nh&xy^F>WH*Ac}d!vcd zzw+k*(SyUBz}$M-X_MAs{1)SDin*~!n)fg$C41aFm-9AGS1eY%f zuzyjcF9>yA*VnHd9DUt&tJwI7Hx3_zsUcY1aSpa1k-NAR06wM4_r_01{;h@=3nCt8 z0e$LjvhBiC3ih~ou%gKHI=KNuF9yS&c9cMthSBJ|P5!BTe#a+Kx5yl4as3RJMZyn> z#3Y{y{C2%sIcQVDi4x@*NzWPn!UY~m^MHrP>%3HFuGQ4!m zS#nh`YNtNl3{yMU_<|GZv_#TGy z`IE~huWUy&eir+X8MZwhiQaO&Q1>JL6jjPIQ61>?Y&ov5LWR0cN=jp)-`V+WC9>D+ zZ>n)VXezXC`>8th&AU#hQu<=}wL7Ip#>aHi6!qZJtced@zHNg3i$=9UlRE$>_`+1$~fLh&mS=VtAR z3AG#?X`+FPu?AP&co~e1zVn!^h40#dwze_!%4ll>?OYO6Z#x4; z&%@muXmmsAGLiy40!R#del9*k>YozhlYkg*dXmJXpC^el5ckvCkQ!>qQ0s zjr^)?VJ+}}-v$g-YyA9FttM8UcX3l&A9g9(iArNdZ205qD0C%cUX=?3Pvp$_mYZy6 zaQ17Vi?3^LvvqHsNTSTiOB^Y!kpuK)_^XV}*;r0DQ_}>0*y)Mbi8F;E_tQW1s#@d& zTo_LI4oha_R+0_+66O)QSX=I=Q2oo$5)Sh0NW#Y+HW+~!SMeD;p! 
zBU$R=VG*b+h+V(jS|j2TH?{Rn+;jO>EMU{A6wdFvZw7!xc&eoq<`G2z7CfPXX0mMxlwCArh^S5?;lX_7ju$-89ty@;G3= zBFI~*3{~l}xd!gG!Msvz&hL|XjAZSA$`+5ocQjOqhjn5N?CW|r>i33A@W?C_4^QNE zG^{6HzD50J+Y(>IBQv8rIyN(>r>CdM9&J4V(A}K9__EKvTQ&g~Eq1x;a;rqI#x%#q z$<5?eY)T%&VC&O`BYb%NcMK_?lV?NRw4kgd0t>1Y!os9{K$ZwaR1M&l7YWZ_w6^*Oemh@=T<$Df zF1?r~_BtOA83?EW4LO>eUh${<7tvU0vg%aBd^`shd~Tc|(k7`O6pB3ww4@A%dGdanG zGfGbBX9sD)lX!sfR739Xw@|#sr;W6_{YrnX$hpA{L%MF!T~HlhMOx3anq0lbL&@OP z_D<#;pw8*NX;hH#VKxA+``Z)QLvVKWu7%)+zTfxFjMImee^p>yE!~tyW{9P=N?}Aa+QN-!DN4vaxZ?nm9$CB`I z#BLVDc!Yu;f~eV};Odbygix;wOy+EXGx4eS&Q#B$VcPl^XIqEWbWt~K6njj*PX#(~ zlcRF1^)T#{ci&_c0oL;FOZLDEy#f+4cg$=R>6qeg>Irf#s~d&yjj}en&8!Or1?B^( zTqTmqb!Z6p$DmNd5Rk{f@;kw`4_q=rX3uCndJ{=rj{&IYb!nglSwS`6;-haDr2L_x z_&AinTnTe~MX~p{D5jhJXj`4FO>mii-n}>#8=|@-D}p*N1mWj?=vt^%Mq?pjJMq+U z=Ibc%Z@4&J?e3Om>RP&LJ6SZF+3QMsXO^AmuLddWY+~)o^}~haw29ir;~*Wh@p@>x zV`5@p_)p~S#!7EmZO9>!;rq}WuG0Ey_Mx}aJxMw|kbM33ksEfk?^_&0xWe9w{#yEy z1`9aa?#&A`eGw0y4ZWAi?))Pf(hQB^4bwMlA$sWF!pJyNPv6+vi-v;Jd=!usE(M!$ zrAnjG4M*MM-q#sHc(>oXccbFRwHdnjWEA$X9Mi+$ z%(O{2hHNA5-xbNcs?dAa953c_k)bgu0-4VlDxZFy$3Un{BPEnurhunLuh z+mbi9s&nq%ZG$taQtGNLf0dgnS-*6Rs1OaQOA@z zHySe&z^2dq$w>sOWr=OhW$bpFG8TIm((rRbAO5*JgMtv092~+om$_{^>DH_$G^>3z z9w3vn=Ab=m!(V6+Cw0Y&xV7gUI-<~98MF8zuR~P&pFQJ;E*FEp#DgsfajsUbVHok4 zTUa>GyXJ7=?Pd;iEtk1Ccy%C5lKB~8cdx$M0_|ZJ>&|d!e*gnV5fX0Np%g?&MqV=; z$bc*I^%udw!vNb?CL@Qv@ElF!T+iS+7>uyXAHJ&Sn$XB+Fu0H21ARw`FGsH&+_h)V z(7^m@HCTKQ|5FzC4K{5(i=Lj%HH2a5_)NTv?YLX$SIbEtRQV^Y2+UGD0s2Zv(pIfZ zk5iFKeBp5J%;Ea2U@A#+^d4Da=|w&=6XSm$`Gd6UxCowG;mZ|d8&ua@W8~W9oxd{0 ztVw%ZXyEE}oLOF4b=>6!8>(RxLe%Tqiu!>+gDHHo#XwtJUSAgP-W$Z@ z%CI;`m=i;EvT*sSH{pDu`&EeP&;>bxCk&E z+wlV(#7sS)0F`ON!Kum8!7XC0FsNT&%d^x# zT>nT<IesH5eq@E+FOQY`k3m`b%tW1R+<=v0biguaVdWQl;hloP zXUKxFE{dw`+GVxGPPlU&=vr#-mKtq2{)J!{1ZwiAX!Zrdi{!HKUZyP)Tg>7%nNRHL zeYjAwZp*sF0nZ1MkF#R-qd;?aL`MgBpPpuASO-878CdmA@+`6uGYwIbV;Ffwd=hoE zl7oVp|3-#tbQHtgi!2f7Mr5?~r!J@p+xp9G`|G0uH85ut;qN&2m%P{yyWgfG{a#~= zAd|Y+e(s||HJy&<#7!X&_HdR 
z;|TOtpDs_jc#TjU1&+eLCC}D9`j$_bm=b{CG3KcRWRuDl76E6veRkJda;=*dHG~Y5 z8?)l)Fu1AWnP5zZo+j)q%H;qNTtU0`FS^<8famdexiTDihJ9%)vzv#Ohg%3IV2F~9 zg|yno+iKpWg%ORAV$EJHye}*?-HYrcS5nnME^9WwA)~N! z)YyJc(bPd8O`wE8AtKgZu}rPHY2L_foqi!z*5H|%VpqOFZ2DV1QjTeGHQ}+x>8G{& zbA9GjmL6r^6L1{}tkRD4dmOB&z#YY8XG)kUn-?%=+Xn>(2Kp!`n**^;+A$x_9>5&D zIciGO{slJ>dIS?7lIk+TF<#f3!*u^#+$#6gZjfBnDxS~eUe8|CGY05#!_v)Wqx=FF z9+`oX+tBctT4`CR#Juqb;h)sIwBkA|q(50OtWZ;>w{%Jr72^z!{S(jfqgaW}qe`!h zmNf4c?>Z_I{Vp1--s$_a-bRGL%;a_Q#8FYQ(|Ng`H{xloa6DS-{ zANL5Wenz)YLxU*!Q*>UWb*&MB@c^h{nFtZL#(rJGpipsc+T)e>%Kq*Wnun0!Y9Q20 zSaXWjT>Yok=x2(MR+FcJz8Gz}wKk%26hz>?eYa4?*Wp> z{d*P1lmj4Ozdk=Ny!j+?yzh;pvl8rA9lr|E&4iPk6Ek7}+F+p$R+f^g?+}l9;p=IZ zbiKz<8+m#k_6{XKzqejoZ;K4j)Ts}pNT06?;ZdL^Aj+oEyH=<}FJzo&aLW$MZJvH? z^_gxs(j$DBeRGEZbXU{2wb)f$RM>+HH{ZB9wZW1e+DdtOtg{zRZ4ot}54oULY^C^8 z_cNh;{)Z}fZZl$uTYnCdokHB|r808BQ0ezSszN6Z?0{IANLmfRc0AqJhHFEw1<8s7 zr%BVWjMaB8aT_K1Zg9H@BJ1rD;`{Rru#4E>strf%Y!tR0YAs#a-)`(g?7CacaU7q9 zzBx91op42+Q1g{8HL_lvw23Bua5FIbICPss(#3XhE7Q300Ii9FqYNx)QVB#*DRB+u zg!atG*=lqB?G8HCU;{Gu(;ZXds#?X!O3KS;cCjA4%(7Un8(uoUu&`_snEEw2`RMYl z*tt8gEm=)=Em>NZKS*Fo853q2i8CKOh?ZUY7P7i^7gUZ7G@KujDu1NHQRN5FoEWEG zj}s~Wc;{cdu**TDVBzIEAT8ROz5+1%&5l*y`x22V&PJ3CnIp$#Sx7(J3Nf*AAq&6; z7+>N+_`xRH3u_H#APZ zapP{I4QTzBt2PwQIb@=Evsdq|&3T#OB#;t1RhRzU0w)%kNuQGRd=+KCQ@(7)drM$()l$1WVxC+nKymZC8 z-pzAfHBp-Hf#0`f>st3msUrNxh86Yz^)XildVrh8fEj~DhS$ip;#I}}Xe*#R>`7v0 z!}m9f>PX(OVsGCOI|=#$eS7x<$(SD1#_jNr=9`(c*EN-K`as29#h5OK-|ivBI(QU* z1OFRfE+x58`;{yZ{*kbCo7n;DA2F$%a*c}pS(6A7$Fu@o@4!*a9CJtVYD=iuS$n*H z`dYD^<3!v$oiXivI{04A_tzGuB~y>a_9faW8jmU#`dCT?W@_bYw5n29^rzs}?W@qY zb;m1ubK^8quM;QTe({DjoWU)YW*OObpB%0`;Pn8@Fso$tEndFDUf z`kB{{*hac&97}0z7t=~oY3obdku0Q(8U1))rc%j7Q!K($w9bzLq5qx)I`SJIzH#k= z5bcZ``;kG?8~1S(8o*Y%4Bc>EoacGM&XlmHkb7I!HI>A$w-JjfXfKkS$%5{J3cBNL1f+%iCA`AJ$#eo&ui$p?A@WrxA9k z!29fOAn<-O$#VAr@6cOg&v2xCwG~xG%C!Y&e`Xn6A#Ium>Pq~q(kr@w)5Ok?1)Wa? 
z(%ATByG9WCJ^}Lm%&V`Z!+WH_K!DouRgBMLS3CZ2d>XK(wuEu}SmLon92daZYgi~5 zc7<0YHZjo!SbWJj-CiJ~)vBMpzgfj)rUgU)cZGPa^k`HvB$7{#eUg&pE!N$8lt%Nl@Ur*DJZ%=t z>C($V4mE>~HcZ|VTmiN#J4tPSupy0Z0_)Da>VH$vS1nNS0y>q}!#&eFYJ=s|*qa7xXVlYpxwfD#tCh`S&$2)TUjwYVk@gDl z*0)nchWtwzJ3}y_OO`(q{i&@om3(vTnI}%>#?vec*$f%c1g0vtE zx9n0o2JFI$NNX8rfkapm#4$vszB9>{eN#<%{wpeX6@w4+L5Sv&dafYz)QBkL_|j46 zWpM|lhQ4~O1Kku80~>DyW6>+eu-Udye-J%N`L;b52Y6MWTKzvLoYokP;vtx{@Mzo; zW`BL-pgybOko}&eZ&_X|@K=~nn!=R8JI6l>1^h6rJQoZzqlI&vNPOhNg90CaTuyZ| z$%UTsob)MIu`kF9LPf|dgaI#??Ib+Te>=tmE(>#gAW6bnAPdI zgUQlLbDL}u4-%gNsN7L2xG!c)xs+_qGoSA+u}c&Q97bAh7by~W@(ZWve#HszsePy_ zcBE7%ILivyAyPbvv=AvGNTVf5@RyFB|5k=}oxj@!dP#*6^rpNKEJBtl$@_kZDl?SE-iNE||PL->S!BQ|d(uF>EN@2CA!#*|?|3FVk-5F%eLkN7EFH0AWO%CgOVaVY zTR>`wLg}sZF#CSeb=SQHj#^j<8rr@)9;cvm2p_mlKO~sUj`TXeiYiKailA!X3sk7H zm~@;3emt`&y8USR!SUfChnvB4IJPc&5%a6MmhwmJcQ6wa74wB0v{=A>T`OnUddMyM z)EtDf+N?Oqg}QJc_L0G#G{V9np-Bi-WGJGg&_42cQwNj#F)|@u$<}0z2~u){&=95o zSuYf#x3R40E8MN*U@}9NprYd9DWO?)P)f@3u&bqI*E?pxCm4oCnCIYDy?#kA_0{<; z0AV5AjlVH@5kWk?W7aXJNfRjDS+sP>;mTDB-Pps1S+9TT7FZHJ?r3*+@|??XtgWrR ztH^Rm&Ro*7wZ;4dccavgha-j*E)=aJ80$4TPfRgX3ZL_#m6hKBMG zuMf=)P{Bj2P#%K5C28R=iQ@9{eEychtcvxM8nVbl;aI#wfR%JUk};9J}`ArczJ4FFOVW#t6UR{`yVgM$zk>9zp3 zuPVp%aYp=5z=LJRr+0zXQD`0n%md))jE3GSji>XJJ2I7*kt3xe2=&^$$G)AgCt*MVciSpAZHgz=-;-@%zvjy|dafLyeDb}|hXO_cQ zL!z(zSBpBn`c2xRUw1xPxQywe1)^`Q)Q3>fST<<$deqOl9@9*&wABc95W4evO9`d1 zCs&5IjLTE8T{yvM6z1Hou#*3=$9d}vkLF(Tw}jKo&-}}`U|jn&+>6`AR==&h_sBzN zd4UYzDWykieEvMCj3^BAo#Zm&MY#Up*HRNIjg?n^dV!|2PBoo@Of%ueE7o$s=E@PM zOG5`pNLWR6d1VE7>^KirnBzZwbT3kV!r2^j_=*bkaH2=WnN{r^^L*aUm)BJ0PHXD) zY?`HKEPAOdgMN$+cUY~^EI)RvemePRetC+ih&d@RNO_81N%2J-UKSlCZ{PO5mw^g6 zgv?``V=%BeIwr@MNpypr0*No$#Z)u44@ZgR{Ix~tt{*v!*|(MWjZ32RHM)38%KW;) za1C+K5%63n)W`KXAs}>PhSEEXsIkrt`bOpRoqY)&_9{ZK)q{JPW#lyImNa`FnEWWt z?p3XC19f&R0}?FAQWx#er6P-btuEKVKYzUxBXx@){OoMM2iUfaD0i0ipFL=glRWmi zkR2kHSQ)k;E%`G z+bu9N8jf+~eNA3!QUP-o-9fy5eg&&h2yp}v-+op7X$xaWv zLLaZjBq;rMO{gZAk3b>=_kT1s30h?|WAG0!2)Zos2R}YJx 
zA)Sko<40~{7n?ASd2t6^7V+QKW;?hvfv$`ER0Q4F5z10`kaPw?wGR1}XjkiHV7yO6sY;_yBiP^ta> z9zlK4SP7yANj#;{H73NOgasg9qr?$Z&$eDB8FExDMJj9r5%8G~G=E|ZTWaNw!+q;| zRYzd0)^vyGa9v2uHSvIw${gpwZVH8Zn~l{@1W_w%pt(H+kK6ujEd?6n=VvrjeV5P0 zBJ~M-!2~=ms0`s*qIeR8+Et=fO>3fc~Qn%S@;G{3wTuW6Bw)3zsK`Fa`0m4fmpkfN5hBp2H1?0m=hz7M*_sTsr2*E^<_3-# zRbkjujXhRQF2kvC!8vzJ36T+}-VA3?0L|OyUu$GDF0SL6NAjgItU!&Xs*13mQ6g$> zU}{t-w(7`ExlPQ9>ImqYuxBkCVd{1Hqdr=vZkRcSN@<$rIFS}3GDk*Sun#nnP*ruB z-+>642?$!msSc}k)(t2a$}N2o5;WQ|GNqy3?esauyE7K;wP3j(U~W)Yaa`I|f0i2x z6e6SW`rDcbj$bKKE-T(TxGknG%cCgg}?U)I7xrzcIb;<+&s z=z}f|Z2q#jcw12t5W-wW5lH6>*V6J9cpOyn&dSvFUX)oV>Q7j%17H%6MeZ2Bf$Wh% zDvt*3qu<{y&zrq}Z^t}rdaZv6q53l+*cBi;d1%Z7o0GG=Dtj{kUl_tEDta%4Nmy&* z0R8EQ&yOZY7IJwmbmH3vA=5cxwA8|tQcXgiAQCnALSMJUp#+Ki%$OH8b2%n1>VTh`7*1iPxtg;iPFYix8cWp zMzVu&3)sFgGed{f!uM!||FC@UUcH=A6t*&Hl*#Shqp(=#9-29aHJ;wYN;5atw0Evz zvt0{@i@qdaP=`8@*i_Z5>78F;G`hzQT!Remy>CEw_(P(}#Pi7F%m5#*=h4p%7R&9@ z(h5$HGVF@v!lb6B^o8;^x6TQ*9@5wL(5<@Y8I|w+1zrW6Q3N=cR?ClEeU(1_LNX zcNR~ z-dtja`+R&1!^yrH+v2v^bH98X{rp#7mupQ9!Zw(DcZd_%2ALp~7H`1WCmo}?ebBZMEd20C%yN-xkaGQtdl6}3&>8;;($18 zA8&OXd9TM%0HLmatm*2IDTcL07b3er^eme-V2wO)kGY1F)HQ2btKi_nNw2KlloN zuMO`QX!d&f6XIqqw%o3fUKEvG(wzujTS5^9AIPt{KUFx`pX4@8XbAOJeD;AC z2EP368{F@qVe5uZX(;9I}dlf($2MFTTg?#w2>tXgt)3g97E_{U^gRN?YE=n9DEyE$)q9A zQx}^{m`%$pv?Ta&nXV9)KxoD`p4wiy=IHIdRGGwd-=!X3am>oNITGEIQ4b{HDAX}Z zx6j0%@B;f;=}M}YZ0%9l-r*mJg<^NAs^`n&K*4;wGFD2nXtVSE36+}R?d!-vfffiT zrD>3G7GyL8SUq-icnAG3(s=%S$byqKB;`ZkJ=&;@Q9$_skjx|+ zy3kN1ox)wSvYJA+tGIha#>OV?ot$utM+kWg9x7KEPmIOidsNXta|Td4iw%<3VMxxP zG2s;70T>y>Eca*VciY7I-11nsw*|2ysQNgQ%adm^;0J_Bg1SX3Z(oqqNJAe%c0Dfg zE$Bj8)<~qG1jJp^yBY|L=%qP(Gt9uvUDR(w`=$upcD?;_Jd4XC4fhUz?U>4Njl~_@ zz7HB)ZdZx$*+U4`EomK4$m>g&#iLW-zFlU00OLEhFUV}ZCknORz0t%PXt%!Py?3wR z;Dni;up306bNk8gGrlyS%qN6;QHH>4JQ4Fkb$1hQ=d2p3*Ex$gKsTQtDHZP* zzUH`ZO$e)*!$?5;|9j#o<`e6-@F)d6D)ro!`3zPMdITkH!hTa?v3s3Gde2H-I)c!T09XblRrRTr zj>qidY2UNe1VT|!LOSoxtZYv_uFqrB@bUfEh+`?y*vovgb*~rQzFNg@m>8u)(m~X7 
z#V;TB1VbTxsDPxAyog3#8_9YITwSC(e#1FR-$h+10`Etn;gg-PZ#IT~IJcDjK zl2BHykZDfv>!0fPIPEba8ul-r*+}5EK~yvbk1Odozk8En2dp@H)!el50tV&=k1oa!+5Wv(|{qF#Y{y`3KlQ8iYx4Sp_1Nz^eE0kMW^kW|B@sVRujr=+X=H zr-!Eq^}VboC1b2B*e^Ha({*S71u(B2Fm^7I7(3yQ=t6prLXEGdfG-}>7^d+8Jpu_6 zQQxB5CA9X=5{(3vXB|kbi>-?xzD1J@XAm%KZe_wS6w?ee)q|qo@Zyxuh(E<=VprRn zCWyILYKWCkK|F-iZ(00B28$!I=wdwN&A>fVYjpje8upu^F9!dLA9~vp0WP+z_@DUg6O_Yu9p# zH1?NW`~5=qn9>rqwbkXEAWg!F`h)yPOC!BPZuqQbnv_VeP0}f66e4(MPlK%}P0`l_ zL|nkFVkZY~=eX4_ZzS_~7w2yeJ0eYLEh5>1D1iW~rGYJqs`%&A3`D6IRRcfb&{f$9 z?_T^#10VRF66*zNHpBZb>{Gx9ad!|9Cct#8;0>Y+k_(^`)|(l~Qc2PCmoHpkyL6&P zukH{cS_5V_+EY#K?djOiFp&Qfp*A%^&@dP)caUnQ^rrmyivF z0slnz->2rqBd7L7P7Oq%BK%v|>jf0(4b-~3R==z6n6jMYDqiw~IPe+gxz#>C$Z^hd zszT4>|vu=uQu{y;du;A`>DeTp} zjPLvXM;0G5<$RfsayXfIgO--o8P=~q%x(n+lG4HJUTtb`n_bLVhQ?AYtzHIT#He2~ zzGPkPR_Z))`P35~t!b2F+LGo&TfPGm7CE?^X~pV1beuVdlecCyCqMi75nZ0!#jd=ZXJ0AGuT{^_VKs+aihdulQr|p0I`kehEv^bmY=g9*21t0G7ctVYfBT0)i-#`#@!wM2w%cJkcHc%nBadBz7?)D z0S~F5tcg9rBUA69ITvQ@u}i+PH(##0$l~<9aJZ|i=Xps=Jw$;Czxn}6{iJ0~FpFd8 z``dz?>hW(xWJVw?@`YSUQSJDqiAx3dMD3^(*Z`G$$SAKnlwHtTdw~PrOki=6a4ogeb^jWPz zpOuQ8eBBRGw_G8^(b)2}v+*@ZuseBG?g(R;il+RqCDG=9L!@y*iAVG~ldhXUab*zZ zbya5L#*GGH)Q0(InE6Z^?O%V#mj*&5WN(a{9&aK3p5ls@gW5Kx-=S{&j#S7lNjVwp zILnKZS`Tvn^wDvR$Ub_N+4&l93qn@%(56PdV|KkB3AHo3*l?d;vsp-RS2KOLr8QVN zEXiBWQfgh)GG(T>$-OyT-;irsK#jUO_=07!niAm6*>V?LOS?ve8nE@TiJtJ2v z9`|XMtM7;MojShiLwD}pb30VPQs{4*`E5i#YkWnF2$UKBXCp#elIsOVq;q)jEN)R{ zYUQ$H<_S`S5w~YO98|4MTiwBqd5!dg>1wo7HBup%e{qWH;VhCs1@hUv#nDZmV+7oA7Nh^kma_t4T69)NT(o; zbhmVefTA?gAl=<4A>G}e2&mLUH}ebA!S=5jdz%&WN1hgfgf5zcUvF1;(7B8)pnQ3>l-E!ayxG#BIGi5UxN&H9Dlwmr zW~Of8F_wLY3@36kscBrj(SBju+g$ZvoNU|W`vMix1?R7;U^j72U_^ZO>&5s$U9kPS|b%xISRQpa6#c~42fJw5iPmEtwbk}t6x1y zZi)_I8{ytQ3DYY*g{H`@b1K5a$?{76r31_}fdcjtHeQ&I5<7+2KDc>Q|ALQO4LuSCu1=l#&oso~?JCCUuP z%;)<7t}M+9-v^wII=axexxj3n4+!qeME$XQzuGvef%XCI!RD`6*=MR__B=lH-0iu> zYHa6}5J<>3=5r+!1|jCB%3vmHk!JYfwqokOzpzC!JU*v+jg8imz4=8NdqE4EinzLL 
zoxN)TtW(we3Ucc7Q_INXy7jjL-i@`ZYn@Z#V3Jgep0?0tX2nLvqw1!+4GIjuuvoK@!0pMWcCNJI$|JXZ2!%lAEbvak~KtTo=!6xvrra82uJ}HHjGm&ZM$_C2c zZ>^$s^@U`~_I__CVa4Z(&ZTxW510>%85)m21CG(5N4iQ!!2Z4QWbP$%y~W&+-bHLC zv9-uOd3vpCJ=*K#U}Sx>En<}Zx@+|&4!cj1H&vcy!nhY~(#QVd*6t$Fk^@?0ei;PDKK9Hx^Ynq>g zdr4l{6*R34OQ(@&s4!XPeXY6!k@;hn49uHAh`Hkm4T3@C(|_?FQ9kIdMD>RNis~DE z!~Lyq|Np(NhG4$(UUGH8yEh(N$Xb}bpK81C#)G;*16NwLWGxSUb*@^6b` z2BcWQiQIS2t?EG!Lg`Q-N=n6aH4~$&t=(O|$>z~-u95muC&-m;)PTJwzb}BrmGFuD5q93_>cH2}U0q1PxM4E_Q@_Ku6l5)j ziEU2+&tf$U=I(C$XWgR$9Z+ zdSjnWWE^!jF+Z;5z&VmUNj(}DvT;jgme~+A^(O!`Vi)}PBnyoqIZ-a|-SLvm+B= zGF$Rb70+E{_ha`FzNP>7 zEdcU2s8wo&nt$+<{;TKo{tbb9M0<@Cv>%vNN{2id#m@UN7W9e7H_PKgr{~q6?hWhh zts%hMx>Lss5>LXy!o0jPk~4lw#>CH{-KjN-!*OL#m8$3xIprvE<|0T34-d9% zf(n9|^T@EvQv4N^MUah0kT>I-fEAcGLy5`iBgZ<7vzw1k1=?)qEDW&jsM%dKu5UTb zAeJk%Uo znGl*fctir&6!;LJdH=r$L$Y<>Kx@>E6tnDq`|{^a_SCOD4t^(gXQ17TOV-h zHDmKuBj>q@mmw5Psj9f3rCDpUDC@;R7i0u!=~9pya9D2g+zto`S-J;t3 zrDkuoTzCUCa9<;zacU|Kc6D*+HxB&2fAge!KLf|FJk1sH31xa=nCk;qiU`<_j$F7T=l> zG&D5SE8R6NTx_=F?y?itOV6s!P8f(HbgTX|#IQ5)hW!H8fn62N4{qb9e*NIyU=nQhLV0`p~ac#K)Dg z=joLWy9Xkd`I(F-X4@w#Hrfs+Kcd&W&ZgDCFmNKL9e9FI<|D#xCTx8oRSI>XgTomDHrd7YycJWxyOxDLoq=c9|hV{Tm!tH*gd(vZ7V(e*i3ERvf zLs8;&92st$pG~R~xRcon5 zukK!71w{Ucq`z(6RFW7jcs~3#v)9I5B|){(^~+%6IawH(YCO?^ksInz_5`0X_Lm}N z7&?IUD9f(@{cx%G0M+KNhb#Ac7Ut(lU?@WP8b1su>qKCtC1xGyLEE(FaQa&FJiYN~bLtb>Yr+~zmuJxLQTR!%^~W&4 zl>#?Mg8knRnM61AC%6ZI>^BeC@3g9V7hl~+cP0Rweak`p;C+wf@Z22BuC`|J7_bE^ z>6$`s&joydnF(JlySIIEM0#Ju{X4!lbAu>gv;evHC)Z?JYQ8uyPt*gzno4v zuw2p|{~dR{PUM6(ncn;e&E52p{9c=bYd=}qt_!H_U~H2C@_FJW%c@KC6!*YXF4I+; z#|KD>gQao%B}@Iz4bwq>9E<1W=9CnES`a`ONUeM)Q~-Pn zV&{E?f5t#Ji&KAoitq21>l_YlXHUEDKYDt;S90kJ5xwOWd`BFT3GMOWukZnsr*(&4 z*>oGIL8u%319TT~xU3vgLffRz0Z0DF!7ynXo3iiKFa}rk_0_!FCw;A3-p&XWa|s+} zWR2(1dC^R^bGNf_bQ}Q#e5iKNZ%>8(Fq3=v^r0QN?B%bnga3>bs1Ftqe?6){?YW^{ zt+&czULjjBRN`VKr-W{)P&?jJ7x)h7S7~=|b_8PhethqBeheZUZjS|(bbv1b3k@Lr zQUVE7mrlv*%lXssOKtn~FW#9V2b)?u?o^V1HOLDnB7XEw4zAyd2-g+sx0nKK*24F! 
z|4R9*KB@YBPlk{R za@8}69@O7^|jP$ z<6j~M`ZoY$cY7d|IGSNuLideu@;|7^l36X9Hpt6<)Uak<3JXED&ZaPo!(T+bR0T@; zW*JI&a{FJtZJprfG9AaD>u;MN;|e+?0B_O{8_bZu7s0GqDF2uYj^C3J(W{YU#lwrx zoHW7x{<%rb0>=Ayp{2u>`0WEQsZWbB%?TR7*=*c)C*HFEqWanZ)I3cM`agD8BhDtg z`!7Gn?Z0W!8Q&I468fk}rU;%GZEPZV=2E{by+6z>hDm`T=*O5wmht~{sq0S$yPFFo zq2q7Vr=YdpoDXm|{{5PVJq-RxJ@7Yv0jpI%PFt{>-)xx6JJr%2&F_FZg|+y-1L$ns zuMQ=7Ss?Be|ApPl^`cCa;EH47pQIEHAukdT)xnjXc0&j81b)RiqA<$Cd4_;JLS$`;e5o1F+2GFOn@|0`9)FN)^u>EO|Jq&|(5iwRKf!?HB=x}N@-v6Xfy332Jk%Ve`Sn*rbX`g7Mcoz2>vyDB%VDQh;1?O0|L8ZMSpGAjDR+b zD20Fzb{7;85#i-sPp1+E%+d9aAIkszj)BhpQEtsY^U`2XufhYSjh{~=(Aetr7P&w{ zSSXm$N*l$85-k7h%%i3$x@`2EEm0P{5qm|fwp~b-vdK-*E9N_p7nKgQv#1YZKKU7|NgB& zvv1z8FXP1`{-={&y%R>c!yj)7Ar+&81v4&l--&GmKbW5%llm;&X6|ntRd1kgdbYWE zPq^FH^{I%P_hIf*ZZ0jt!|cL4LeJ79(YYRghA`wXK+ZSCd=6zuYNag8dog7U2%zkB z^#Xy){}N9)5YMn%wcu5NvB(;5M6^xzE2UcK%fu7Hk?&yQau}hUl>vVa>C6u6uypt0 zlKa-li@yJO@wNaFc(}U*M$pq;8ZeQo^mZrsIu}{}HOfQ5zn@gqiyeAWl{=%=Q@OdP z1o%+`%0fb-1c!^ z4twkBU$v@`1{uHj*3M;dm?hM7e6!}>$SGcfV)FhqFhirlEZc<%zfTDRC_PUSZ2Fnc z`AiePco$yw$7{@b-aF}HcD22I?@}AeUo-XFZ`^XFiDV5;)-3r#3b7AszU}kuFSSAY zX9d1$00ZjX)Nke5O;kt9`>6>YCkd;@eTIN09Pb95xNit&V;WAWapyZrs&|;w$LJvZ z?|a%s+(N~Vl+4TB6+($hpuKycug)}XGcuNx*S{l{g|>}RQVthl7Z6_GL^o|?%U<-c>}=UuVNmSUn$#@|_T<&@{q%Hqz>Iz*CH^lH zf*+Q3EO42o6(RWSz~ML^(5hVYYG^x;wA}`oYw-s#1AVwR6~JFEP$Th>kF}@d6ZzUOZ{VHCp+}`Es$f42%V+G- z3S*ZFVT(GtnzX>qVk)1*`^-mdx~FpHg{vPVzlQme^M6vM8r}i|j?0p6bU7I(=x{sRCYd%1UDv(7}jIs2^FGj)fbwfN~P-PL=QM>;MSH zY#RW^i^1wA8&`O=cohD`?WyV!zrCvQH+t+~g;Mq<%O@$H&mcqe;a+V)IdKwTQT+n) z6!@QOPKS1zR@dG!Rve#yO=O;PTe@1LJYRf-*R1j#Tqg0_FH{u6%E~$~bU<*%5IDyh4H^=XVo)EgSW{C*&?q17-Vt1n7v;hZU&3Zw?N%c>#8GKbn|wp2 zmL+BRkCj`83Ib*K$+P@n@_8-k__7OCX! 
zKVs%qQbgooz0K;w#A?=5ZsToq#wB8^LpJEGSAP++b%IYcL5fiUGnoYthS%@s!40xb z7kPRZ%s+Y~{&LxvKPt?i55!#!Y!TYeyyaHJLxJh{H=q#+Rg8~0v(M_OTFta6nZL6U zagArS6l2l!2y6H(4W2b1hPBbnj$+v5i&x6BT`8p4!K52J)*9aL5tLJHAwO0|R=4MC z@5q*tH$SlX_Dmy^UL}{SfER2lf576ff}_4Cy#de)2hXQ?HCBOQ=*}%Ubp%vsm7_TK;MuXfTOYIt>)I{p9~WFi(S{LU0mU-h}%Ad%u}JY;u%zi%Z`yRNTp^uU+DjzZ|D~Kg{uGe=R@L zSYs&IgeVPovWrd|6G4`jlF?Rq;>UIM$(2X~>4qEmbzqGM->q9oQnXlbZ9VtQKKUlv zN{Kk#yNARiVZrF^OnWB~lFmZJ6a%wC{?Q1NDm$)UMu~fKtjFRJ_L!xnDvM%ug?6;S z@NgeZbdhj9k2A3uh~xwhJ8VAQQD(rxj5 zUY3W5#8#P1^BgtzE?k*CcYIuFvD$;EjP}J4L!r5}piNypo*QQ*^sR)uf%R$jLKS)5 zfsA6C>+yuC@%Q4S*x0*#H48+7+iHKET|Jd8VdvwkItDiN!xg$;ryuzA;WEQ*Cw|UN zWV7ikq-ylcuj@|t3puGZ# z&pzxVJck0;jIv2l>23hJeYoMp`fq|`Qm0a)1 z9j{1Kr*=iWR%_;dq@U!1KaYSz4qP3Ul_}O*+;LZTJiVUOyVPhyj|s`!irFlb_$C}g zg%kC82_}=L!m3jY1HrK7F(qj>{LF0^q{l~daoo2I&qZTcNmr}d$KrFOX{*;BYC7N}@kQfN% z4H+W~Sxw#ixWv~6RGYy2oTT0X0wd0~cHF4_{Zwx!op|2cL856J3TSz19WNtIC+cG0 z;-b;1*cR+j+;uK(Vd>(H-UNnt|r?K>FgE?CDLF`ALQJdO$Gbs8|lS;!Y=OTY{2i+x@yhV*9Z zF`fD3Q;0|xVjotKQFZdUuUy*t&VvUpl#ObFVs@&dlV-G@}``px2I>v({ zpEEBLc`EoXWk@;{9d5oro|o&xFF?DTCwv87s%#RvgVy4v+|Yfsuqtwjz!wN}``jz_ zd;WQN`wK54@#-SSa!*zKzt8F(SHT;{JtOh?@xG{HWJL68j!r07l+_)U)JM7(*@=*c z61gGeOla<6Rn^9F!L@O$R0t36`=e+}Z20T}lor>AOjcM@+1){_u$pmqP?1fr5wiKv zOTN8UNHZW1U*!#~RUPJy?yr0r#lb<~81v{9Ts6_dhT6FvmQVz%aB7Kc#)s|;aw$<6 zxHRx5ugQsGC`8`p4(hm?#&hh zck4L{X1%ALELZ>`*al%4pBX_^3f@rT5;mV)rmuxN;g7W)2RM4aF(jb=O#TYvAbegz zwwMg6uUc+`L43U^?+E#Qc??6ls`K^7tXZgdG$H-ulq;U-kNwpqfwhZmzV?q2*kx!T*MXOIrv@J1#Qy_ zkXqHluJD{~X(S5bHXcFpqG=r@NYFa&!pqsaG#Wh}CB=%LKFO!r@UzJcm<^lm+%BgT zvJQMAp|#$Iq2WQm?-T0b{&bxOQ7xX6eB3MFAS-Bftp%5}5F++s2ib<$%)V1UOiXJU zC2&X1d}}IhHUL(0!f-{=<3z_4A2?vax&0UWDiKw<0fX{x(~M-RcdK;>2XW0|+$aQ3 zV$8pAyRzdrOV%6_5MDA(W2l7{Z?NLloXx{^3sSN)npw(LW+y#?xSv~rzuPnzTgplz z=zG=`c?0vW9L*p6X#FDpT1S^9MMqnE{gc`D?yh{RKXze#rT3T9AKyoJ~ zpk0(%{p4QSb2{QhT!*qPcGod;-o|mahrCz}Zb)H7o)@3G@Ep(a^j>H1TRSI{er&Tw zi^G_mwWqIO{We~lZKkqni+xz(gQICuk`ukoN<5@rhpm^Gs%5H$5-ztDutIdOm(PV~Q<0cr+;?1_)sJ-I&s(<-t*UJ@zl*D5 
z>?oltTY864vnYr7i0%AgDwb$+L4vvwOlLL9rq#LV*#n>glj+Eit;{!W&aSR>=Ed(S9s8MP-Wu#fw#@0& z1Qh7OKFb{rnS+JfRN2`^qRJn+^9tu*<#D~~q#EfnDX&5-+7v=y=GFMfz5W(fm&oSv z$P*`iZYqOh1#Y*7P$fP%b2w^)ZCVguH0mqASz;+?Ox3LUBb zO*T7u$x4ftuhhDq-nM3TXC>$@!QMckUcWVL19R`dC+G<_%8T>)n%Td*c#0dJVE|!V z(n-7;aoU?Vl8|#|C(N%??eW6Oy+BChmf9eEO~2{*-6L>SKNV9!L;BN7cJ=AU*w@h; zeY4CuS`{|#BC=m)t?Hr?_<1myU&2Z&kQJ5GDO4#1!1nO+sBv0Rn-$Q*BOm7pFtC5l z-@<9EbIqWg^;+c}bq=vZ?u_r&E)9?8lEkPJIABLGa?^k;y|p4EzGM=FR|~q)p`3&9 zJ0r*%j#u26OH-oaz#L@KIicnbo(jl&8_S+kZ?vW`FE1{NMQ|rwTVq2rv}hA(f@LyPUQv!Vi!9+d=aqib(4uB#9*3GCK%5Tn}Haq1YAAI<8?b^#~M1 z2L!)wJqf>>V!1R?xJ(p2GGFQ}QDZ}u5hKUdX=K}!N-UNC1*`^@ z_?uu((B>j}JcZ&WCwFvHP#KRp z_tp4kj<%MTq=iLoZ^K;Kk|Gr`S_zG$I_5W8W;)sj-y7E^+{qW+*)wB2T}iy|GkGXe zS_YT@1fCtk&f33^4%u#_F>5-Xnxf0`bhjN=R^M-j1od4}EFB)b`~U_t{oBww1aRbhYSW`m`nu-4B&{ zlPU^~+KLMo@fYrtgudUm5hlH(gf%t^q_PhZP1>+z8Q*_V9g)v$v~Sbf6fjAmruovd z=S#&I_h4Q2OYk8MNp!_?Rg|~yF}r72<;FH769bLeC*)d@E}vv@=xGbo@?f@v!x4YJ2fpr)%j5C>*X%rHJ~0mN-vNMK0b~+RV_brBje1Tv;6` z4aZLsBg>OD zt9T?LNLvrNFzLl3xYPth;}o$hg6z~3-JOT-Lqnc%n6_n==Mk0%YhrVqYWqvHUx<^f z_aLyQCMEl&tZw&Qf~CK1$h1XM*ev)Z?L(1g7cTQ*b(2tm@Q>zSp8RMrtV%g}i|E?% zIjPx*H3Zh z^|jOD7hq-0=$;M6zC+ufx08JqpwiZ+G{o$p$eucb%? z^@M_8@(xp94Z9Z6*usJVi6((a0lcHqx386=Yc_X;>IkPWat?Tc{&FgK!_{nNsW>@f zyWYW-ShW61raJP6`m70B)WcYEgU^O-FtWDK{rEB{GKRTv%6R_5yvRTHH5!?HumF6& zH+_4$RQ`IUPT6;>s3Gn0F;kSL%BSt7*)?v4^5C6CJk^}4P<8}y3xk5Imw2JW8Sq!! 
zg#Obq+FG1+Xl-0@S&ob`TUw9|Slds&AJC=+qXg7hzbaG};Z!3I_Kk9h9!%9Mk1yMn zr3lDz1T37cSy5Tf)qb9HRO~I^_b2FVs=#l*Y)GmNZcltbiuqGx7wmMmA#=OlC+ z`*?u}Oripfw;PC z@c+>g{DvX;waD~D3F2R_=E24Y!Y~Ua8wPVGiRwJ|yymA!WK3|B38T(< zxu(WI)Rc;^ds3{Q`uAWc;I87wQCbm+XYakDjk~M4&=@Jl`X}rLlBKw`Z3rzzft0?E zW)4D}mOLJn9fmY2rIsU2a~*Q0zJFbRlJIB~4}S86?;5_yylFEv>qL%rZ#&cU>vpI8 zdpxM~&IN=N>sqm}_DiUc5)S;)Z6D4NG;Ewn!&0nUm1wk_{o8ESHX~NLyyN*J`nEa= z73MpSeJ4|bLx+8{t6ETmUa-I6tv(U?_*dauxDJ`P%iUno-OB&v&0KR=paE2S*HaS<^Puy@Cg z$L-=pGpAoNTiUH@3?{GVN#fsGuFwkuuN!hgI?0+tHY$ikirCjE-x7qEqK}XW!}Lw# z%gAr}{jssNq}l=-q>^X1DRrYqPf^gxnGruDlQ%#}+@Dr?32)qO!@9siAbvYCrv%e8 zI%hx3dWaZ*ijXxRsn5ps1d%o2uoo#JZ>b#qPcV*45r{b)`6%H^hdB4O61TxUCUS%7 z1!h^({py8dg^itTqCox1+omsbbDBtQf*~=!@uO8IoaY3lz%1Pdq#|P5hL9;?}C+ePz%6Kkj!KfBP z>`0p@pC}+8ur4KQZR$F9kCZs(y@G62tLsC9&z@E9cZkdx+fl=-UX1(?V;LB>EJ6dX zU3M$;Zn-lC#)z4WqDbxGHI|GBE<3GBU(-5jzO1|0Cv|3NT#9am?{*oJR!G@7W0OJb z87dfmS=H?%j5|?E-`!!72AaNBcNW1x;3>K<`^*H`biGg+IKq$2l*p@k4+f`!`ffxawmSfo9s3OJlmtdL9fd zI;HBEKH1Y4{=&gHw%S+2c%-7Gzau8N%rD*yDrPe#>(sGKbR#0A-l29DsM^w}o30wmHQ%5Y610zM-0j6|Hsja^ZQYxf$tZ*|q7qi@%a-`2D*3Vv8Q$o2W=rCD(&4skz~1ocZI zOp%DCTxKIyS_D(1H~Y1sKEBDx=LNUidxC}!>?B7}?c z<-N*u+gN!;TVT!hDwm&$JHi0`CcJqqEc8y^k-GsbX58Q4F6qrWrKWXKI`#fbmT&-# zx>%vQoX9pW2ky&vT9G}i(KYjtFe0`>pO9OSz<0?P9vRzl_nIQhdO3^ESFND~N8{5m zb2XkJvD+BRujEbB?}V>ycz@AafTQD>V6XK13x#Oksp=h9Gh$S$+)<_p0wtl8RdnKN zb2shOX;cv&!5R4RQVy#JHSNtuYtty%c5`-Ty6ah!TftIBn^k_1(V%BL!nr3UvODlQ`-$dT^vy%pBd>ywQuOaNV+aT@rBYjtfVm>!g_;fx%dDeYv zNT2)EBkXXcJoDUNx6mI7T$vT$5(1%DL{C^1Xwbk}g+-ZvmZ`PB&3<|S33H9+6odjF z%p2h2zJgW}Pv4EkZ*M#MLcmTmc_W9DAb->Gg-k0u^#t+~Y!KhUgn|X1{Z(z&zxvo# z_DPK`zl^!1Z*$!75}!Ie8Q{s$A*lxs1&8`l?@m}!y8h(HDHFO6%ij#XRdi`4QrvUsE9i*g;(Q0P-w@~4D_Kk z5cMoHLXh&7=+0*TE8s(-2#g|wM2L;&U+>S*W)&+UO;#o0Y@V*2Kai~x8ZoJlpD!J? 
zyzbMtN7xWjC30ENO~u0+pX~~SomQ)$WgAQp60XYaaEi&Q2c-~LVz73hZ9u2Y<^0Q0 z{HFZJJTA} zpi`6*>1y*8AXP&UeM6;Muj_Le8!3`-cj_U6i_GC*@N>=R@h}!m46YS{xzpuzmv14L z67F{PXK!`${DY@}2vtCKP&?2ux!(j#9KU?|af<J;kv92QkX}LpEIs5 z-x?bj>?q122@S^EDI|Lv~m#PaYLxom{za z`X6T4*nQv}R;cOIM)wR0(xQ=pFz#ZAm4nLH<~5U7vS$u-5Mhw487di~UChP0i-}HD zj=4S?B_8@TF}cwr>o3ivIFzMzMC{2?I8v%6;X+^y`iZ2h&_(&N_F#^*Fr|n`g7ImL&KQDk3 zyz-C>VZmMJBzWjPrjlqdT(Ck<;!OjG)6l@_$~oaEvq0@^niO;0KOf;RLmYejz8lVX zc6XgHmQb$l7LI0QZd1^w*3lKHfhBA>N=Q>}_1K*l@h}5v#V(+m3EZbEr+~!p#D+Fn zcO93(42pk^IEv@j@ZBZgJr#yr8db9s_So#>l$=&mE{8Z$Up^|d$}L?CSq zi#=LdB?_|EGKm{7DFI7_*8@+>bJf#zAILr}pwcoB%wjbaPL*Y9pK;=+cf3j%EKdc_ zir+^~=1&eXDV!G{a+mQHVP9cu&)ubVQxkH`6O>IvZ-h0PQS1|xPK>wjD|@{p@~&WF z#X||Cw9;1oDiYD&n_IYN!kP#e!XCyn*ihIt>0j_-(AX7G@8FM;IsZ_Kn)|Vf9v%h1 z?1T?+wrs0o(iFBvAf}iMhBkT8)^Jmu#k3S{D&{?c*cu5le1@K<^o)f0rkn+IbmbrQ z8XwcmyJMRl=(OFti(uxU%SH1>g{5J1GP5*x)X?FvduTW2Xxk43W%|SXTQtORHOQrC zX`0Vy#8n~-jF@ETf(8ZFS!jlNi^zpP@)D~8sjxEEfR+r#BA1Yo?N(2;b{)+o9P^0}1mJ9})i z@jSo4pxwAlrk1P{yI~gB79~WBbW-M|6gMHQFb~u4Z4GxG4fD3t@Jc0QA5UidB^=N& zy(S?6fw1c%J;PgM`J3aehbgG;g#H3NtzxBV!o{WI1pg8jcE55EooXlINKy`p zr|k?GNY`7Wm78t@FnN)alxMSJpk5{Yl$-?i5|-vA4qJE?Z!f5lcpel%>ss!&_DRv7 zBRFhmWFS#Zoa14O*e#JY?ja4b$}CDX@P>*xP3nAEMzoc=wO@po_F_=r5db^lGF|9yI$Hki^+ zS!HEg^7-ypr_!bkhr{{ z5T(K?yx-rBM7+y*#eMB{KZo`?z$^w8T<=MQ2>N&;Z;G+M^-MR@tg%PpMuZIw&kmZE zQSV%khJ}*9Scv8I%ne=rKq0a(jvY0w^Y^Gx$8F?1p2fw0KcKEQeWK*Ky;$KkH-&BD zd7-Y0vqwQDJ+`gJ@l5>L!)|x4_9LokT_*vKy9l<`4(4}1lfn>=0wx(K-joWYB<|mMQG5;YY!ELQ-Atk~Zzb3fUV{jYIh3@$taV>g=uNI`rkd{=? 
znzo|jo7AnE>!p@uzhS~zCfj+a&Z$FnZh5lazSW_41BLa>>aA1CF>r z{41LVnyYG$#PsEs)D1B`Z8sFiR@=iEV&rif(A+J-V*!<^{7nDBfrEpeZyxN?ey#5?o+|K&D{QbY=}EvY>QvO0pSjI4(8m!cJLlZCN`Q%`SBDIf6OXLSy~n~ zk!ofyMkxK|h<-+Qn@DPER@$ms!GPG2%Vzo_ss)Y9a29e(QSG$iHFd-Qk^$U%PZcda z)=X@}fcAN594FmAP-Rc+YXvK7C_OU^l@!T%{i`TBd+7EVjt5e(zlpsQwVlA+fxcjjavX34InE zvv(MyleaB0Qb@5eR`Y6V8AsN7u-CCg!=X(kU8LcyAV~Qfhdf)9Lf!Zxey{ttU5$^{ z5VIaT`bfZM;74L1z5h7mztcXgM(x+P=iRB?iMTS6hm6fRG!{ulkNL2!J!Ib7_EF)A zNCHw5bKz6UGdKlw`bBAo<(?y`rXQ^2-qaP$U(LyD3vgSnD~Mz+O1dbACt^=553<{h z8n^~(9=;DJe^JD<+;^c|#49}GhZW;nzw?h!BeZUM+Nq^_#5%sTF5PzvZ+ZOt%cWxl z3^l5l6g(B;psgdU>ac0uZ5#w!msCPS6XdC^Shpo~20AC>X_b)|ei$N$btv}t>363} zHCmPfRs3(jiqFehJST<|Z5Pw*sfR9m%bCX;?jH(bX}1kxX}dbMt-4;r$NF|7nwP#)OHHiR517@%8Ef-$znXQc%VlFySa)tzxf# zsNFw8LPFBsSG4l&+k?_VNtImZ8A=+{S{Z_l z<31#J`UfiagE_e{Kab%+p=oKK2|LUo!I?@~sUcD}t{GO$WWot8b@e90BYfNID=)sa zSmg}wGjqPlR?rnkZRxc>(f`#OGrfuaukHwi;43|tH-x#@cVC{WH!EOmXCA`Ay`i|{ zc9@nEl1XRqvGd#gJ1DG2>*^;Wk?@CzNhGT67$J^lJE1_C&wncO2qq$e@N(Y~+BG?C zUIDq_cY6rAj-5<}&I6%R6|Shj;2$mISGG-V@Qcz}S{nc6XEz2cMR!^q5nSNph^Ysd zIOEQd@3YGdNc1R%JzM~0xsrca$jF*NWy+dk+&8fA<2RftzbVS`UdM9=Io9BZbEHmOqnpWs8r>qW3-7g+k6N^zVdB(NC zDP&~DDdm#le6C*cs`LJ3ljWv7Y?@|LaMGHp0)(J>9?08uqSry52ow5XBUZ=ggairSGiYxLhqPl99;(46i>vZCNuaDfaJyv>|2R+(2A2@- z&N?A-@`{=N4U&yFQRQ=IO)9&0(?_Blz#VRP)Hc?F+x}9hRP!Py3LSOXocIwPhqhfB z^Gm@+I1y2I#ibbbj{&d<19K36MlW8y9s{{ZijmUsNL z6Ui^!0dCU0`Os>%#;%{N+UNv3aSat>rJ$4i2DWhSX7$w`55j^T(j=3cPGb}db*~R;`(t-3Ptu*xsY_-$+~ZqRq%)->t~TXU)!R6eWM?~3+6rwd?fqVU+vdhX5p zEn7EZfqEk{iJ*{B$*Etadjv-nxx|_c@?RA#wPEg(rTLBy$uzk=L>g_Qgb_R-RWSXl ze?1?kU)8Tk*8VP}yzo>8MIw?mz2$z9()B>%x;6r)Z3V_7nNHycqQEdS<=q(OZnN`? 
zX6ivb`n_RC0o~y5cv3PDU5KgytA0{4o5|qO;%4s<0uHnJvvEES*8Tf$|B-ECp_)8IivSMs6Q?HNRu z+}5jSlnTz8Ky+O>ud~>sI6a``wW!DB-) z<6d}k6yYP|NwQ^rO6%rQ!ll(d)L=1Uf|=+0@Y&`R4zeTe7cu!BM;rx12TTLasA7Ru z5N|RIWl__V7otT(8dQyu`{G@s4K&JwdqB0TvUu4jlRik*SqjzSV-fA+k)hm;xv3#< zZeDm@d?0~!ywWyK6Xe+dhzkj1YMc*|idl(jK143DA)?=;dp&w_QOWnM$C;|{FU*;VKpe>6` z?Sm+_@s&EX)lG=F44WJb+8F!DhK#&XD zQLK>ADM1nD4nbNryy82ej-Kq`s?f>5!|8Ne(pT8#8r?cZ0}tTKw42a}E1nSrX>CP zP<>0Cr*LJo#y?_?ZW_iHGhKxsV6aZ?i)rLh7wZ226}HqP-ZOx)pED6cf!Sl)wcQT< z6=VG_4`*mc)7aSQhzUa@TzlWosJ3tMaZN$hWwT{j1KyYDTXkVNm@!XKm4<@8y}2Is zZoC+jG;-(_c_t_>&(8FK`lP8m_v`zNfJk0!}3#G(9nHmC6Z;z574mV zRYBCBu7igR4}sR}+-SYoN9Vt%8)%UgA?ohiyx)pBHZ2SZ8lnX4ZC@G3f5)C&pWzTS zKbTpW;pQ(H53s@6*1gg=hML1TB{2dXknk%oTnAr%}4UM>kl{j5}J@_W$ zYD++$4W3M@JP%Yj3OYXnpoSAF4%e-ce5$1jX!9Al7Zpp(Cvq0!otw6s_zdJ(A0PT| zULTf5NW}uiJjk5O{_?=M7HFVrVi}p<{)(A}#bz2pL8I$`72O20Z&_}u_Ch3!R^Z=n z$?1RG5*#={1h6Ab#yRsm+!e zC*JZCxR@-ZH>-PR^NzjET1_U_(tUjRCk`Uewta(@eZj&M;OQK;nk%^z%CV-jMsfI z{#IL--+Q0KcM49$BoJ<#rO$?ok!0#omo~djxUlQ39b7iC!0g1~S=;-&7CjMHcx1QL z(r=9WFn7FN2K(nZm&TF8>dKoOi2337`UBI9s>$ECQ-TBfe5?Fa*D#E{_jZAQj@gJ+ z-kEJ+l*X@`GjA$;uMwEB|L@n6_&=`4mb4&#c&nb=+sp;MVXt*_?BF6)P|ggFw$^f@ zG(+zKSzlyON=>sy6%NjT7!jqUxrMv&v_Bu$;NrC_`_>TzzE`-8FKUSu8+cLHOuBfDDBi>LjC&Qwa(0<4zNl*lJ$yEe zHF4yy^Y&}0t0_jJ-xd9^6|e(QW&%n%9uiR1YS@KN6%!o>kTqnJKS7u0EH`?XQ*oiy z$GS5ga<9ocH#!2dAzR*h3t3ww9WAJ=zI|b{U2PwkiD9?eJ4xHjwgttr4@I?Q?4L6yY$tx>;uXoM$S9`lfCEJ9*?Rg6-PQ(=lc56nBOH)g)@7F^W z(GQTGHWs`0L5{H~()yPbTul2W`{H&{pKa5T;MmjITFt(>bkGtB@(KkK1)%1Vj+Mj? z{=d$?1RTn@i~C=qB$0|xmLiqx`;xV+l|)oRMfS-~mS(b7WX-E2~5AoB4X*rpiJ+DckKo%R(K7Fj-jKe@zZmCFMayHW_CZdADEh&nta0^ z-5GMsWp2z+3gOtYGr53}qMaaLblEWbstBh^$31?-Xlfsf_z1-CI8qF!PAXS=yG@S7 zQYRJ0!5n$ry0D!kW*&D!85#~blgcG|?gRH!vvm6gLgbgLT=ic@a!H~+GV+|%)}m@D z>m|+}Xl{;Ks3c>#5cT3M7Ize$b8;l!LdIlsj?Gdm%qO$@CN2Kb{GC)Kh&I4cWpV&V z-6`wr2hN7XFehQ7AjVWh|1q-(zuD~X3=V!VUSHQ#Qdc(M84z^qN?W_;DImjPK;`>s z+o0>I+$tGoX3STsps|7v5D{>fq|pFiD6TJu_kKekxzU2;c6>u~hgbuRe|n5?yX{GL zkyboVvR9)c&* zrN?J8o!uDh64JzL*W5>wZ}1n9H#O? 
zm=y4?wIsz{Vl_P0`zj*hfe_HE3q|CXOWh4e(mIAuW+8>aY8lk zNj&j~ZBUKVj#LvMr_6UX5NPyl(b7C!#oR#Nb)q>6YZ?_tS~dzdzO3&|KKvvf5S}mN zI@TR_md)e)>?qd3iH{6Ov>a;cJ@nW8_E?u52xP2Bu4*5&{!Q8=mAUAZN+Z~sdM78- z6TN*qf|!d*sl=~F8YonNas)&YgEJjSDVz4m$phIFHFQIllpem`W=I0vA8N4PwLEcde`O1=uG!>P7MsPJ^2A8; zyR~rRFfERtC|8pxTW%XM)JEsC#R{%$alOYmZafw zHzpK}xHxb_LzxK|sJ7mZVE6d`A=kj6gMzyk0s(|ttz$nr+O_2QRHL7{jL*#E_?xPvRl57wo|&>JcTzLf-?<2!e_hGT?(wI8+1}lHJqm=p<8iC~zBUJY z643Sanw2#Ia?K*s@hEt$ZkCNV5$xLx_*Joh5b$DqXY|(>@Z7frI`~N5pk`BfH zXKv<^bq~dt-T@&vC-b-3RmZ)z!_vL^u_NaWePOJV9XT)UDd%Rxn1u9H-85ie9OkMS z8Obp{q7XfhbX)LD$>8}fpdW)T@6JNMZ0|TMC zjGj&5LAW4;PINEpCV%Tx6D$_%0o`}p7s8q^ASfgf_v5sdX4&fSPQCEC25ud1H|Ym~ z#L5??cyMH`c}Xrrdh;iQAfY=t^dEE2$xm(mgWp5iwEtD&OtDuW@k5XpC=7)Qy>>1x z2Q7C`V65}i&(liow=@xSo+n63Nde7)p+y+jgW;Ah-z7MUPp~|xo_{`r)D;-OGl#U; zL3Z)B(;F#lj&H4!tvkqbRPZ^WJngsB*L=u>1q^qk`7iGAzSRPYZ`z@|%fJ7&FBS{v zVDo_vwt3I0vN9f;IW}l-xUv{s5GZO%Jyu)zC@sR?+1}o(uXR7dzkdiAzt{`ASxhWW z#K&%Sy1^~QYt#3G1BdT)MJv*-x(Y@16P(yvjRM(%yAq`S)L@KuKE$En})yEVXan-g%5+dQw(~TR?Tz z%*Qw5fRPcN?gz(Ty!nV|?G$Zmf8^lfCtjUCdN={J2VG2nxfya$;6&4fFQlkdA;>f| ze1zUVVfv#M@Q5N=ZqDMX=_*Y?o_pZgzEX;gYH!8_P>*Ka(nCRyxV~B`}a??;A9r4V+pjT;_hX z6>PeL_z|2-6#gLl(~64wI~jM$afte?t?VS({H6=7jVmjMn|A0ghi*k8rAk3zf5wh@ zt7}d?xp{7GPPxrSTt-&*zOVElp*$B?SMJ<@q_R`RO};~q#b*l(k*hS0A3nUWR(>>H zhMb|>`staYYOoo2_M`;CT7Lj4e&rbdqq@BoNk)y`;gKicR7zGF|Gwfab#QomcL^1I zSd{C{!6$7`rKP3Aef{AWhrJqcj`)4Kr|5%Ktwd_y4IFN`V8k(U{6teSAOh{}I#O{% zkOVj=6{r^2U;ag}Iyz!oI{0{@bui?+W5u9o=<`6^PTJ%9V7q&eWW+j>xb$QA~ zi@t_mpEkPnJSoJB>9A>CX`0%o&SPayY;crem%YJ3eId#H>tU(Xk4GEQ|JNOA{ikyJ zwK0BFfLJIcskWSWPzY8T>dM3EWdo)7kUjC%r%A>acf$3K@!sQ=!r0l_aZq3DFbi7l zlvHTDb1^}l(A`8%1J4a3y#36su>D8iS>@cX2|TNIrriN4Ak}c7ak(o_(>BJ(XN3&= z&j6P4M=62hW>kkzkP=SdCR>&Zk)NM`rt}$sa8vy}7$6TdP1`ZY7o>sU8Ylz{at)c9C z=U5uKH{z1}rS#DZMx2&>*)%Do6s;#L&JFp#VS8>r;JDe> zr=m1}xx;hKOsH>k)yD=B1<-r}b%ID(Rjtfq0BWIm@X#~qAX|x+S)Z9_pvGr+)`>Yc zH8pWP>Pjm6r+lXu4%k;dV5PSrDKz$>+82_|6pSH(i zhq|@h^grs?Z+Eq6j(FP)Nh!bOl@SyiWI^zhNTL>=CHkR1W;a%6CnGPeuE)xb7&JO( 
z*gH)i)w7QLk%InO&MR!tp}&^%{#5q_(<=ljon9PIX^%CgUE9NQFx2{MMpqi&COv6o z2%F(8DO$$Q`UQUM6KOCLF~L9yj<8$#mIJ-%ou%j~%4i+1~a zf#_I`VD&JsP79fw*Ygqeg$heo;f7%!|ERO(=%wY7x#6>Ea~m{jOn~r1-`GQ$)cX~< z();yiEKR%-zk?gb1&rAqUiG#Q%I~ley`PTSO1jq=)0YtprCJMEUqhs)h670Ab0oH@ zN`6Hc@Fu3td&CFkZrW6S_aML6M~aW1EI{P1et@(p?ECn^95#opjC56XP_w5 z!kCaWw(;b>W6&TVvI$PGv9z!zB!U|Ra5c_ z&~o-Hi(gL{WVNVRz8AVV-EiX&VPx-$*f}`tmtx$-q9*gUR&ju;5eoPHbTK^+Szm7- zu4s>X^+IEsZc|xXXwmnTL9B|JeiCp;ODOq#!B}T~G{`_Js zgha#pu-P5+25*VPs)X({797mU0^DK+D?JB3FgN3Efr2^jHoCqWR$44=hIjmivJLsD zX&s8M=p>-E^IBcb29j_oipo*C?g}@N>2EgyqY3u}T*}k)sOskBz@+~fpW8c2QLeI# zO6ijCWxw;BFX9y8o5(`K9|j@M`vq=ei3_czoVspNw|sUHo93}AwsR){U~Wenn%q?{!)8-X}z#%pU!>WC062`#0% zP6v*q4^RcrV%X&>M_kj$$YCQ5?>FH1$HfL6U6vEG594^lJENPOGHV~O8bW*1U)fN@ zF)A+Vl?_%Zjc*|D>^DX1@(wecK5hIyP`3qcb8VDBmQx#jAPyRhN&0-=g2O9F(-gT! zK=AG(UA81Dw{D)qjV5Oqqmo@pxJ2t({l!gAhO@TW42|*u?>{PwvF*c)(bWC8d@AzI zz@o9K_goiJq0>63-2R8H=Xi4Gf1duB(3;clF2b-QsGt_WjtClKC@QChg>KKDw4Uu_5$iSYe23{pWK zQBl6*8r&QIz7jb4Y*630j5zv+@|g`S@6vH3*?w}_E0umU_Y%DlQ9qT_)tGq2Xb+U6 zUmEAp`o!F38dRH@xbJPoFz!g|12&H-jDmK~d^Vx9daAutQ^{v9LXEHT7Rl7#VGUwL zlKD>Yy@WvS+tkZIwvS?Q+_)9khqZ)D&v}5CBHHycNgio(bwCSvB#Ax0JyO?Iz3V?| zK9`~$G|0Qv8`sLKTMKtt!^fn78^I2h$+B+dhaBwfIp6YSXzk=XD%CupL?4sTCFnO@ zR#XHO7xB({M;gvwJmHidk|_P_iyQXcvj48K10nx8Ex>oB$pYZ0ZbOA|g%y8dYH)Zt zGWp%NA25)0j9>(9Z*#(K@x1??Ni{|8m zJN1gb18F_{$v-{Z)b(l(HIXu?^glL)3N#4(+nyjxiA%)fsOoF^b$mB-FuO{Hr)D7&QZJqk{ec$NKk?&$@ z7Xs*L>c_w0N5{rAywft?kPgT3T4_H7#b51$r$+JTuEM_fL&xvkQuengoBh}9%f&VO z!aGt-b_9B7tH^(UxC8hon-N`RY43TOQfo(IP_AFJIn^S|Nom)hRd z1-Lpn`D(P%A>?HOzMc$KFz2_FJ3oTYwXs}i(LUp|>}5%W?wtUmrKl&O-J#ldKbJrLHn%{okhC6ZDl#=q6O$h;}WHR9V-l>%Re8_b4MLi*9mRnsg!(@A? 
zs)y$E{NAqM;9%&bGcRH$hWb*@Ph9;+aY?M|V=~)PKH9lw2(q0aZJ#K`u+zQLouq*ZQc&P;acVL?2a)#Wp1Hg-i^Z_W z;r=TtV~0Si=XS5rYB~31joM}?LCSf0F@P}EzWqj~C}6bn<|529MT*JVTkVg2;VGw; zTQXcp*~WLxEr>aaPGHna7B1*!u6^&mY;L>DmHKk@Ze&cYH<@o8ZbOmZ>Y_wy!)=6$ zOkCakqsCbZX*QF-A3G89XT8}teT3Z6gbwWQpmpJRQ4`7D-hus$SXx4QS|;bFD5i(8%#4Zhyk ze*O%pP3l58qE8HSdKk}_N-l8gNTNvIDZo^=f(;4@TU&iztqi^a^ym7KH9qp%%yK5e zPgu5ochWvJ^LDV<$cJLI5&YWzK_d6ZOhoe7?B>cduW)o!^VtK#&Y{WsNGNP@NTwt~Wv5SFA*E(M(2qq4KsMbJ5~zlGGW&xL3bjN$$qznt54=OIkr8y_cQKH4;20oMCQ_(KEa-qazCsC% z`&bJ_?HV26`K<#=jQ9yhwC&j5rSYY?r3lfszz%aluC1X4%LN%w&>J4i@Z+W)j}b=Zce_n~gRm)Uxj(%;fXSpya3DpI-1nS2Ba=uxBw55WpZ! zx3~|k+1$i68BN_rX;iv0F8>G=XoTj^Iu{bE@R9lT#=fDa{7EsEO7`X`T~L)%`5>M^ z2ak3@ghHSV9P68UwTRuF!V9b@a~;U14oK)FGlJOs?njviMy(<- z$h}|nZ~x1KGvcb($Sd}S@WXhqm-un`S@fs7&T$+;?UzkVDm^bzA1mjtn z>F&HlXn~eKg*Ud_&>XN4TW4+y&tlKrDK+-{7fs20SYWeS`XRS^Ve6OPu(J}XV;zV| z_=Y9?G&<~S05RR%gv|otTT1+8S$Rg=K}v_ zYoD;x1NNdwt`n_3Gm-qv=mhuALhEwcx#mFWawBn~*muW!27bDGsh zjZV-4W}kTEmy&jkenT&|4A+B_GNDG!4ciiUIw1!sKS37Iq+yT4w;c%dy%+00@3Ao@ zEomm4qSbz95i(ZIZXg)nxSfuUyT5$;dr@;*WokbpWDw`W&ukX+RV7axA$%do3oR@P zEEx;^Fb0B={LQJaXpeoM5v)L5c}#n(A&94ps&y8-t6^P#+Q7gl+6|};Th5)1MksJR ziQZr0c6nz3K%2Bix1#@e(>#37m5|RTh3b3{t1?uXQL1wq4l}cPmU9AY>$ARn2tGQW z4))P>Q0$a~mEO6h=GHob+jo-YL2~D6&?jxeR71i*BobR&TS*|+0Q97CFYCJma$GBj z_|ldXgP4d@y|+=yop0g2mKWi@8N)Bl5}v8UdyCe8Z_A_eCNLhd>9E+nM#w;7IP1T| z9bkK#-#tg-XAwb7Nm<#1W}L$pVJD*AieH@7`l5oyzZ?IyO}n#UmLkhb5nj|WJZaYO zY~(oBc3}M%-S{%yd~9+sd#U>YOgj_NJY^Db{b%6d+zm=2kI@piGfJhtI3pz~8TvRd z`6c##qTkRJ+wOKb?sr!Kc{H33Y3kJ=CkJIH{Fm-weIw@t^f#atwX57V+H!%yy^a}m zeLo!E@pX?+33*i)uJrSn!3C{V!161)xbRp_-9k}VOt*@ie}BKYX?0RF4$>`7>`K|= zB6X@uHDXsHI&#@exyz$U*##Rjn3O*FusPSJ#VZvP#CJKa7tUco)rlWP0Egf05ITYg zS=a{G2DK`eVLU>*ErY5Z@^+DdSjsj+%C9ZW?Qn)s&(`&L zetUZPACy5Eq9`L+yOpkBlYP;}LAX5?mO@-;4?8IK^ps{Jnv6_C%ELe&=Ej$AZL{e0 z^MQpsr)lDFS&Y!v+TCDZwd>i}kypRr?sVb!##huA1e2Q^!y@S(WlWcis70`JXC)>k zsyish-TQSUK~>*8s+L%=$gmamjkO}WK1g38&0-NDy{_-#tSw^&bZFsHj)`ngVi6i0 
z;$y~i8A~B=>@h0#Htd>sydS>S)pB5X_|Tvh)W{TV7WAoIU0v-r<%aI1W@2Izk|*A# zdjNy&J5Th}RU8jgdYGm9@d2fmx>EZi<}>5{{i!*JMEQF3vs;s_XZoQNr>QNo-_|!^ zfK&PIptW5hdd7jyiAYbEri2@&x!dYpbjD!;zM0r^-f)Y?3(dVJvCoBNi?xj<@rHX; zq@0LbI7?E{Mtfj@OY7`&s`XFiH!eLXd0u?pyZQdum;;{>LS~41e(3cPj1A8yT_#vp zxfZt=a<@?v&avH!Hhi#wO8vawcC7B!?(LgycYV}8Y;hyLB&V5Hj?tmixx+@>+QaVA zpoPWrO(aCjz%uuQU0&D4IB)APd)^KH)>-sivB>%WxV3~uso+9@pU`S70CQySS}YpU zt#1oy@V}ry3>m?bf_kc=7aJx!=qQ@=>gQ#k7Fm4swv=;ekg>Cq(|uQZFKUJ0Jf-Aw z!}#5$yrC{MEqaVZIj9OU@udV_5;zsk+Fjme(XKL-Tk_Awmc_4AeOF7|Y?&AJ4m@KT ztkFz&SiHjVYwuw46v)K7zD(YR5+u%mQbur?piwaLP-ek7v&Nk=Vmauc!gkrC9Y6UI z91la~L$50|gi0IuDY6t}I}Utjz~$>a-#F5QuKp;RcZb)KHfJCb<2+etHTB_QO7|=c z2Rb1~5=w=G=Z2G-eWT;DuB*}pG_iALFwS(+3}9&?41V)nH&WM)hpa%uTz~_-9y*^= z`N+vf-%930(kbyJbtRhc9L}O5E4k$Oo&cGztQr_v#W%dT1d~TKliRgs0$V z1H+|jFUutd*0<6JMJU9@b#08qNQJj}n_3B01=sG;hd3!+cT4DSqmgLmymCQ!#Xr|r zc;1R3xpFyo&T8NU#0F2QZ77;%*rX|N_3_as&!1bfg~S@|+qo^&SGc7nZ*()}amnow z^EvOd3DNirJ~e~;n6^W+6BG19b>Aqj-x%iC*|#8g&+6zfeut8kBC1kw%+MAr2)V8W zAv|XtjRiH0fXt&7eeivdmr%n@x*UaBbJyb`p3g6wfNyGL=9ypDG^I6XX?#SipA_?< z%|d)&4<%RbmD|(i7{BQ7C)=biIf%y8n?643i?ETubpY(9JShAO6-&|whB_J4$cOTd z0-B8GPaT~dm#*t*Qc1`<@(B-k?)oJom61Hi7`3j9=MoaCtfDGb=T1`D1UP4=cUsTqyXUAErOKm~QwCa`|O2Uuo8#j&}$Bvg@UH5izC8|ON zQY*u(4$+tnUClYE+-^I@!v&3^;W?>|N}7ZsB`2@l(kMSp$|;Fr$2Am;8SO=}ES$;-S3uRTf0Mv-_ysb(Y zzn&FK(0t;v((F@lwB|2v(qFxU$O=12i(tI?b#hl4%~8?28gWo`LtOFk%*>kh!anly zyzFNbw=9j%+glS*f;?%s!6b5MK1u9W2aMe+fE%{#VkINR;sU;os# z(i!K0)FtELgv?@x?#)OSF%vEw<#>vY=6P&aSF6FWc=h}v&t~II_E~zd+p8j`J8hed ztCXiQ`ZGVdV2Pc-&;6&HALIbE>+9ffi2%TLD=nxa(+o29fNxWZIW51fS=RzHbogin zS^XHr>-61$Nu7KHqT{DC)w2A>(}y4_aS*-l zA-f}CKaa#yzx$$!3@pOLGgWP(?{E5By%2k}f4*ZyBbBo(bF{MJKODwD9;mKX=X5#y zlgQKlOoPkjMHj4WLnssFmDKo#ZTHl^H#m$zVtxKaEjNgW8|j}iU!-;GFz2|r_o`K` zDVs5M(1r-Nujy{3ro{PBer7s#ozM8lfXc$NQW*N95AM%gjnyzD8>YV`3fy6Mjv@o& z;DEYJS0y6{K~eEo+Ta;wgGV};YE24Wfg;|OcxJkX48<7u{fj7@L{1y=fGA$TITMm# zgle?91&^3VL-2!+VQDDXh|-z7FhySAbG`+!f;%^ep_F#r6m@ImcC&^Ghf`LI8%ze6wO+rD@p~aV(eCWzJ>Z=&Gm?_yxO1uj 
zHw|gz=x8P*i-N2)U6Ydd{aaVfi(+%GXHqi(*~H%$apaW9Kd0z-?gbZ_>Ao5pxy^VzM`jD^=f?z?z4Ef zoyF~mxho7wLO`^1wJt_Rg z6xG8G2lP%GWL3;C?rO?3FToT}YIq2RL->IBy zG3BbUr)aYGbA{;fv$SPPPf{?wl3i$3-yndnTUHE)szMwXeWE;)m*q9D2jOkgKM@_ zN1WvAJSHYbD@g-m>EivE1t*J{eK+j#dvWG*!jpdz-R$hn$O+r`pXA)KoBQdxXy1Ug zP_9_a=99K77BVVGg2d2#>%$gG0)fo70_i1PNZ(N26>lg(H!ynw5XOHA0oNKndLT%R z4|
Qf&eg!i^s-0?5Ylj%)R4Gs`uPAt{3e;a47o%Vsk&@J#O$fXb>`znXz(A~QW zbjT)Y)HrftvPPa@u4~&$f{o?OPFCrXU}L3~HBqLHs*(V>UyQ9?CvuIQhN~1 z?{R_q%w+=P6>M8s!G2PS^2=$TUsTex2R{O#*qqk+4scLXTMXZ7XxRZ(0`N|Xzj#a9 z9SbLL<6B$nuaUQ5$LgXEkQVjRz<42BC7I~)dufsFAqf>XarGTBY16JS0$v2hScmQ+ z%PiQdS^RUK-!I;ZFMSGABb%?{fdS|M8{KVx>GzC|rNc;i^;B}&_UDg=7%BW;{!dxtb2tma)W4t?C+VdF@3D`WdGpwVZg=hP`%BydAwOiLD85e+dKXC$0VxTIBnb#QhmZgJcl&;*XLnw#y`hp(DMinBRUKN{NTJi%>>DR7MkA>Vi}T0AIFE(O zqSCZEAEuGkoEw+(Jjx{JzGbXpQW_W-$Ucc$N1NZhU|eJ}c$vzFI1kQmuKR3o*cM0V>mG@!l|REh=3O9{q=)({AZDYy_2EywwGZJm9(lXq9C zBTi4sm+7bIOFqTE_NiZ?PyNlwmK?{S4_>h|d%HXHo0**%J>A%d4nKTQe+7*F&8`iJ z^>a-71BhaZ=WMXxe8B5(aViJ$1ye%F_e{yT7zBzRvR?2HB-lYp^lAt!o@3fqAexbe zflyqD_iP}QQ~`LP8mbB2P%RaLH`SVoz*}lvwZTKRq3(gNsV~%h@JKxXTF0;blvw?` zR60)yh6ZtR;Ac$x1f=2>i<#r6qT-5w4M)z_%p)ZV7(b^op#o4C0vad=#f<~sIvG#m zOj#eK6P0+GjwgmgTd(0|Id;C`N|O$KQ|Y%^s(o~%PqHGmPLKWiVy(wzI_wD_Se+l6 zCS?C)rVaM*cl}{G(*28xKFW&zt2e*yzwVF7iT)AXiRXL!XAYWSWo>)7&vN?cO!EXW3Jt{rQwVmi5pPg@6- zPd8Us%kz`~Xb}HA7_Y%@!fYah*(jUpc$}DIq@6bQ0rXq}K3)Oda`SG`C)&m7#28(; z_-$qJkFh9Dp zOeM-5nsq8&VBt_uj?aa8b}_6_RCBh^e((M!W@08QSr^w+tC3}4m z#$?l{E2k+NFVPS$o0<&OTNXRJd6}Ym_OMt;_d@d}WW6W~-`+RlaZIBWbC7rTUSrkx5d{1`Q9TE@LtRHat?f-gk&|9Y_Tc88Q zv5#VEy|5DNb{wChGb~OF^+I!>2sJ6wA+kw?8i%3eF=W6sW2w&@>ZpcVC^dge@*xjf tpQnrsh|(Hr@;%Ze=7j?E-MTNa6&v_Mf9LK-KEoV2#;+D4r+Nw+J`O4`+G zNzX2AhbDspG>}tJpog4#E|6Oe{S$I33iLkLp7JmBQ1tg^m$b~ND21JUJD+df=kMvK z&8C0v+i%4`HyHaDJN2_bzk?^=M_|4!GdLc;tm`aU+~fAHlbb-kjT?QAmn_d_-{o(MhGn6Vx9 z>Fy`S)SOzPHnD={#7tVkopIqE+wyba>pm-4N!ofJBRMy*h{s|zIWOwT+QcLtIeYl| z1D3Ulm-N2tGdAN>qqvZ4B%2f5O7qwNUrju%FAd!0VsU9kx4MiKFITs^LT|w!vz0w% zpJ>fC_oE^V^8QgIMrow%LWXe$<+tWt!SD#WS9bYo9>iHxjIt=m2J&QA)iFQI4?ss! 
zlHJ@NzjY^+;Yr^4`04e0`e`T5;vx>ycpQmNG3dP62{X}o9H(h#Ff8H|^lvF|PD*8@ zvdiTPcIsYLc0L+nUk6{x4FcLK2$Y{kMIfR_aX(TnxOqH~$2p8BBHn_V%0k~w>FY%_ zH67%+S|=)t9)s^VkDg!H#L?2Dx$P`Gi9|3Y?xP}-xoXPj;V70-9vsS0C=2_e6=4x7 zdmPC@pqy|xjL_+%gTuo}Dl5(o2Ff9MgoU!wI4_icYDc>m$o`SC^?j9fLxd;F!pBWX z)~j4>69~AVYNUfufM0z%)f~z=Blh)Y*H%^@r3dl?*7O>BuafOt@ngjwk?#gUKMnId z2)<_je&yQH;3T^CWEAbk*|l2_?(f{brnO(TTf@)fIc(L$lfQ|E@wTzb9n<9&U*|3E z8x2rv+yU)yAAc5KHCl$lH@J(xag!WyYp$S$C(@nhc@IzCMgv=9M27kN6FrO#2YL9uW;E6|mhPYsC zm%e50DdU5?%1(n}{G?l#Kfv+I)AmAUB-ik*uM6`&3Wt$0^U;YipGV_`ss>`yz0RTfFNuoGuy$Zet%BqF>$k}dR2@yO5C^~m@7A3$oq zVRzX_rH*tw)wW?kj`!|%8(Lt>iXMfjd=0#)S~e&h(vR~v%Zo5WATY8_HFjVg6n)YZ zIm>~GBc%1(I*0^t<3Tzq;z6c`FMolpwBuJ+r@a0kE8=W0%9n*|6;WPnW4<J%FlJI`E5FVcxT5Mj{uEe(JSrVAP z2Gd*_m zHKHcQu_7KOYw|FYulo8v7&yO#ax;ZlkzMw18m(~@DiY{A5X5K6q)u{e`6GKusU{SF9`8j7`mnAz#JeJTYbzsu7?OWD%;jS@y>> z`i*U3)h4*Mw8E@LtnZCj7~%3OL8fLOJMt^N1Hj$V4mQqg;B3G^wD_8s5Q($bGw{QDxIyCQC7QkS9?{@9gF~M zkp7o^PMtq3zejOC3R7JonJe`gWJlFP0WYf9XK@c7Fi;;}3WAJqV#p0;*96+J-`s-rat@kd_-6}u?Fqrpc z3^AmprfF?LIYRPbSs5$K&Y(O#KwWak2r^IVd zZGaWL3nX2{px(qzJir(`L1vgbPdXD_q@WO4gI;S>cj6`}OeW5xw#^=<4_MJmTKF<4 zS|M&3s3e)N|6)x&?7Bj`Og!Oim-9T7Czw~XlXId5PO9=mYt`LT)-WuNl#gnne>@xj z(u`L=d^!!@$8#8ETf^e$c?W_Rb#yIdN5cV}gwT#u-aN`lNti`9<&vU&2`{W1XKB=S$~^os zuY81AJ&*W=VE~}HxD}8V?(E_^B?X_>FkOC%$;v#AK11g(PAL!nWWfHl zP1az-ttw_FJoNdfn7`3833<~_jl`SSqP_`i4a2hq>Q^MxCMZ;~pjz6fT6?8#8^CWP zIIb4oxO)rbQJfuihBDF>>%urtdLfEbwI}Rj?4PndqARnLHAqAfhC#LDz{@G@)~Sz* z!P4}8M--{NyHZ-!B3%3*`&!Y}+jAqU42ycpa=_n@4&$t}p?XPcAE;oy^7IB|Sn?0V zL`cm(HB|F0S&#e?`u>b3@1P-gLxo#|VCuqfHq4gc!+89;$>;+rOYQPqfM5bVy8uvh z|E_U=x7(KYKq@~7N+gZayAcErk$kEUN=2i5lbW}wAylXnnTBLi5-*X*2rr&ZK?z*1UZK)d7 z;IIi*?+;J_^eG$UWiEaV%ayBZ@2xB=q43A(>$P-5*{bA_%kwl52pIn%&?Ax@NO{OL mw{(GDMg;P#=Ny-mE>hj?Qc`adiqh$Uz!!=wD!51^=6?YzMW#9c literal 0 HcmV?d00001 diff --git a/model/__pycache__/model2.cpython-36.pyc b/model/__pycache__/model2.cpython-36.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..751b966ae9805df5d124449b3d932b9506754ebd GIT binary patch literal 5051 zcmai2OLN=S6}}fQ0^mcmEX$G~3G2+XWhar7bf(ksXwo=QoW(4y3wy?c#vLXcQG3orE%s+-&=mYW>$*Q+AGY7+s{XMQv5dt|(kp)8V1nnp z7v=HYfr$IcIsnMNoy8lY;TDO%`r#jBz%E`Z~SRdEpyt=Er+Md(&u-<@FFHw<6iL)i#~gt+ zmAbDgJ0A_Pu7fY-h9NB#hRV<5A{6oCq!%j}r+G4vCpnBLrn7~2lm*@$>FY@}v_Hsm zwN6wPKf$?^JbrOu6I)A<#RUQdvoMI8YABBPx{DPx3{j{vui6WkNb!6GUfygy;gbbT=5gdACvEfVXq(Mc^Lkk z{rkqv}mo!M8bdDSg12SZP*+HGOEezvZAe#CzWseQ`s zvCm2!>42(j!+;$3?sXelV9JUgM}7Gk&Z27Bp!7&T$&)NEq70G1$THQ~fq78)>GzSe z9N0KwTF}-}B!nLi`lBKlWTo(p`}eftS4OA2-XJTIY%t1~Woi|1UTkA$ZNnr)ACBO{ z1sr~bk3B6$8Y4{%>QO^KdM{-6Cw%DpYa6U&_vBtEh>4NsQ0LOpPC(NK0u?KJUSirW4TKliho^G8<_GBqM~;k<}e|7hdN*O zL-`SS#LpXO=CW8bAcwk^L%Ht45y>G*5VUo*%RbUees+&@>T;f|Z`Q z`5I9Z}SoGqoz)H&Xb)gd$6A2FRfGXPo8CujJ|!^}p}6j(z3t&{UcU z8jF++mn|nR-IJ#L0Z*lyGGM!K`SI0KH|OTSmvlw?(z+s1(PhgZ&C4Kku6WhT5!UUE zq>PcID234xssO#FH(na>U?J%_P}5ruF|S}VHZ>=Pyn)SmYRo87BR(U>B2sU&?6>Fe zjcq!rO)zL_hFSGkKkBj2W8!l5eqi3eeYbRkw{Qaa8#D;FbOFsJ<0>Lb+lR=?g))1? 
zQJJo+(q(EHrKY`m+BX7sFakV55?}Hhb@8ke9w+%I>T951g=Pmyb;DSPCtb0t(0tB> zg|bL;Kgw_OCYOh$vG(jU7orCudvXmnY9IMcHW1qXciu_nF${B zFf%k91*Zhh+Ip@1aqZc(^G4^{bZp?cxnCla&Ycc|;HX!OWPA&quLI~6$DJ1d2+Uir zi)ptWT&BBKfCgYL@5uyk5V2Xi_)^5`kSZwc6AC1l7KS>C$ycc3RW#j76&w&K3z}Fs z)F+{KNe{A%@sIFS(Y%d#&;a0R8y2YXsy5h{0N27=DVkGxp{@0wAPQ}xkex;mWx0@A z1j!*c$o8Cm>r6l8O?zqsYv5fVXCj6K6Fc>QLhKZged;{zOmvZfLShYitxesDo1(Co zIFs5od(?l(ie}ovmr2nEamPR<$b|hjYwBUu6nR4Y3)^*mG1$;(I1FX9p@+XRtv z`7up+O|Oz;FAciRn1^5Hg@+grcyvw}2Jn@OTLCEH&MvM~Qt)OCv*qWsnt2j`iQCP62q~ z;Qzn})QNuypu$5{N>CID$&=p}2$FDCliZVBRahlGHH2MJCT3bws~{)03hnmt!Nf}4 zsfE@n{Im{7r<8?%5@7w>CTlR^RuwN39(X<~-f#6t0?xEjBlRY>sBZ!q!|-f@`V$GU z2?|vxsFpUW)_$qm2H@8Sj;qBt?(U(yNwTBPP{z9ATo?yRCq$8|c7%P5{WG>lbY*t3 z28l?*AgFd6csT{!I`Ih1l&1F?QKa(jN@-P#aPfcaYeiRIFN~}*EaH}E-am+slB~3$ zdP!>^s8G7{^a5m9l9HkPGc^%4RI@EvkNi1!f5nq`&=5SKx~oAjb743eX3OwlJpSBd z^ahoscKI$qtckh106-ed84q^5ZFvtY<%eMjgi$&-!tfE&P8C9_=95$$$akpukQ#z_ z8d6D(KlbSJQ)(z`NA0rb@aw+k zI(STf#cjCjdQ`WiYE(nRCRDvQ0DkXLHpt5?to_>6wewb%l~6Xol0i#Hl&wk*xx5G_ xAy`!Sx<`@&DG#}3PZ#KAL?F+4apIEFHKw~=O6na#K{`DUctWj2^%7~s{4c5?mmL5A literal 0 HcmV?d00001 diff --git a/model/__pycache__/networks.cpython-36.pyc b/model/__pycache__/networks.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b35dab5945170accaecded8ec7e31082e283382 GIT binary patch literal 3597 zcmbtWOOGT+5w7>EAJg+#ug45*V_QaT!Hm5IftSU**keXUOUqi@0u_Q%O=nGabyrul zva0tn?&jd}b)`LU;K1232>HYh;NX)jS>l>E!~yM%GZ*+GtNOKCTS)F!MMXwLMn&eA z5gD&E8s6VO|CC+XQ5T-CSQT+0)`l=jjz9!%L(C`g#TB46l-&Dy; z2XFb7szmD0X2yl;J8Xwp%>GdIUAD_y=7IBAjnzL?{2JS1O|}I|owe9DBn_s%qjWAk z!nc)fM-@h##s%K?IAhZ!%7t+n=Fz|2-}~g`ALqZn(bLEfKZ@GG^9G*$FCa5zs0`J) zHq(Y0Q!i9zk92;sK+MpXLyPG%^+H$4&SuKUt4o>hJi61V&k(=RmIGpyI^3ge9|z4J&#8iiccUJr}jpqugW zRaR;LedE!8@#7vWCFyv=4T4@0=6MhZCkWDT9Dy^=;*@dPCSj-W5GH}J?nZHcPzZyC zMTi}Z!(5o>5zhkQ_OdiD!n6oFx=_bL&kF`2xh02xQut-K`p@%6gKQif-J3?Iae8#? 
import os
import torch
import torch.nn as nn


class BaseModel():
    """Abstract base for SR models.

    Holds the target device and resume counters, defines the common
    feed/optimize/visualize interface that subclasses override, and
    provides device-placement and network-description helpers.
    """

    def __init__(self, opt):
        self.opt = opt
        # NOTE(review): assumes CUDA is actually available whenever
        # gpu_ids is set -- there is no availability fallback here.
        self.device = torch.device(
            'cuda' if opt['gpu_ids'] is not None else 'cpu')
        # Resume counters; overwritten when a checkpoint is loaded.
        self.begin_step = 0
        self.begin_epoch = 0

    def feed_data(self, data):
        pass

    def optimize_parameters(self):
        pass

    def get_current_visuals(self):
        pass

    def get_current_losses(self):
        pass

    def print_network(self):
        pass

    def set_device(self, x):
        """Move a tensor, or every tensor inside a dict/list, to self.device.

        ``None`` entries are preserved. Returns the (possibly rebuilt)
        container; callers must use the return value.
        """
        if isinstance(x, dict):
            for key, item in x.items():
                if item is not None:
                    x[key] = item.to(self.device)
        elif isinstance(x, list):
            # BUGFIX: the original rebound the loop variable
            # (`item = item.to(...)`), which never updated the list, so
            # list inputs were silently returned on their old device.
            # Rebuild the list so the moved tensors are actually returned.
            x = [item.to(self.device) if item is not None else None
                 for item in x]
        else:
            x = x.to(self.device)
        return x

    def get_network_description(self, network):
        '''Get the string and total parameters of the network'''
        if isinstance(network, nn.DataParallel):
            network = network.module
        s = str(network)
        n = sum(p.numel() for p in network.parameters())
        return s, n
import logging
import os
from collections import OrderedDict

import torch
import torch.nn as nn

import model.networks as networks
from .base_model import BaseModel

logger = logging.getLogger('base')


class DDPM(BaseModel):
    """Diffusion SR model wrapper (conditioned on the upsampled image).

    Owns the GaussianDiffusion generator, its Adam optimizer, the noise
    schedule, and checkpoint save/load. Inference (``test``) conditions
    on ``data['SR']`` (the bicubic-upsampled LR image).
    """

    def __init__(self, opt):
        super(DDPM, self).__init__(opt)
        # Define network and move it to the configured device.
        self.netG = self.set_device(networks.define_G(opt))
        self.schedule_phase = None

        # Set loss and load resume state.
        self.set_loss()
        self.set_new_noise_schedule(
            opt['model']['beta_schedule']['train'], schedule_phase='train')
        if self.opt['phase'] == 'train':
            self.netG.train()
            # Find the parameters to optimize.
            if opt['model']['finetune_norm']:
                # Freeze everything except 'transformer' parameters,
                # which are zeroed and trained from scratch.
                optim_params = []
                for k, v in self.netG.named_parameters():
                    v.requires_grad = False
                    if k.find('transformer') >= 0:
                        v.requires_grad = True
                        v.data.zero_()
                        optim_params.append(v)
                        logger.info(
                            'Params [{:s}] initialized to 0 and will optimize.'.format(k))
            else:
                optim_params = list(self.netG.parameters())

            self.optG = torch.optim.Adam(
                optim_params, lr=opt['train']["optimizer"]["lr"])
            self.log_dict = OrderedDict()
        self.load_network()
        self.print_network()

    def feed_data(self, data):
        """Stash a batch (dict of tensors), moved to the model device."""
        self.data = self.set_device(data)

    def optimize_parameters(self):
        """One optimizer step on the pixel loss; logs 'l_pix'."""
        self.optG.zero_grad()
        l_pix = self.netG(self.data)
        # Need to average in multi-gpu: DataParallel returns per-GPU sums.
        b, c, h, w = self.data['HR'].shape
        l_pix = l_pix.sum() / int(b * c * h * w)
        l_pix.backward()
        self.optG.step()

        # Set log.
        self.log_dict['l_pix'] = l_pix.item()

    def test(self, continous=False):
        """Run super-resolution on the current batch; result in self.SR."""
        self.netG.eval()
        with torch.no_grad():
            if isinstance(self.netG, nn.DataParallel):
                self.SR = self.netG.module.super_resolution(
                    self.data['SR'], continous)
            else:
                self.SR = self.netG.super_resolution(
                    self.data['SR'], continous)
        self.netG.train()

    def sample(self, batch_size=1, continous=False):
        """Unconditional sampling from the diffusion model."""
        self.netG.eval()
        with torch.no_grad():
            if isinstance(self.netG, nn.DataParallel):
                self.SR = self.netG.module.sample(batch_size, continous)
            else:
                self.SR = self.netG.sample(batch_size, continous)
        self.netG.train()

    def set_loss(self):
        if isinstance(self.netG, nn.DataParallel):
            self.netG.module.set_loss(self.device)
        else:
            self.netG.set_loss(self.device)

    def set_new_noise_schedule(self, schedule_opt, schedule_phase='train'):
        """Install a noise schedule; no-op if the phase is unchanged."""
        if self.schedule_phase is None or self.schedule_phase != schedule_phase:
            self.schedule_phase = schedule_phase
            if isinstance(self.netG, nn.DataParallel):
                self.netG.module.set_new_noise_schedule(
                    schedule_opt, self.device)
            else:
                self.netG.set_new_noise_schedule(schedule_opt, self.device)

    def get_current_log(self):
        return self.log_dict

    def get_current_visuals(self, need_LR=True, sample=False):
        """Return detached CPU copies of the current images for logging."""
        out_dict = OrderedDict()
        if sample:
            out_dict['SAM'] = self.SR.detach().float().cpu()
        else:
            out_dict['SR'] = self.SR.detach().float().cpu()
            out_dict['INF'] = self.data['SR'].detach().float().cpu()
            out_dict['HR'] = self.data['HR'].detach().float().cpu()
            if need_LR and 'LR' in self.data:
                out_dict['LR'] = self.data['LR'].detach().float().cpu()
            else:
                # No raw LR available: fall back to the conditioning image.
                out_dict['LR'] = out_dict['INF']
        return out_dict

    def print_network(self):
        s, n = self.get_network_description(self.netG)
        if isinstance(self.netG, nn.DataParallel):
            net_struc_str = '{} - {}'.format(self.netG.__class__.__name__,
                                             self.netG.module.__class__.__name__)
        else:
            net_struc_str = '{}'.format(self.netG.__class__.__name__)

        logger.info(
            'Network G structure: {}, with parameters: {:,d}'.format(net_struc_str, n))
        logger.info(s)

    def save_network(self, epoch, iter_step):
        """Write generator weights and optimizer state as two .pth files."""
        gen_path = os.path.join(
            self.opt['path']['checkpoint'], 'I{}_E{}_gen.pth'.format(iter_step, epoch))
        opt_path = os.path.join(
            self.opt['path']['checkpoint'], 'I{}_E{}_opt.pth'.format(iter_step, epoch))
        # gen: save CPU copies so the checkpoint is device-independent.
        network = self.netG
        if isinstance(self.netG, nn.DataParallel):
            network = network.module
        state_dict = network.state_dict()
        for key, param in state_dict.items():
            state_dict[key] = param.cpu()
        torch.save(state_dict, gen_path)
        # opt
        opt_state = {'epoch': epoch, 'iter': iter_step,
                     'scheduler': None, 'optimizer': None}
        opt_state['optimizer'] = self.optG.state_dict()
        torch.save(opt_state, opt_path)

        logger.info(
            'Saved model in [{:s}] ...'.format(gen_path))

    def load_network(self):
        """Restore generator (and, when training, optimizer) state from
        opt['path']['resume_state'], if set."""
        load_path = self.opt['path']['resume_state']
        if load_path is not None:
            logger.info(
                'Loading pretrained model for G [{:s}] ...'.format(load_path))
            gen_path = '{}_gen.pth'.format(load_path)
            opt_path = '{}_opt.pth'.format(load_path)
            # gen
            network = self.netG
            if isinstance(self.netG, nn.DataParallel):
                network = network.module
            # BUGFIX: map_location makes GPU-trained checkpoints loadable
            # on CPU-only machines (and on a different device in general).
            network.load_state_dict(
                torch.load(gen_path, map_location=self.device),
                strict=(not self.opt['model']['finetune_norm']))
            if self.opt['phase'] == 'train':
                # optimizer
                opt = torch.load(opt_path, map_location=self.device)
                self.optG.load_state_dict(opt['optimizer'])
                self.begin_step = opt['iter']
                self.begin_epoch = opt['epoch']
import logging
import os
from collections import OrderedDict

import torch
import torch.nn as nn

import model.networks as networks
from .base_model import BaseModel

logger = logging.getLogger('base')


class DDPM(BaseModel):
    """LR-conditioned diffusion model wrapper (variant used by sr2.py).

    Same training/checkpoint machinery as model.DDPM, but inference
    conditions on the raw low-resolution image ``data['LR']`` and the
    visuals dict always exposes SR/HR/LR directly.
    """

    def __init__(self, opt):
        super(DDPM, self).__init__(opt)
        # Build the generator and place it on the configured device.
        self.netG = self.set_device(networks.define_G(opt))
        self.schedule_phase = None

        self.set_loss()
        self.set_new_noise_schedule(
            opt['model']['beta_schedule']['train'], schedule_phase='train')
        if self.opt['phase'] == 'train':
            self.netG.train()
            if opt['model']['finetune_norm']:
                # Freeze all weights, then re-enable (and zero) only the
                # 'transformer' parameters for fine-tuning.
                optim_params = []
                for name, param in self.netG.named_parameters():
                    param.requires_grad = False
                    if name.find('transformer') >= 0:
                        param.requires_grad = True
                        param.data.zero_()
                        optim_params.append(param)
                        logger.info(
                            'Params [{:s}] initialized to 0 and will optimize.'.format(name))
            else:
                optim_params = list(self.netG.parameters())

            self.optG = torch.optim.Adam(
                optim_params, lr=opt['train']["optimizer"]["lr"])
            self.log_dict = OrderedDict()
        self.load_network()
        self.print_network()

    def _bare_generator(self):
        # Unwrap nn.DataParallel so module-level helpers can be reached.
        if isinstance(self.netG, nn.DataParallel):
            return self.netG.module
        return self.netG

    def feed_data(self, data):
        """Stash a batch (dict of tensors), moved to the model device."""
        self.data = self.set_device(data)

    def optimize_parameters(self):
        """One optimizer step on the pixel loss; logs 'l_pix'."""
        self.optG.zero_grad()
        l_pix = self.netG(self.data)
        # Need to average in multi-gpu: DataParallel returns per-GPU sums.
        b, c, h, w = self.data['HR'].shape
        l_pix = l_pix.sum() / int(b * c * h * w)
        l_pix.backward()
        self.optG.step()
        self.log_dict['l_pix'] = l_pix.item()

    def test(self, continous=False):
        """Super-resolve the current batch from data['LR']; result in self.SR."""
        self.netG.eval()
        with torch.no_grad():
            self.SR = self._bare_generator().super_resolution(
                self.data['LR'], continous)
        self.netG.train()

    def sample(self, batch_size=1, continous=False):
        """Unconditional sampling from the diffusion model."""
        self.netG.eval()
        with torch.no_grad():
            self.SR = self._bare_generator().sample(batch_size, continous)
        self.netG.train()

    def set_loss(self):
        self._bare_generator().set_loss(self.device)

    def set_new_noise_schedule(self, schedule_opt, schedule_phase='train'):
        """Install a noise schedule; no-op if the phase is unchanged."""
        if self.schedule_phase is None or self.schedule_phase != schedule_phase:
            self.schedule_phase = schedule_phase
            self._bare_generator().set_new_noise_schedule(
                schedule_opt, self.device)

    def get_current_log(self):
        return self.log_dict

    def get_current_visuals(self, need_LR=True, sample=False):
        """Return detached CPU copies of the current images for logging.

        ``need_LR`` is kept for interface compatibility; LR is always
        included here.
        """
        out_dict = OrderedDict()
        result_key = 'SAM' if sample else 'SR'
        out_dict[result_key] = self.SR.detach().float().cpu()
        out_dict['HR'] = self.data['HR'].detach().float().cpu()
        out_dict['LR'] = self.data['LR'].detach().float().cpu()
        return out_dict

    def print_network(self):
        s, n = self.get_network_description(self.netG)
        if isinstance(self.netG, nn.DataParallel):
            net_struc_str = '{} - {}'.format(self.netG.__class__.__name__,
                                             self.netG.module.__class__.__name__)
        else:
            net_struc_str = '{}'.format(self.netG.__class__.__name__)

        logger.info(
            'Network G structure: {}, with parameters: {:,d}'.format(net_struc_str, n))
        logger.info(s)

    def save_network(self, epoch, iter_step):
        """Write generator weights and optimizer state as two .pth files."""
        ckpt_dir = self.opt['path']['checkpoint']
        gen_path = os.path.join(
            ckpt_dir, 'I{}_E{}_gen.pth'.format(iter_step, epoch))
        opt_path = os.path.join(
            ckpt_dir, 'I{}_E{}_opt.pth'.format(iter_step, epoch))
        # Generator weights, moved to CPU for device-independent loading.
        state_dict = self._bare_generator().state_dict()
        for key, param in state_dict.items():
            state_dict[key] = param.cpu()
        torch.save(state_dict, gen_path)
        # Optimizer / bookkeeping state.
        opt_state = {'epoch': epoch, 'iter': iter_step,
                     'scheduler': None, 'optimizer': self.optG.state_dict()}
        torch.save(opt_state, opt_path)

        logger.info(
            'Saved model in [{:s}] ...'.format(gen_path))

    def load_network(self):
        """Restore generator (and, when training, optimizer) state from
        opt['path']['resume_state'], if set."""
        load_path = self.opt['path']['resume_state']
        if load_path is None:
            return
        logger.info(
            'Loading pretrained model for G [{:s}] ...'.format(load_path))
        gen_path = '{}_gen.pth'.format(load_path)
        opt_path = '{}_opt.pth'.format(load_path)
        self._bare_generator().load_state_dict(
            torch.load(gen_path),
            strict=(not self.opt['model']['finetune_norm']))
        if self.opt['phase'] == 'train':
            opt = torch.load(opt_path)
            self.optG.load_state_dict(opt['optimizer'])
            self.begin_step = opt['iter']
            self.begin_epoch = opt['epoch']
import functools
import logging

import torch
import torch.nn as nn
from torch.nn import init
from torch.nn import modules  # kept from original file; unused here

logger = logging.getLogger('base')

####################
# initialize
####################


def weights_init_normal(m, std=0.02):
    """Gaussian init: N(0, std) for Conv/Linear weights, N(1, std) for BN.

    Biases are zeroed. Intended for use via ``module.apply(...)``.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.normal_(m.weight.data, 0.0, std)
        if m.bias is not None:
            m.bias.data.zero_()
    elif classname.find('Linear') != -1:
        init.normal_(m.weight.data, 0.0, std)
        if m.bias is not None:
            m.bias.data.zero_()
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, std)  # BN also uses norm
        init.constant_(m.bias.data, 0.0)


def weights_init_kaiming(m, scale=1):
    """Kaiming (He) fan-in init, scaled by ``scale``; BN gets weight=1, bias=0."""
    classname = m.__class__.__name__
    if classname.find('Conv2d') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
        m.weight.data *= scale
        if m.bias is not None:
            m.bias.data.zero_()
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
        m.weight.data *= scale
        if m.bias is not None:
            m.bias.data.zero_()
    elif classname.find('BatchNorm2d') != -1:
        init.constant_(m.weight.data, 1.0)
        init.constant_(m.bias.data, 0.0)


def weights_init_orthogonal(m):
    """Orthogonal init (gain=1) for Conv/Linear; BN gets weight=1, bias=0."""
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.orthogonal_(m.weight.data, gain=1)
        if m.bias is not None:
            m.bias.data.zero_()
    elif classname.find('Linear') != -1:
        init.orthogonal_(m.weight.data, gain=1)
        if m.bias is not None:
            m.bias.data.zero_()
    elif classname.find('BatchNorm2d') != -1:
        init.constant_(m.weight.data, 1.0)
        init.constant_(m.bias.data, 0.0)


def init_weights(net, init_type='kaiming', scale=1, std=0.02):
    """Apply one of the weight initializers to every submodule of ``net``.

    ``scale`` is used by 'kaiming', ``std`` by 'normal'. Raises
    NotImplementedError for unknown ``init_type``.
    """
    logger.info('Initialization method [{:s}]'.format(init_type))
    if init_type == 'normal':
        weights_init_normal_ = functools.partial(weights_init_normal, std=std)
        net.apply(weights_init_normal_)
    elif init_type == 'kaiming':
        weights_init_kaiming_ = functools.partial(
            weights_init_kaiming, scale=scale)
        net.apply(weights_init_kaiming_)
    elif init_type == 'orthogonal':
        net.apply(weights_init_orthogonal)
    else:
        raise NotImplementedError(
            'initialization method [{:s}] not implemented'.format(init_type))


####################
# define network
####################


# Generator
def define_G(opt):
    """Build the diffusion generator described by ``opt['model']``.

    Returns a GaussianDiffusion wrapping either a UNet ('srddpm'/'sr3')
    or the lightweight Net ('lwtdm'); wrapped in nn.DataParallel when
    GPU ids are given and distributed training is enabled.
    """
    model_opt = opt['model']
    which_model = model_opt['which_model_G']
    if which_model == 'srddpm':
        from .ddpm_modules import diffusion, unet
    elif which_model == 'sr3':
        from .sr3_modules import diffusion, unet
    elif which_model == 'lwtdm':
        from .lwtdm_modules import diffusion, net
    else:
        # BUGFIX: unknown names (including 'lwtdm2', which had no import
        # branch) previously fell through and crashed later with a
        # NameError on `net`/`unet`/`diffusion`; fail fast instead.
        raise NotImplementedError(
            'Generator model [{:s}] not recognized'.format(which_model))

    if which_model == 'lwtdm':
        net_opt = model_opt['net']
        # Default the number of GroupNorm groups when missing/None.
        if net_opt.get('norm_groups') is None:
            net_opt['norm_groups'] = 32
        model = net.Net(
            in_channel=net_opt['in_channel'],
            out_channel=net_opt['out_channel'],
            norm_groups=net_opt['norm_groups'],
            inner_channel=net_opt['inner_channel'],
            channel_mults=net_opt['channel_multiplier'],
            attn_res=net_opt['attn_res'],
            res_blocks=net_opt['res_blocks'],
            dropout=net_opt['dropout'],
            image_size=model_opt['diffusion']['image_size']
        )
    else:
        unet_opt = model_opt['unet']
        if unet_opt.get('norm_groups') is None:
            unet_opt['norm_groups'] = 32
        model = unet.UNet(
            in_channel=unet_opt['in_channel'],
            out_channel=unet_opt['out_channel'],
            norm_groups=unet_opt['norm_groups'],
            inner_channel=unet_opt['inner_channel'],
            channel_mults=unet_opt['channel_multiplier'],
            attn_res=unet_opt['attn_res'],
            res_blocks=unet_opt['res_blocks'],
            dropout=unet_opt['dropout'],
            image_size=model_opt['diffusion']['image_size']
        )

    netG = diffusion.GaussianDiffusion(
        model,
        image_size=model_opt['diffusion']['image_size'],
        channels=model_opt['diffusion']['channels'],
        loss_type='l1',  # L1 or L2
        conditional=model_opt['diffusion']['conditional'],
        schedule_opt=model_opt['beta_schedule']['train']
    )
    if opt['phase'] == 'train':
        # init_weights(netG, init_type='kaiming', scale=0.1)
        init_weights(netG, init_type='orthogonal')
    if opt['gpu_ids'] and opt['distributed']:
        assert torch.cuda.is_available()
        netG = nn.DataParallel(netG)
    return netG
zcmb_hU5p#ob)Gx_Gvso2x%6sRl4aRWZP9TgOHQ1$5iHksWGA&YfhE8#;dVUBJKQBj z&QN!T(t0zCB<2QI11N9;14)aZK!H9qKv5KZYafz41PG9aASjvuxj=x{eK3SPG<|9j z6iL2wFUkFNp2FQZymRllf9Ib2ogY3uHy2+2{`dLIj~m8+8T&pq%Fp4+-v!|YXMLlk zzgdgn-R!f0*)j)K%Np1%o9Q;EXZ75chjy#)4}w;p>g|3wsI)2*o@#3j?M}ZosI_XS zbGf%`wC4E|ukh+FYc23OUPH|ypXUqsuD@jPMPA=Ecs;RtOJl3m;Ox55Y~07njJ2jE zU7p2>;9Ts-qpX?4>=`h6^P7`KzAE2}6cv54{liD)$M*Sqez`LE!D2H&)58ss4Xfig9S|Ler&Dzn)N6Aiq*|B)9?|Kh!8PF{Kc z^NoLe=|8vMz4HG5fBS#_?b`Ymue2Rv$H&Lx;Zf3jnL(ORotvh3Qp=Q^OGaraN>Vnt zbb_7p?31EK0_`v5oroQ17FDdsc6%`X8b` zK>asM3**_dvFJRV^dHa~qV->SMa{-|b;eInlST8~+> zQ#k7r#*SOM=!w!Pqwsou!{F5017A>jW7Z3~4W02y{PEfC^wZ( z87r8zbb1w1`+Lx{)Gw+fH0~{E8nmjuChc3Jm~W7n4dXwqG?^f2-#0;|t97R3>vO#L z)W%=`6@C8Y%CzcZPf4eprFqe^diiveUtYP7GcsCkJIkShf?hU0{PyzCe)PZpjR~YR zh_~+l2A}4f2+$_xh>(IPjS!?I(kU|0-jr@E;&daC_8=}cr8(?M3!_SFnBBq5{dh2x zOKVy2>A|p{43e}+_@{)(MAMgk-rh_|E5!nhTq80M((?7PqFgM}M8WhbCRj z@R}?UM|}|AOd?X^XgVg<=g)u`%x4SGXdizyJT41obXnEbS!mL;48AHm&1$C4#1rV* zCb`guzDeFny-E5|660oo<6&!R8IT#;@eSR4aJ#&wZ|V#d_Mmz z#uxbFuG0#VuvZxy{6oCXmv*s%R&`SI2mArPtZL>aHIMNXzN%_!lbXl*Nq$Pz%=2^n zG=ET;g(g44A3|x7pXCpeVR+lHjn)zb@`PM`DIVo{H%?!i!rFZcdy!`U;*GU7ArgJ) zn*>cw`A0ztV+^bx<6MC@Mq~ozjR?2{3RDNhjRd7zj$;;2aU21O-;+*08YV*eQPfSl zMHETDy&0!z($8Ud2JuD`<=yQ>hCE5LZk|M)R0jPl&m%GmvVxn)yJVVRkvwVVNxvgL zjs*$QRPiw)pCIywAhJ5ubOl1)b$# zrT6osh)B{m&^R$;AzQ{}Up9{u!<4;Ev(tw#ZDzvW!;LR+2YV(SH~}#C){a%coN$6~ z#w|r{MV(ccsCzct1*f#Z?~YBeR@yxeO6KDU#ztzEXy;yO6WkZLB&EZB9^5l`T)b7$ z0#)NHWD>?E3`y9Vo7Rs_CHNXfoPV9|_{Bo$tC~d|M4eZl!Z0>t6Ut2QRlK|V-skWR zlQe6u!7yTU#j{sCuWM-?E5M+A{0pjO>6n(qy*{T7_Nni0;o-g!Ne14E0fVV#f?*8Q zs+X|JVBf3_Ju6n)C3{;6w$$! z)`O}Nj$<_Frlb6D^XY!J5q(grTveS#((Voqb(E*}ng$mSH`a$4Tusr@Z~fqtva!cFNa8eVXG!Pcv9*^3S-Iu|f{c=Q2t>L? 
zlIEG1ILS|6Qg)YoczDT$T8Dc>bHLj#V)z?YdpMF#3cA@Z94iH_08h<`X%O?RB7hA>)MTTvldfs-OTcvt`{xB$G{E_CRF&?XybS=>Y4 zT=^w%#kQX~y7Uj~I9ojF)@{G6GqExi1U?uMR?&Lh z-0^|&eiuB+Vb6IRs0#XDEO#crPx{{ zg!ofbOIIYrBrXK`L>krCsYFf|pd&7%t74K!aMVmM^oNP%4I&B1J~u7elHqz>v^SMo z*Q)6I+D~pLeF<#<(q^d)Rg@hO__da2Ckb>~#QadP&rtOnm^MENVn8Kq_6U^2 zW*;(b*1%K6v%t0=KB_2(Mw#gb=>^>br5l7tI6F#RK-Y2VNL}Dm9B2Se`KB$-DSd&C zAoQF#dI((|1gY>O8VI7tu-7N(s@rS0-Uo#xZen;HK%#&TB2-6YDCiRk5gqb0>UVF# zE8b2-mg};WYz>>f#?#|QOff>Mt|fawlB6bw#pw^D9D!yZxsd7gg3RiO<&Jaxrp#9l z_5K4CD}8uLiO3T!sdfWJ;_VV42$TDIlL4+`(M_{au1#}QYXq8L&J{lr za03`~0ozLj*43&^$uq4`Dwi-fRS#U?Z1_)T%%1=;U{Y%^v^FSR&?^2`pk(gkol*zO z)NXD+blgRqtcJ!^@|@`&#q#v1yLuJ{(!CvorO-cZpBNC?Dy@!P0>7sgYA;~?0{Y;} zsmQ30viLlaz4k-7tlWOR@A) zw!xv7a%ceyDuf1h=2?G4vG9+@!PGZURY;7L}8wNB+c8$qPhj zyL8WB0q;_*L6#8d#uXLxljEY$%vo!r)9gdqsIo?+>69J3i=ak}+f=$KhGPbzF?I(|J<<57hwuR@K(wSPPnor&6i)D`Xn}GaDH!^A>M-Aeu3u$m13yU z7D;|oDW*=)7~R$RO7r@w6FgO+l#ZwB+93+S1@{N$HX8ut@D1n_3SP-q668TiZ#U?8(*}7zH$u=dsqrE3uXbXN3 zxk+z|0rA+BlQ~8&r@Q_+nDTQVIM_!Vl00%S29qAo_op%1R|kgo4gV(1EpWS{)Y`nR%Y$;-)CKbb)TN^o&5l?SOO%dQZbb51P(*Sa-vi@4nvs~6gv_kn^IBPFxgt}41ihe z&Mtap2@si0rM#@NOI1qAKBjW9Z@%W>f51NGm{VVKLfLwmy`I?*5{eyWfbQ*i zJ>5P1-tT?%o3GXD!NXttBK+0w8ODDZOP7uIJ(Tn}s6rzY1EZ~fMO)zB9EhRWHif2H zeY0=39nh_TJM`Kf=yvElH`;#ah8}(^?-`*VR-PMSB{KWL%xYIb3BoEU)q+w3r54sf zsTY(wD2;Fpl(mA=0A)Sg0A-_~tbx)DH$mAfDC?lK!YiO$DJUDDY=zsPY!{R!C|AR4 zpj;~`o1pB3*Fm{nP+FmQXzadr0S*}ZyF!^sf;x!PgGtiM;!)BQ)P;+FO_cj6sgEiL zUydsnE%twkN{KT`9OdfQx9%K`hS8m~N%SO6?!5o#qX!?{8IHnea3_^t>!39mMCqML z5@ol?&y*XT#c9T~rnH73q=&l4Z8Ye2zV_?5$*}rN$XaeZG%}G3w0BVBE~ov7{aur1 zkQN@^HI?mRCw9W4u z2O<=q`P^??p%vPIfc<^LHrfte!BNc*M`=tf><->ddZRE-4qw_3&6~DRWyZ|tV*>&k z$c>H}Vk@)_OF8Le97*MOI&l(bosP1?c!(|8X*4*H4U7fvE1gbn&`r}$M{kk#BI~F& zQBo3%Ap&uJ{pDA>UCujyHwO<>xsQ_eQRPM-%CTci%}n&o+{{JaLa{^hGb=Z*8&kS- zLTe__?`B@#5AB(#8o33kGZQU{s%1=V^j1)U9KHCh((j=edZGK7Ikj^eJNEZ4NY2=v z6K2oGk#fW6Y21r02z`erhVg@+-;;E=D|qu4|BY+6#%<+fBiTDrZdZ1b!$>yqm?RFU z$|OloCQ)=ADJwb~D|^_@j+8YR9V)BW&6JhK30k9c*O6;9pqh;0H0lhZr_n$;z0oAe 
z!0{{^D?ciP`4X58M)I^P!)=UD*HID61JMAWZldgn^PNlBUkuuHNy15Y7(v28r_d0z zYn{%?q&v_(EK>Ofy47r87#p}tzl^Fx zcIYy;&Cq&Y0k*Z7TVUH$8+%bVo5<)9nDoxUL7YT;y>ce{CHyhA-$o-N6*XpN-(-AS zz_*pzeFwz_+RYHXeQVnQPRtph-(&Qf`;s(c*O#Q8@(-vYNUQY;B-7x!|rg{RrZr8>%O1{jLxrMU`nQm zRKP{q5ls^caP4KLmG~EXSoKYekYA#T(6~Um{C&D*kx*62hgavq+>>CxN>(hw3Oi;i}Ziwq(oPtz()vnjVgS6&()4o7ha zk3*J&nYMUU4WT#CL7GrFxAQtLcDva7KA`!+&S}8XB)oIp?VqB93eV&VN$noW{IZHx zN;8&w{Vkr#|9G5shvPxCvVB^QW=r2fweU}w*|*5+h1O>#bk)Ya14Ih460qbpXfEq7 za7gG=9%*8fW-#itwAUR(or7)) z-20>C>06<)d-yGRuDzf(d<$OD=s!m(JbNJ4#ra0L)rGc_sOee}IoVKX85I`#Y#vtp zC3@svpeh$9Em@Kki|Gx}?xB1MM;wN4I7XSng zMyJXD27#NHa1{b9a)!aoAsDd8?R`lMXSr*cIxh3GgVsN11O-9wrqMZm7dmL6bL*}sEn zQVJFbFgBN~;=I9pdIikvVWJYHUxb-9Em>fut`{odV#;r#Q^%^WrtR-w{D-uF*7in4 z66JS*PKBXEkRj-Jp}zv1KvlmRr8s)=`)XnC@yqt;)I}Hl&h7s{;AR;@0FRkFvm%Zo zkmPPCa27B+i0N>Ld%()nzJU4xun*~6z<{!57C`3pD*&I-2@1mjiT)MV0l(=-k}hfs zQe6TS$AA>BFe8-9i1}*q`9Cvdv}-0)Bj2O0&86_ZgV3e}py@R! zBBJPD4m%oimqV?C$L3HoMa^7Cgmd0n;_Cu%q=$47Q%REdQBqPE3I`wCQ;Wj9sav|5 zDeNi2LI;7N$!!bF7HAROrU+!ah(H{92eZh(ri!`pKD9nT1qV70MwFk$I96bnO5nez z#E}xGmwBuhBiVXVlpBZ|%K7H~p?&cn`Cbt1oXIjZCIQ19t;bUtWOsmsc=JvhN&V#%H@PP<5?(ERs(!pZr^@{+Oz-QpHCD zH;R*VGE@PKP!=CfMw3+Tpck2h6Pe-UFgnPF-7`r}WOt1^K3tl!pscb78?A#n}-oL$bec&3n)^iji2c*afJ{DIV((-+C5$ z=*>t+sL*{%0r-;1fZ-;S!G4g@2Vck`aqh~uGnjGqg=ARtxyYjAB&jj420XG#FQ0h& z)JcX(Rmg?0my^U7d4#I8?Q?JT!uv}aez}SG5qjoc%M~U>j8w+;ml?FM6trWy=$FcU zl=KEF=p3vCJ1NRX2>BXnujmi*^O7ypd9M_D?$wo#}}Y)t2HJYFZ5`(DE{$OazOWl`YYk6)QtW4o_tVO* z1~cCs{1kl81$ioqVF2WEUetd-{C&7SBYHc|*9uCIHl1E8#LgTbL((B`Kl!UA@@*a%|KHv{J z@8cs568Dr#P}VRGDFS~Q$rOo0Ydjk3B#wo19Eb1xl|=QZJ_ z)nlYH+3OC>r3&UYGi3|gZm~UI-WJ8eK|JgpM$E?c%5k8W zw_QmlLp^!BI+jtndb@sr+;@4Gw!LI@O6$r~dTlFCLgmG22VY~dw$GVryjZ)o{I*JT zBzfM;jr32@!`Wh^9>5dRpUYnnwk)(v`ane)Xk>ptt?}1XYC;0SRDoSE`2EClQdf;f@D@)V8%X@6=7_LZ;bR6|E{(`IJLmpYV+SF*P#rXiTT5MX_7HQM+ZK83+M1GxLV~P9Z qg*h>={BP(_(RYa(9M)(>LHKL=w(mCAo10Ci88mCn23j|o>;D52oO6Ky literal 0 HcmV?d00001 diff --git a/model/sr3_modules/diffusion.py b/model/sr3_modules/diffusion.py new 
import math
import torch
from torch import nn
import torch.nn.functional as F
from inspect import isfunction
from functools import partial
import numpy as np

try:
    # tqdm is only a progress bar; treat it as an optional dependency so the
    # module still imports (and samples, silently) without it.
    from tqdm import tqdm
except ImportError:
    def tqdm(iterable, *args, **kwargs):
        return iterable


def _warmup_beta(linear_start, linear_end, n_timestep, warmup_frac):
    """Constant-end schedule with a linear warm-up over the first
    ``warmup_frac`` fraction of the timesteps."""
    betas = linear_end * np.ones(n_timestep, dtype=np.float64)
    warmup_time = int(n_timestep * warmup_frac)
    betas[:warmup_time] = np.linspace(
        linear_start, linear_end, warmup_time, dtype=np.float64)
    return betas


def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
    """Return the diffusion beta schedule as a float64 numpy array.

    Supported schedules: 'quad', 'linear', 'warmup10', 'warmup50', 'const',
    'jsd' (1/T, 1/(T-1), ..., 1) and 'cosine' (Nichol & Dhariwal).

    Raises:
        NotImplementedError: for an unknown schedule name.
    """
    if schedule == 'quad':
        betas = np.linspace(linear_start ** 0.5, linear_end ** 0.5,
                            n_timestep, dtype=np.float64) ** 2
    elif schedule == 'linear':
        betas = np.linspace(linear_start, linear_end,
                            n_timestep, dtype=np.float64)
    elif schedule == 'warmup10':
        betas = _warmup_beta(linear_start, linear_end, n_timestep, 0.1)
    elif schedule == 'warmup50':
        betas = _warmup_beta(linear_start, linear_end, n_timestep, 0.5)
    elif schedule == 'const':
        betas = linear_end * np.ones(n_timestep, dtype=np.float64)
    elif schedule == 'jsd':  # 1/T, 1/(T-1), 1/(T-2), ..., 1
        betas = 1. / np.linspace(n_timestep,
                                 1, n_timestep, dtype=np.float64)
    elif schedule == "cosine":
        # Computed in numpy (float64) so this branch returns the same type
        # as every other branch; the original returned a torch.Tensor here.
        timesteps = (
            np.arange(n_timestep + 1, dtype=np.float64) / n_timestep + cosine_s
        )
        alphas = timesteps / (1 + cosine_s) * math.pi / 2
        alphas = np.cos(alphas) ** 2
        alphas = alphas / alphas[0]
        betas = 1 - alphas[1:] / alphas[:-1]
        betas = np.clip(betas, a_min=None, a_max=0.999)
    else:
        raise NotImplementedError(schedule)
    return betas


# gaussian diffusion trainer class

def exists(x):
    """True iff ``x`` is not None."""
    return x is not None


def default(val, d):
    """Return ``val`` if set, else ``d`` (called if it is a function)."""
    if exists(val):
        return val
    return d() if isfunction(d) else d


class GaussianDiffusion(nn.Module):
    """SR3-style Gaussian diffusion wrapper around a denoising network.

    Training uses a continuous noise-level parameterization (a per-sample
    sqrt(alpha_cumprod) drawn uniformly between two discrete steps); sampling
    walks the reverse chain with the discrete posterior.
    """

    def __init__(
        self,
        denoise_fn,
        image_size,
        channels=3,
        loss_type='l1',
        conditional=True,
        schedule_opt=None
    ):
        super().__init__()
        self.channels = channels
        self.image_size = image_size
        self.denoise_fn = denoise_fn
        self.loss_type = loss_type
        self.conditional = conditional
        # ``schedule_opt`` is accepted for interface compatibility but unused
        # here: the schedule is installed later via set_new_noise_schedule,
        # which needs the target device.

    def set_loss(self, device):
        """Create the training loss on ``device`` ('l1' or 'l2', summed)."""
        if self.loss_type == 'l1':
            self.loss_func = nn.L1Loss(reduction='sum').to(device)
        elif self.loss_type == 'l2':
            self.loss_func = nn.MSELoss(reduction='sum').to(device)
        else:
            raise NotImplementedError()

    def set_new_noise_schedule(self, schedule_opt, device):
        """Precompute all diffusion constants as buffers on ``device``."""
        to_torch = partial(torch.tensor, dtype=torch.float32, device=device)

        betas = make_beta_schedule(
            schedule=schedule_opt['schedule'],
            n_timestep=schedule_opt['n_timestep'],
            linear_start=schedule_opt['linear_start'],
            linear_end=schedule_opt['linear_end'])
        # Defensive: make_beta_schedule now always returns numpy, but accept
        # a tensor from older callers.
        betas = betas.detach().cpu().numpy() if isinstance(
            betas, torch.Tensor) else betas
        alphas = 1. - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
        # Kept as numpy: indexed with python ints during training/sampling.
        self.sqrt_alphas_cumprod_prev = np.sqrt(
            np.append(1., alphas_cumprod))

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.register_buffer('betas', to_torch(betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev',
                             to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod',
                             to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer('sqrt_one_minus_alphas_cumprod',
                             to_torch(np.sqrt(1. - alphas_cumprod)))
        self.register_buffer('log_one_minus_alphas_cumprod',
                             to_torch(np.log(1. - alphas_cumprod)))
        self.register_buffer('sqrt_recip_alphas_cumprod',
                             to_torch(np.sqrt(1. / alphas_cumprod)))
        self.register_buffer('sqrt_recipm1_alphas_cumprod',
                             to_torch(np.sqrt(1. / alphas_cumprod - 1)))

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = betas * \
            (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer('posterior_variance',
                             to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0
        # at the beginning of the diffusion chain
        self.register_buffer('posterior_log_variance_clipped', to_torch(
            np.log(np.maximum(posterior_variance, 1e-20))))
        self.register_buffer('posterior_mean_coef1', to_torch(
            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
        self.register_buffer('posterior_mean_coef2', to_torch(
            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))

    def predict_start_from_noise(self, x_t, t, noise):
        """Recover x_0 estimate from x_t and the predicted noise at step t."""
        return self.sqrt_recip_alphas_cumprod[t] * x_t - \
            self.sqrt_recipm1_alphas_cumprod[t] * noise

    def q_posterior(self, x_start, x_t, t):
        """Mean and clipped log-variance of q(x_{t-1} | x_t, x_0)."""
        posterior_mean = self.posterior_mean_coef1[t] * \
            x_start + self.posterior_mean_coef2[t] * x_t
        posterior_log_variance_clipped = self.posterior_log_variance_clipped[t]
        return posterior_mean, posterior_log_variance_clipped

    def p_mean_variance(self, x, t, clip_denoised: bool, condition_x=None):
        """Model posterior mean / log-variance at step ``t`` (python int)."""
        batch_size = x.shape[0]
        # Continuous noise-level conditioning: the network receives the
        # scalar sqrt(alpha_cumprod) at step t+1 for the whole batch.
        noise_level = torch.FloatTensor(
            [self.sqrt_alphas_cumprod_prev[t+1]]).repeat(batch_size, 1).to(x.device)
        if condition_x is not None:
            x_recon = self.predict_start_from_noise(
                x, t=t, noise=self.denoise_fn(torch.cat([condition_x, x], dim=1), noise_level))
        else:
            x_recon = self.predict_start_from_noise(
                x, t=t, noise=self.denoise_fn(x, noise_level))

        if clip_denoised:
            x_recon.clamp_(-1., 1.)

        model_mean, posterior_log_variance = self.q_posterior(
            x_start=x_recon, x_t=x, t=t)
        return model_mean, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, t, clip_denoised=True, condition_x=None):
        """One reverse-diffusion step; noise is suppressed at t == 0."""
        model_mean, model_log_variance = self.p_mean_variance(
            x=x, t=t, clip_denoised=clip_denoised, condition_x=condition_x)
        noise = torch.randn_like(x) if t > 0 else torch.zeros_like(x)
        return model_mean + noise * (0.5 * model_log_variance).exp()

    @torch.no_grad()
    def p_sample_loop(self, x_in, continous=False):
        """Run the full reverse chain.

        ``x_in`` is a shape tuple (unconditional) or the LR-conditioning image
        tensor (conditional). With ``continous`` True, intermediate frames are
        stacked along the batch dimension; otherwise only the final image is
        returned.
        """
        device = self.betas.device
        sample_inter = (1 | (self.num_timesteps//10))  # ~10 snapshots, never 0
        if not self.conditional:
            shape = x_in
            img = torch.randn(shape, device=device)
            ret_img = img
            for i in tqdm(reversed(range(0, self.num_timesteps)), desc='sampling loop time step', total=self.num_timesteps):
                img = self.p_sample(img, i)
                if i % sample_inter == 0:
                    ret_img = torch.cat([ret_img, img], dim=0)
        else:
            x = x_in
            shape = x.shape
            img = torch.randn(shape, device=device)
            ret_img = x
            for i in tqdm(reversed(range(0, self.num_timesteps)), desc='sampling loop time step', total=self.num_timesteps):
                img = self.p_sample(img, i, condition_x=x)
                if i % sample_inter == 0:
                    ret_img = torch.cat([ret_img, img], dim=0)
        if continous:
            return ret_img
        else:
            return ret_img[-1]

    @torch.no_grad()
    def sample(self, batch_size=1, continous=False):
        """Unconditional sampling at the configured image size."""
        image_size = self.image_size
        channels = self.channels
        return self.p_sample_loop((batch_size, channels, image_size, image_size), continous)

    @torch.no_grad()
    def super_resolution(self, x_in, continous=False):
        """Conditional sampling with ``x_in`` as the LR conditioning image."""
        return self.p_sample_loop(x_in, continous)

    def q_sample(self, x_start, continuous_sqrt_alpha_cumprod, noise=None):
        """Diffuse x_0 to the given continuous noise level (random gamma)."""
        noise = default(noise, lambda: torch.randn_like(x_start))
        return (
            continuous_sqrt_alpha_cumprod * x_start +
            (1 - continuous_sqrt_alpha_cumprod**2).sqrt() * noise
        )

    def p_losses(self, x_in, noise=None):
        """Training loss: denoiser predicts the injected noise.

        ``x_in`` is a dict with 'HR' (target) and, when conditional, 'SR'
        (upsampled LR conditioning).
        """
        x_start = x_in['HR']
        [b, c, h, w] = x_start.shape
        t = np.random.randint(1, self.num_timesteps + 1)
        # Per-sample continuous level, uniform between steps t-1 and t.
        continuous_sqrt_alpha_cumprod = torch.FloatTensor(
            np.random.uniform(
                self.sqrt_alphas_cumprod_prev[t-1],
                self.sqrt_alphas_cumprod_prev[t],
                size=b
            )
        ).to(x_start.device)
        continuous_sqrt_alpha_cumprod = continuous_sqrt_alpha_cumprod.view(
            b, -1)

        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(
            x_start=x_start, continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod.view(-1, 1, 1, 1), noise=noise)

        if not self.conditional:
            x_recon = self.denoise_fn(x_noisy, continuous_sqrt_alpha_cumprod)
        else:
            x_recon = self.denoise_fn(
                torch.cat([x_in['SR'], x_noisy], dim=1), continuous_sqrt_alpha_cumprod)

        loss = self.loss_func(noise, x_recon)
        return loss

    def forward(self, x, *args, **kwargs):
        return self.p_losses(x, *args, **kwargs)
import math
import torch
from torch import nn
import torch.nn.functional as F
from inspect import isfunction


def exists(x):
    """True iff ``x`` is not None."""
    return x is not None


def default(val, d):
    """Return ``val`` if set, else ``d`` (called if it is a function)."""
    if exists(val):
        return val
    return d() if isfunction(d) else d


# PositionalEncoding Source: https://github.com/lmnt-com/wavegrad/blob/master/src/wavegrad/model.py
class PositionalEncoding(nn.Module):
    """Sinusoidal embedding of a continuous noise level."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, noise_level):
        count = self.dim // 2
        step = torch.arange(count, dtype=noise_level.dtype,
                            device=noise_level.device) / count
        encoding = noise_level.unsqueeze(
            1) * torch.exp(-math.log(1e4) * step.unsqueeze(0))
        encoding = torch.cat(
            [torch.sin(encoding), torch.cos(encoding)], dim=-1)
        return encoding


class FeatureWiseAffine(nn.Module):
    """Inject the noise embedding into a feature map.

    Additive bias by default; with ``use_affine_level`` the embedding is
    projected to (gamma, beta) for a per-channel affine modulation.
    """

    def __init__(self, in_channels, out_channels, use_affine_level=False):
        super(FeatureWiseAffine, self).__init__()
        self.use_affine_level = use_affine_level
        self.noise_func = nn.Sequential(
            # bool arithmetic: out_channels doubled when affine (gamma+beta)
            nn.Linear(in_channels, out_channels*(1+self.use_affine_level))
        )

    def forward(self, x, noise_embed):
        batch = x.shape[0]
        if self.use_affine_level:
            gamma, beta = self.noise_func(noise_embed).view(
                batch, -1, 1, 1).chunk(2, dim=1)
            x = (1 + gamma) * x + beta
        else:
            x = x + self.noise_func(noise_embed).view(batch, -1, 1, 1)
        return x


class Swish(nn.Module):
    """x * sigmoid(x) activation."""

    def forward(self, x):
        return x * torch.sigmoid(x)


class Upsample(nn.Module):
    """2x nearest-neighbor upsample followed by a 3x3 conv."""

    def __init__(self, dim):
        super().__init__()
        self.up = nn.Upsample(scale_factor=2, mode="nearest")
        self.conv = nn.Conv2d(dim, dim, 3, padding=1)

    def forward(self, x):
        return self.conv(self.up(x))


class Downsample(nn.Module):
    """2x downsample via a stride-2 3x3 conv."""

    def __init__(self, dim):
        super().__init__()
        self.conv = nn.Conv2d(dim, dim, 3, 2, 1)

    def forward(self, x):
        return self.conv(x)


# building block modules


class Block(nn.Module):
    """GroupNorm -> Swish -> (Dropout) -> 3x3 conv."""

    def __init__(self, dim, dim_out, groups=32, dropout=0):
        super().__init__()
        self.block = nn.Sequential(
            nn.GroupNorm(groups, dim),
            Swish(),
            nn.Dropout(dropout) if dropout != 0 else nn.Identity(),
            nn.Conv2d(dim, dim_out, 3, padding=1)
        )

    def forward(self, x):
        return self.block(x)


class ResnetBlock(nn.Module):
    """Two conv blocks with noise-level injection and a residual shortcut."""

    def __init__(self, dim, dim_out, noise_level_emb_dim=None, dropout=0, use_affine_level=False, norm_groups=32):
        super().__init__()
        self.noise_func = FeatureWiseAffine(
            noise_level_emb_dim, dim_out, use_affine_level)

        self.block1 = Block(dim, dim_out, groups=norm_groups)
        self.block2 = Block(dim_out, dim_out, groups=norm_groups, dropout=dropout)
        self.res_conv = nn.Conv2d(
            dim, dim_out, 1) if dim != dim_out else nn.Identity()

    def forward(self, x, time_emb):
        # (original unpacked b, c, h, w from x.shape and immediately
        # shadowed h — the unpack was unused and is removed)
        out = self.block1(x)
        out = self.noise_func(out, time_emb)
        out = self.block2(out)
        return out + self.res_conv(x)


class SelfAttention(nn.Module):
    """Full (quadratic) spatial self-attention over the feature map."""

    def __init__(self, in_channel, n_head=1, norm_groups=32):
        super().__init__()

        self.n_head = n_head

        self.norm = nn.GroupNorm(norm_groups, in_channel)
        self.qkv = nn.Conv2d(in_channel, in_channel * 3, 1, bias=False)
        self.out = nn.Conv2d(in_channel, in_channel, 1)

    def forward(self, input):
        batch, channel, height, width = input.shape
        n_head = self.n_head
        head_dim = channel // n_head

        norm = self.norm(input)
        qkv = self.qkv(norm).view(batch, n_head, head_dim * 3, height, width)
        query, key, value = qkv.chunk(3, dim=2)  # bhdyx

        attn = torch.einsum(
            "bnchw, bncyx -> bnhwyx", query, key
        ).contiguous() / math.sqrt(channel)
        attn = attn.view(batch, n_head, height, width, -1)
        attn = torch.softmax(attn, -1)
        attn = attn.view(batch, n_head, height, width, height, width)

        out = torch.einsum("bnhwyx, bncyx -> bnchw", attn, value).contiguous()
        out = self.out(out.view(batch, channel, height, width))

        return out + input


class ResnetBlocWithAttn(nn.Module):
    """ResnetBlock optionally followed by self-attention."""

    def __init__(self, dim, dim_out, *, noise_level_emb_dim=None, norm_groups=32, dropout=0, with_attn=False):
        super().__init__()
        self.with_attn = with_attn
        self.res_block = ResnetBlock(
            dim, dim_out, noise_level_emb_dim, norm_groups=norm_groups, dropout=dropout)
        if with_attn:
            self.attn = SelfAttention(dim_out, norm_groups=norm_groups)

    def forward(self, x, time_emb):
        x = self.res_block(x, time_emb)
        if self.with_attn:
            x = self.attn(x)
        return x


class UNet(nn.Module):
    """SR3 denoising U-Net conditioned on a continuous noise level.

    Attention is enabled at every spatial resolution listed in ``attn_res``.
    """

    def __init__(
        self,
        in_channel=6,
        out_channel=3,
        inner_channel=32,
        norm_groups=32,
        # Fixed: the original default was ``(8)`` — the int 8, not a tuple —
        # which made ``now_res in attn_res`` raise TypeError when defaulted.
        attn_res=(8,),
        channel_mults=(1, 2, 4, 8, 8),
        res_blocks=3,
        dropout=0,
        with_noise_level_emb=True,
        image_size=128
    ):
        super().__init__()

        if with_noise_level_emb:
            noise_level_channel = inner_channel
            self.noise_level_mlp = nn.Sequential(
                PositionalEncoding(inner_channel),
                nn.Linear(inner_channel, inner_channel * 4),
                Swish(),
                nn.Linear(inner_channel * 4, inner_channel)
            )
        else:
            noise_level_channel = None
            self.noise_level_mlp = None

        num_mults = len(channel_mults)
        pre_channel = inner_channel
        feat_channels = [pre_channel]
        now_res = image_size
        downs = [nn.Conv2d(in_channel, inner_channel,
                           kernel_size=3, padding=1)]
        for ind in range(num_mults):
            is_last = (ind == num_mults - 1)
            use_attn = (now_res in attn_res)
            channel_mult = inner_channel * channel_mults[ind]
            for _ in range(0, res_blocks):
                downs.append(ResnetBlocWithAttn(
                    pre_channel, channel_mult, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups, dropout=dropout, with_attn=use_attn))
                feat_channels.append(channel_mult)
                pre_channel = channel_mult
            if not is_last:
                downs.append(Downsample(pre_channel))
                feat_channels.append(pre_channel)
                now_res = now_res//2
        self.downs = nn.ModuleList(downs)

        self.mid = nn.ModuleList([
            ResnetBlocWithAttn(pre_channel, pre_channel, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups,
                               dropout=dropout, with_attn=True),
            ResnetBlocWithAttn(pre_channel, pre_channel, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups,
                               dropout=dropout, with_attn=False)
        ])

        ups = []
        for ind in reversed(range(num_mults)):
            is_last = (ind < 1)
            use_attn = (now_res in attn_res)
            channel_mult = inner_channel * channel_mults[ind]
            for _ in range(0, res_blocks+1):
                # +1 up-block per level consumes the skip saved at Downsample
                ups.append(ResnetBlocWithAttn(
                    pre_channel+feat_channels.pop(), channel_mult, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups,
                    dropout=dropout, with_attn=use_attn))
                pre_channel = channel_mult
            if not is_last:
                ups.append(Upsample(pre_channel))
                now_res = now_res*2

        self.ups = nn.ModuleList(ups)

        self.final_conv = Block(pre_channel, default(out_channel, in_channel), groups=norm_groups)

    def forward(self, x, time):
        """``x``: (B, in_channel, H, W); ``time``: noise level, (B,) or (B, 1)."""
        t = self.noise_level_mlp(time) if exists(
            self.noise_level_mlp) else None

        feats = []
        for layer in self.downs:
            if isinstance(layer, ResnetBlocWithAttn):
                x = layer(x, t)
            else:
                x = layer(x)
            feats.append(x)

        for layer in self.mid:
            if isinstance(layer, ResnetBlocWithAttn):
                x = layer(x, t)
            else:
                x = layer(x)

        for layer in self.ups:
            if isinstance(layer, ResnetBlocWithAttn):
                x = layer(torch.cat((x, feats.pop()), dim=1), t)
            else:
                x = layer(x)

        return self.final_conv(x)
zw-28^e(9rcSAKTwpSOQ->7)Po>3{y?%d2l(YB)rW$HQB|%j|qnfSu5w4O2a-nJUeB zBeOUqHk;Dgv86q4^qqG^vnZ=@Dn!Xc#j9M49t6CVDkcEMb@()q&p?rH> zZgTnGqr8ancQXt9+2g)QpicA`sP$3%S8|F+6S6d>uul-hcR?||VZI~Oh16-g7_W59 zK#t~PX5s{deaz^)nTwXloiNfu+p8FoS{Fc9%nAe1_N5J_DP;JQNC(uDfA8SwCTllA+ z|M!1jfZXcVw;ud2p6awB(W(+9q#LXes)$_fq_JwO=WboqqqQ)%yY+NEH+!wzLa({i zi|=6MPQBa9=U3wNYu#Qa?1oVq$`_Q1Rn^PAq_G~7R;n55d76?klA5Qd6(nkw1}cvB zLW03Mz4dy+8n2T%qL}N}H^P9FI2iRwnwva}#1NjCg+_b$E8}&AKTa38bWZpty$hf# ziIbvidO}svvO#>Ihkl9Qtlq>BD2cIAgm}aXPXjy?Tl;6_hFh&9|igXObs*E_k8`^PQFuvy>iq+bSA zLWYi5U<(D4Z!|3W#zW<_dwEIck;z2c_@?MveVDnnkkIvk$(jih*c8&DeKrtMEE*N# zee-*mRMlQlC(u%zLISm#XUGzhrVO!@hHEGy~WE!HeqkRAYrEsN5}ufsqDmT(9}3PT{`$;ZN?Jo289 zb8`N!Q}e^Ac4=V9syr$e?n36Z>0!y^@|axYlJcP=JjErm!;+`u zLvoo*j>t3eGxA|TojG|{J|aI$phtdAK8o2@?isdGI|{ixogcYY-%OHLJ-RaT;15XW zbF*{q#!7=+0zLFgHifE^XON`E01_U+gu$^IfeA!o1aPrP?aV>yWZ3WlIW$-_^|YjI z0ZUWBKHltM|9U~tidtz9KhdLHZv!w~M!j>O z-c8+s@g}s4O^Ny{ddw}2LyHQojbSJxbiP{9izU(I?n~!h0&8+JjfYGk#F`ggzWxH0 ztIvT5``~N%(VKxb#x5V=H^Vd_=B}cAh}e9w0Kc|i9>#_d`PgQXBPT^+HvAmUErq*j zn|NX4VF;|gl>#|QLMm+PklLJgmVm}~Y(QhE4d~s0sa7((U4W8#c#8ugGBec6LS_@P zN&WUz=15N#cg((vuM%qL%&9cYQ>Iw`sFrm2gUBb6J z@jZ=iAgytI8Fhmb&_Clr z!QPtu6c^$^6m(nBX0pHfWG7w=KCyO*8>_I<>g{iMVz;V$Zhz%mFUAo{wPF?At}B?% zMwp-8Eu%3< zqa+6S;ltk-&NG~(vnfu0DbK^bp(y|o(mG(Y)#z>JP6Woj&X7AVjF{}!F|kN$ze$I* zd>Ulq17ecTsBjHHGgQIqX0r*CZ4*;+3+u}ZK=Q3FPR72rCuHGM>Q(ftMZwJaZG2W;=50=xmX7j@{4UNz z(3;#EDOjyEsc!qZI&&*vB^y!yF?txE#VcTFS!{piAWQbO8b>#X52TFD3zFfd`w+q=OJkuf!fNV8V%?E@m;;z<{_t7?? 
zSs6HKZ9jf+C7;xAyd*Mb<0ZX+qP1h4BH% zlC3U05p4%nm)*@|sl6N=)%*6UEh{mciph!hPRHZUTD- zTeKD7F5)QnIRwU=i=|Rq81o+xYS;en7t~B|?}q#thDa(%^jZ3|rVXoRixOTa^dVbw zU;C>KvQP9-n1k{xsNY3~4&Hc0Q@o3?6OQ1Tp$L<)*et*&L>s~AA{g!C7*6yaz>(FT z!Z95J+%VfX&MuN#yL>7|*s2ZaBF)iZyx!Rl-+8+7P39(j+;%GZmanP+N8~a7(^@0S^rcU_RAWi6OYapZpIU%4^foC}k z8jqTcy$JbN+Da^)%UC^ai#Ea{>^E>iIBq-#qk>W^`I8LcI!Fhx06<$C+=p3_Hh?Z5 zTHo9HR_dmO)RQ=O!9-E;1m`WBGTa^Oit&xh3=t77L`$FK1_=RnBM?~F!A3(L#mrtn za2==!ck8uR5k{oQP~Xq|_Vg#t#K%nJr_SWcr_Chu0o|=Vv-$9l z*?frEP|)<5%)^{K=`TUI7>rJ7bMU|iSBJQgMZJBDrPYeomOF9WTc#l2GDW-**s^7q zAlRM8DMI(dhyXbN#3t_OAJ&~ylrL-i!phj9F|JW^nvyS5LVGRu08B%bgpxxpYoU6B zO6}%mr<2>AI9gL=GFh#wQ`*znTR8XTMHrE^6~&v0MolHHVyFV%(%XPE7{UE6z?K3w zqKs(i9kmZTO^?(KrYG+G9oq2`-2m_@0|40!mTd7bz!F8^p}a6J?nLP#mx9vVe(0c| z*wZ_2-?3Vg=Nx;_WqgoY_en7YD>IoI8$Qu$0tgK#5CmUnO&ryC&;}5Q_Hyp8*3-s1 zpZ?X`D3~DIw_sOu>t8{n<2G2OE4CL8&Rr9Z=WC$_y}S*{N78(?SJHD6if3eL>vA6y zdMf)SFv~Fv_<|(vY*Jj}-$AK^vF7%}hfIS)9uwprfgH~-c5gapGGriVcF6}w4w!F; z40wvD>3tXp3Jqd2Yol$pjSVw-Hnrarbhm)F0G<~psJMn`KV3AvEmChXW=U&qLLxV@ z<@-e#A)Kl++&4g4MaDP&*2T1#`soymi^WF9r86R%n1}mQm_sS^G14?3t;nWeZk%=* zCWK6kf9prswmvvo9Qkb4@;z~HLT%${A-aPoC?>iaEhHBVT;kmtnqO>Evi8FW2BP-? 
z>q|I{Q*%(yyMT*Z)@n3qOVpbf zK@IeT*hsJ;nVE25XNZDKM_&GFl7zT0j3d6I&;Y~%sBdhjZxI15MtW>f)Lm-y7A0>` z@*PUuCT1}Hd+)w^o1r>j|8={ZWytM5|V-=l=IcgQ#Ohg87Kn%Z4s z_+Kd1IE1b%%vtleI0^4|8YW}W+&%<6PP83kX%Z<}1J+?OlEB1>lmj|&)SUDc*AE0h z5b|zY188!EE+qJvq)X^pl;MN5H-b(9F4h(@_Z(Y>QtK_N1IoG2F>j7l>-7qa1$pPv zdesk**l@jCFP=Q87b8vEjdhQd0w#*3Np}PjN43}RIXuDW!5B6Yq}VeB9{u|$#TB=a z5Sqqm)}AAibx3q^G>fs(BJ4okhS~Z@$RKPcmW;Oz+BG+Ax?1VOI8dk!xDAY@ixt$_ zbRF7u26TbwW+n^5md;%df*`uWeRz7{Jm3ZZZ3Nnqb)-eY1E45Ir|Ap5`WUZ(Fr5dK zzyUX1kJ>4~{~~R8-t7KLZoc@+q%Uq@vg7Cf6`^_$w0ju0S9T{qO?EwwT!zUZvKt-> z##yibrJ>s|=}k+%9ljPTI?c<=wMK$Kb+=f;pfWm z-N$i2I81d$jmLp$+L5F-9Nwk@AT+2CEVlVRJjwTlTS%y%pl+neWMvVeQ){>v-M|I# z+JSq~LNivVkdM(MNv?+2UOKp+hE(YHGDtX#eFOJEKma$c;`b^{65;-j6I){wD+EOF@83>yEDBz`eRoT}PBCN>WNTk>vJj9CuWozO`hdI9OA4 zsSPW+dG-60yhF*4DPi#vC)A%K$)|^x1&y%N8C@6*1@>!1MEq2jkgVWM$Ub-u?nCfb zE_t5!lvnUv&!3%fMajqe5MBo_0#&71f8L+twm?G_>gOmW3rL6=3u>7@8Eg`J70IwR z&tbJQQFLbXKT&WS;!oto)x1Fe4J2%&e3hFQxe{zhbPTaceUbgqzJ?4cM-cVx@2-Wr zh>F~Ong44C&yp2&BB7<{-Ua=iKQ2NGjN~K|x(lbfZu;}EQqS`r@oaFx^}YWC+K5mn literal 0 HcmV?d00001 diff --git a/model/srddpm_modules/__pycache__/unet.cpython-36.pyc b/model/srddpm_modules/__pycache__/unet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15609bfa8f42cf1e345436fe19b3c19a1eadb6be GIT binary patch literal 7600 zcmbtZ+mjnvdGEVg*O}4CN4k>KPl-NY8QMYEKky@J5 zEzfwWm7e2d7AtQd4b>W>uz1VUPw%<&gs+NxqkQW zJ9@ibuRQ$quY=xa4CB9zwZ}pG9!mT_r~)G}M@Bn)n{5-{*2o-NZOhDH_RtzSZ5MER zRBm4)4|0#XU8fK+ox4Uk$;2c(`u>VPzY zEkL$%NCS}VU|ZDX##RBxDLqm9I_jj z4~_jBm%!IJ*f+HmMW`!-xIc?}$zT%oOm^Yn+d{dI68or9P~oJE+2+BoP-*jAk$zJB z@0aczPsZV$^I3Q}i0-`q_|bz8?u;ivIJy%A({TsQ*(i+f%%U*4J$6og5R*#>>!)5ryw@`%!rJ-&8%;=ke8CcK!wjJ1k1CBdC zGaRGs;s~y;KN^g~?~V_{AQ(hPOI)}Vk>fl59;(Dx7(<-h#Mx7$Vq0x$!JdEs%=y)l(>e0>gxor~B;oc=LE`c^c+~3zkXXlEP^*bAZ6ASb z&XYlSrd^?opsVd(H_>)Hh|rqEvc$ftu5*Svh^Dhd{Sn&jDn`uW$shpP+okCw?u6sR zPl#lHqRzT1;Eb4ZXn;!B%p0K7#g$DeyEM>A-?(? 
z*3X<0{a_yiE1dS#4bwA9^b5$1=7RE@}ZR7Ly5`LLMcQc3@QZ3>S*`j z*&se%@{v<0_`*kuN@^w$ekyPa%9|;!kh&BPj>e!B6q$rnlzPFcw}6~8xd<)Qh+w@?+h{vHG1|8N?2$J0@` zfj%{XtHp1jTIqgbL5!jA&~=Ds$HBJ?dtlO~1m1H8FfSm9N(YMSqf1fPRbiaKy4B)d zcNBK|-5yjJstQAU$t~1ts9*<5)2`T{{gT=un0lzl3iT!`ZOx$Y_b1Vlw*u|-@GfXh zg)W3*MJvw!B1&#BE9SO&u~VS>N!8t8Ip;!2S%m3}rM3PldK5`gZ2oBs|1!7cqK0{~ zz1()G3oo)9C_*+^Mv>(c(+U(i88{JLQqZjpx9w~9JEKYO zxQ6V*!`Z_v=L;Kz$5$=`2@$f>{JSRX#BBTmK39Jxe^ z-(puSL4eE{r~^Ti?R7PRAa^oxe7O{UA2Y=i!FoBjtZlPtU9{E+zLG?emk-%Xa^FYE zVn;e47%|3)FlZJC{tk$Y76{Ee&-e$Bx~Tikb>r*Coc%VLgBIHkSGbzTEUx0>S;p}l zwjQwhE-J(z%kV|}1;W}qUU{M{0{I*U{}`pf4f^$l*ynZJ3gNfc#Of8L{~q>YAYDj* zO%c*VSw18jpZ}lztUp99@a-g_@tH&-P@<~6V!47J@w7@S$k3EIP3LZWM(h^*#ox_fWlDFaz`y%(FbJLF2f$L5r1Wib=zGMq^Yk5zYTZgK5o& z&KxPs+?gW)7@BkHzjwo!mqb7*w!Vm>&y)vx@1-S@CUsJG;m*rxnVDl2M}BC}EA#5S zmUzQb;G~tbx{!1zDGw_Y<=2dPJ+0@fV17`-3U!RGruD!_BDK*gJ9yGx3cR$Q*0R;+ z4Vmm(MguCdEp?6XJcJ~0ZIl(v&k%0dmQA%rz+8< zc+yYC-E&2wvcHvS>MdF1u$v$kF6F$}-f=iMI!?5EHVBeq*;o6NBQage6KEHHSv}SE zN%&N|xO13=+UX`qBs;uJ<;U19=8cA7HjwD~R>gcB39)0=@N1&9tfskVU0m6eq{0wb zEB$&!J3tpptuO3ASrrf-N}eJBim6)5I7(!U*~!1a_u(Rhey{?6C>LI97atAv#!j?{}eV?_yz5t;`FQ&oHb*y7g!zXfUWHEMhdifyT;bLIcET(9=OzD97e= z37%6=y_4X6L_8x+OO`>|yd9z<%WjZJwl{L4kJxz+l`hkLiE|UWsJ{acL&c^qJ=qKA zS08auo=o%bnT6j*>lw;2hVo1cEOyL`*FH(0m8~GgeE4R$j}qTR1+9aj>Bp&KtbU?GC97JPNE z$AbHCYu~))D?)lG%cqjJHr>y*2wOF%N$^z<8XMvJj&^={5GI%O_gNqS>b`8|p4 z0TE+tBVc1>Uy3S$F|RHIHy6yQVI3IOBbvy{d<*N8!A86AZGdl|f1FfcUd;!_-#(k~ zq&sOXu&)^>*1VZ+rQV4*-^I>ux^o@ZY4g_kHGpyb zlss`m)~{#lL*=pl2Bb{L5uz!K-##+td*{DMb~2mAXq#iT zmt)l!HWy%(VCMtB-Jp_V2b?}OPydlveQd@*N;zY9i5IW|wYD(gM>#%wa=un_C9NlW zFcZk#U@EE-R$rBPqIRGH2*X-VX6fols1Ofy;=x5I30jtfylBNTrDMMIXlddk#1_l* z5#TTp5`SM?g(qu~HU&{J=gMO4EOtb|jZ4@t%5%ExlGf`^r(qO`21ypID<8-O`}?@2 z#U(H=ArPw%0tVwxLKTOBwx^S6cKc%^^OGQ6ZoPG-KZv@cEMmW!8H>NcA#@c{Q4tF1 z`T!A;Dn7-2>U!=gJ7Zj^$J(d)?!bEL%F=|!VsP7aG4IP*xH1@bk3u15JHry`K)W2x z@IL{UYFDQ!EcR;G`(bx^khoonCTHA5(UEF<5Cyt4h&#A?PTKw~4#kVKYwKszW=EZ3 
z^vf;uKcgo;L}k<~aKrq0aLtanXQO5D?*ZOg<3<$Fn!L9GX~9gguYosQH)ir1cVn2< zM2q8F82370T(4o(tcyBr)h8`Ijn^_^`44>X-wymkV5%11Oj4K&&TS}`bN&vkzNiR7 z<hJJX%d)0pv@;&! z&nE-ENv!jmy!}|chr3vs+ak>|H4%9 zq=tD-A}+8F3 bnhwyx", query, key + ).contiguous() / math.sqrt(channel) + attn = attn.view(batch, n_head, height, width, -1) + attn = torch.softmax(attn, -1) + attn = attn.view(batch, n_head, height, width, height, width) + + out = torch.einsum("bnhwyx, bncyx -> bnchw", attn, value).contiguous() + out = self.out(out.view(batch, channel, height, width)) + + return out + input + + +class ResnetBlocWithAttn(nn.Module): + def __init__(self, dim, dim_out, *, time_emb_dim=None, norm_groups=32, dropout=0, with_attn=False): + super().__init__() + self.with_attn = with_attn + self.res_block = ResnetBlock( + dim, dim_out, time_emb_dim, norm_groups=norm_groups, dropout=dropout) + if with_attn: + self.attn = SelfAttention(dim_out, norm_groups=norm_groups) + + def forward(self, x, time_emb): + x = self.res_block(x, time_emb) + if(self.with_attn): + x = self.attn(x) + return x + + +class UNet(nn.Module): + def __init__( + self, + in_channel=6, + out_channel=3, + inner_channel=32, + norm_groups=32, + channel_mults=(1, 2, 4, 8, 8), + attn_res=(8), + res_blocks=3, + dropout=0, + with_time_emb=True, + image_size=128 + ): + super().__init__() + + if with_time_emb: + time_dim = inner_channel + self.time_mlp = nn.Sequential( + TimeEmbedding(inner_channel), + nn.Linear(inner_channel, inner_channel * 4), + Swish(), + nn.Linear(inner_channel * 4, inner_channel) + ) + else: + time_dim = None + self.time_mlp = None + + num_mults = len(channel_mults) + pre_channel = inner_channel + feat_channels = [pre_channel] + now_res = image_size + downs = [nn.Conv2d(in_channel, inner_channel, + kernel_size=3, padding=1)] + for ind in range(num_mults): + is_last = (ind == num_mults - 1) + use_attn = (now_res in attn_res) + channel_mult = inner_channel * channel_mults[ind] + for _ in range(0, res_blocks): + 
downs.append(ResnetBlocWithAttn( + pre_channel, channel_mult, time_emb_dim=time_dim, norm_groups=norm_groups, dropout=dropout, with_attn=use_attn)) + feat_channels.append(channel_mult) + pre_channel = channel_mult + if not is_last: + downs.append(Downsample(pre_channel)) + feat_channels.append(pre_channel) + now_res = now_res//2 + self.downs = nn.ModuleList(downs) + + self.mid = nn.ModuleList([ + ResnetBlocWithAttn(pre_channel, pre_channel, time_emb_dim=time_dim, norm_groups=norm_groups, + dropout=dropout, with_attn=True), + ResnetBlocWithAttn(pre_channel, pre_channel, time_emb_dim=time_dim, norm_groups=norm_groups, + dropout=dropout, with_attn=False) + ]) + + ups = [] + for ind in reversed(range(num_mults)): + is_last = (ind < 1) + use_attn = (now_res in attn_res) + channel_mult = inner_channel * channel_mults[ind] + for _ in range(0, res_blocks+1): + ups.append(ResnetBlocWithAttn( + pre_channel+feat_channels.pop(), channel_mult, time_emb_dim=time_dim, dropout=dropout, norm_groups=norm_groups, with_attn=use_attn)) + pre_channel = channel_mult + if not is_last: + ups.append(Upsample(pre_channel)) + now_res = now_res*2 + + self.ups = nn.ModuleList(ups) + + self.final_conv = Block(pre_channel, default(out_channel, in_channel), groups=norm_groups) + + def forward(self, x, time): + t = self.time_mlp(time) if exists(self.time_mlp) else None + + feats = [] + for layer in self.downs: + if isinstance(layer, ResnetBlocWithAttn): + x = layer(x, t) + else: + x = layer(x) + feats.append(x) + + for layer in self.mid: + if isinstance(layer, ResnetBlocWithAttn): + x = layer(x, t) + else: + x = layer(x) + + for layer in self.ups: + if isinstance(layer, ResnetBlocWithAttn): + x = layer(torch.cat((x, feats.pop()), dim=1), t) + else: + x = layer(x) + + return self.final_conv(x) diff --git a/requirement.txt b/requirement.txt new file mode 100644 index 0000000..9a0ce6a --- /dev/null +++ b/requirement.txt @@ -0,0 +1,13 @@ +torch>=1.6 +torchvision +numpy +pandas +tqdm +lmdb 
+opencv-python +pillow +tensorboardx +wandb +pytorch-fid + + diff --git a/run_metrics.py b/run_metrics.py new file mode 100644 index 0000000..c907b81 --- /dev/null +++ b/run_metrics.py @@ -0,0 +1,18 @@ +import sys +import subprocess + +arg1 = sys.argv[1] # sr4_01_ddim +arg2 = sys.argv[2] # 3 + +command1 = f"python experiments/check_fid.py {arg1}" +command2 = f"python -m pytorch_fid experiments/{arg1}/results1 experiments/{arg1}/results2 --device cuda:{arg2}" +command3 = f"python eval.py -p experiments/{arg1}/results" + +print(f"Running command 1: {command1}") +subprocess.run(command1, shell=True) + +print(f"Running command 2: {command2}") +subprocess.run(command2, shell=True) + +print(f"Running command 3: {command3}") +subprocess.run(command3, shell=True) diff --git a/sr.py b/sr.py new file mode 100644 index 0000000..5882ca0 --- /dev/null +++ b/sr.py @@ -0,0 +1,244 @@ +import torch +import data as Data +import model as Model +import argparse +import logging +import core.logger as Logger +import core.metrics as Metrics +from core.wandb_logger import WandbLogger +from tensorboardX import SummaryWriter +import os +import numpy as np +import pdb + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('-c', '--config', type=str, default='config/sr_sr3_16_128.json', + help='JSON file for configuration') + parser.add_argument('-p', '--phase', type=str, choices=['train', 'val'], + help='Run either train(training) or val(generation)', default='train') + parser.add_argument('-gpu', '--gpu_ids', type=str, default=None) + parser.add_argument('-debug', '-d', action='store_true') + parser.add_argument('-enable_wandb', action='store_true') + parser.add_argument('-log_wandb_ckpt', action='store_true') + parser.add_argument('-log_eval', action='store_true') + + # parse configs + args = parser.parse_args() + opt = Logger.parse(args) + # Convert to NoneDict, which return None for missing key. 
+ opt = Logger.dict_to_nonedict(opt) + + # logging + torch.backends.cudnn.enabled = True + torch.backends.cudnn.benchmark = True + + Logger.setup_logger(None, opt['path']['log'], + 'train', level=logging.INFO, screen=True) + Logger.setup_logger('val', opt['path']['log'], 'val', level=logging.INFO) + logger = logging.getLogger('base') + logger.info(Logger.dict2str(opt)) + tb_logger = SummaryWriter(log_dir=opt['path']['tb_logger']) + + # Initialize WandbLogger + if opt['enable_wandb']: + import wandb + wandb_logger = WandbLogger(opt) + wandb.define_metric('validation/val_step') + wandb.define_metric('epoch') + wandb.define_metric("validation/*", step_metric="val_step") + val_step = 0 + else: + wandb_logger = None + + # dataset + for phase, dataset_opt in opt['datasets'].items(): + if phase == 'train' and args.phase != 'val': + train_set = Data.create_dataset(dataset_opt, phase) + train_loader = Data.create_dataloader( + train_set, dataset_opt, phase) + elif phase == 'val': + val_set = Data.create_dataset(dataset_opt, phase) + val_loader = Data.create_dataloader( + val_set, dataset_opt, phase) + logger.info('Initial Dataset Finished') + + # model + diffusion = Model.create_model(opt) + logger.info('Initial Model Finished') + + # Train + current_step = diffusion.begin_step + current_epoch = diffusion.begin_epoch + n_iter = opt['train']['n_iter'] + + if opt['path']['resume_state']: + logger.info('Resuming training from epoch: {}, iter: {}.'.format( + current_epoch, current_step)) + + diffusion.set_new_noise_schedule( + opt['model']['beta_schedule'][opt['phase']], schedule_phase=opt['phase']) + if opt['phase'] == 'train': + while current_step < n_iter: + current_epoch += 1 + for _, train_data in enumerate(train_loader): + current_step += 1 + if current_step > n_iter: + break + diffusion.feed_data(train_data) + diffusion.optimize_parameters() + # log + if current_step % opt['train']['print_freq'] == 0: + logs = diffusion.get_current_log() + message = ' '.format( + 
current_epoch, current_step) + for k, v in logs.items(): + message += '{:s}: {:.4e} '.format(k, v) + tb_logger.add_scalar(k, v, current_step) + logger.info(message) + + if wandb_logger: + wandb_logger.log_metrics(logs) + + # validation + if current_step % opt['train']['val_freq'] == 0: + avg_psnr = 0.0 + idx = 0 + result_path = '{}/{}'.format(opt['path'] + ['results'], current_epoch) + os.makedirs(result_path, exist_ok=True) + + diffusion.set_new_noise_schedule( + opt['model']['beta_schedule']['val'], schedule_phase='val') + for _, val_data in enumerate(val_loader): + idx += 1 + diffusion.feed_data(val_data) + diffusion.test(continous=False) + visuals = diffusion.get_current_visuals() + sr_img = Metrics.tensor2img(visuals['SR']) # uint8 + hr_img = Metrics.tensor2img(visuals['HR']) # uint8 + lr_img = Metrics.tensor2img(visuals['LR']) # uint8 + fake_img = Metrics.tensor2img(visuals['INF']) # uint8 + + # generation + Metrics.save_img( + hr_img, '{}/{}_{}_hr.png'.format(result_path, current_step, idx)) + Metrics.save_img( + sr_img, '{}/{}_{}_sr.png'.format(result_path, current_step, idx)) + Metrics.save_img( + lr_img, '{}/{}_{}_lr.png'.format(result_path, current_step, idx)) + Metrics.save_img( + fake_img, '{}/{}_{}_inf.png'.format(result_path, current_step, idx)) + tb_logger.add_image( + 'Iter_{}'.format(current_step), + np.transpose(np.concatenate( + (fake_img, sr_img, hr_img), axis=1), [2, 0, 1]), + idx) + avg_psnr += Metrics.calculate_psnr( + sr_img, hr_img) + + if wandb_logger: + wandb_logger.log_image( + f'validation_{idx}', + np.concatenate((fake_img, sr_img, hr_img), axis=1) + ) + + avg_psnr = avg_psnr / idx + diffusion.set_new_noise_schedule( + opt['model']['beta_schedule']['train'], schedule_phase='train') + # log + logger.info('# Validation # PSNR: {:.4e}'.format(avg_psnr)) + logger_val = logging.getLogger('val') # validation logger + logger_val.info(' psnr: {:.4e}'.format( + current_epoch, current_step, avg_psnr)) + # tensorboard logger + 
tb_logger.add_scalar('psnr', avg_psnr, current_step) + + if wandb_logger: + wandb_logger.log_metrics({ + 'validation/val_psnr': avg_psnr, + 'validation/val_step': val_step + }) + val_step += 1 + + if current_step % opt['train']['save_checkpoint_freq'] == 0: + logger.info('Saving models and training states.') + diffusion.save_network(current_epoch, current_step) + + if wandb_logger and opt['log_wandb_ckpt']: + wandb_logger.log_checkpoint(current_epoch, current_step) + + if wandb_logger: + wandb_logger.log_metrics({'epoch': current_epoch-1}) + + # save model + logger.info('End of training.') + else: + logger.info('Begin Model Evaluation.') + avg_psnr = 0.0 + avg_ssim = 0.0 + idx = 0 + result_path = '{}'.format(opt['path']['results']) + os.makedirs(result_path, exist_ok=True) + # pdb.set_trace() + for _, val_data in enumerate(val_loader): + idx += 1 + diffusion.feed_data(val_data) + diffusion.test(continous=True) + visuals = diffusion.get_current_visuals() + + hr_img = Metrics.tensor2img(visuals['HR']) # uint8 + lr_img = Metrics.tensor2img(visuals['LR']) # uint8 + fake_img = Metrics.tensor2img(visuals['INF']) # uint8 + + sr_img_mode = 'grid' + if sr_img_mode == 'single': + # single img series + sr_img = visuals['SR'] # uint8 + sample_num = sr_img.shape[0] + for iter in range(0, sample_num): + Metrics.save_img( + Metrics.tensor2img(sr_img[iter]), '{}/{}_{}_sr_{}.png'.format(result_path, current_step, idx, iter)) + else: + # grid img + sr_img = Metrics.tensor2img(visuals['SR']) # uint8 + Metrics.save_img( + sr_img, '{}/{}_{}_sr_process.png'.format(result_path, current_step, idx)) + Metrics.save_img( + Metrics.tensor2img(visuals['SR'][-1]), '{}/{}_{}_sr.png'.format(result_path, current_step, idx)) + + Metrics.save_img( + hr_img, '{}/{}_{}_hr.png'.format(result_path, current_step, idx)) + Metrics.save_img( + lr_img, '{}/{}_{}_lr.png'.format(result_path, current_step, idx)) + Metrics.save_img( + fake_img, '{}/{}_{}_inf.png'.format(result_path, current_step, idx)) + + # 
generation + eval_psnr = Metrics.calculate_psnr(Metrics.tensor2img(visuals['SR'][-1]), hr_img) + eval_ssim = Metrics.calculate_ssim(Metrics.tensor2img(visuals['SR'][-1]), hr_img) + + avg_psnr += eval_psnr + avg_ssim += eval_ssim + + if wandb_logger and opt['log_eval']: + wandb_logger.log_eval_data(fake_img, Metrics.tensor2img(visuals['SR'][-1]), hr_img, eval_psnr, eval_ssim) + + avg_psnr = avg_psnr / idx + avg_ssim = avg_ssim / idx + + # log + logger.info('# Validation # PSNR: {:.4e}'.format(avg_psnr)) + logger.info('# Validation # SSIM: {:.4e}'.format(avg_ssim)) + logger_val = logging.getLogger('val') # validation logger + logger_val.info(' psnr: {:.4e}, ssim:{:.4e}'.format( + current_epoch, current_step, avg_psnr, avg_ssim)) + + if wandb_logger: + if opt['log_eval']: + wandb_logger.log_eval_table() + wandb_logger.log_metrics({ + 'PSNR': float(avg_psnr), + 'SSIM': float(avg_ssim) + }) diff --git a/sr2.py b/sr2.py new file mode 100644 index 0000000..c9c49a5 --- /dev/null +++ b/sr2.py @@ -0,0 +1,244 @@ +import torch +import data as Data +import model as Model +import argparse +import logging +import core.logger as Logger +import core.metrics as Metrics +from core.wandb_logger import WandbLogger +from tensorboardX import SummaryWriter +import os +import numpy as np +import pdb + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('-c', '--config', type=str, default='config/sr_sr3_16_128.json', + help='JSON file for configuration') + parser.add_argument('-p', '--phase', type=str, choices=['train', 'val'], + help='Run either train(training) or val(generation)', default='train') + parser.add_argument('-gpu', '--gpu_ids', type=str, default=None) + parser.add_argument('-debug', '-d', action='store_true') + parser.add_argument('-enable_wandb', action='store_true') + parser.add_argument('-log_wandb_ckpt', action='store_true') + parser.add_argument('-log_eval', action='store_true') + + # parse configs + args = parser.parse_args() + opt = 
Logger.parse(args) + # Convert to NoneDict, which return None for missing key. + opt = Logger.dict_to_nonedict(opt) + + # logging + torch.backends.cudnn.enabled = True + torch.backends.cudnn.benchmark = True + + Logger.setup_logger(None, opt['path']['log'], + 'train', level=logging.INFO, screen=True) + Logger.setup_logger('val', opt['path']['log'], 'val', level=logging.INFO) + logger = logging.getLogger('base') + logger.info(Logger.dict2str(opt)) + tb_logger = SummaryWriter(log_dir=opt['path']['tb_logger']) + + # Initialize WandbLogger + if opt['enable_wandb']: + import wandb + wandb_logger = WandbLogger(opt) + wandb.define_metric('validation/val_step') + wandb.define_metric('epoch') + wandb.define_metric("validation/*", step_metric="val_step") + val_step = 0 + else: + wandb_logger = None + + # dataset + for phase, dataset_opt in opt['datasets'].items(): + if phase == 'train' and args.phase != 'val': + train_set = Data.create_dataset2(dataset_opt, phase) + train_loader = Data.create_dataloader( + train_set, dataset_opt, phase) + elif phase == 'val': + val_set = Data.create_dataset2(dataset_opt, phase) + val_loader = Data.create_dataloader( + val_set, dataset_opt, phase) + logger.info('Initial Dataset Finished') + + # model + diffusion = Model.create_model(opt) + logger.info('Initial Model Finished') + + # Train + current_step = diffusion.begin_step + current_epoch = diffusion.begin_epoch + n_iter = opt['train']['n_iter'] + + if opt['path']['resume_state']: + logger.info('Resuming training from epoch: {}, iter: {}.'.format( + current_epoch, current_step)) + + diffusion.set_new_noise_schedule( + opt['model']['beta_schedule'][opt['phase']], schedule_phase=opt['phase']) + if opt['phase'] == 'train': + while current_step < n_iter: + current_epoch += 1 + for _, train_data in enumerate(train_loader): + current_step += 1 + if current_step > n_iter: + break + diffusion.feed_data(train_data) + diffusion.optimize_parameters() + # log + if current_step % 
opt['train']['print_freq'] == 0: + logs = diffusion.get_current_log() + message = ' '.format( + current_epoch, current_step) + for k, v in logs.items(): + message += '{:s}: {:.4e} '.format(k, v) + tb_logger.add_scalar(k, v, current_step) + logger.info(message) + + if wandb_logger: + wandb_logger.log_metrics(logs) + + # validation + if current_step % opt['train']['val_freq'] == 0: + avg_psnr = 0.0 + idx = 0 + result_path = '{}/{}'.format(opt['path'] + ['results'], current_epoch) + os.makedirs(result_path, exist_ok=True) + + diffusion.set_new_noise_schedule( + opt['model']['beta_schedule']['val'], schedule_phase='val') + for _, val_data in enumerate(val_loader): + idx += 1 + diffusion.feed_data(val_data) + diffusion.test(continous=False) + visuals = diffusion.get_current_visuals() + sr_img = Metrics.tensor2img(visuals['SR']) # uint8 + hr_img = Metrics.tensor2img(visuals['HR']) # uint8 + lr_img = Metrics.tensor2img(visuals['LR']) # uint8 + # fake_img = Metrics.tensor2img(visuals['INF']) # uint8 + + # generation + Metrics.save_img( + hr_img, '{}/{}_{}_hr.png'.format(result_path, current_step, idx)) + Metrics.save_img( + sr_img, '{}/{}_{}_sr.png'.format(result_path, current_step, idx)) + Metrics.save_img( + lr_img, '{}/{}_{}_lr.png'.format(result_path, current_step, idx)) + # Metrics.save_img( + # fake_img, '{}/{}_{}_inf.png'.format(result_path, current_step, idx)) + tb_logger.add_image( + 'Iter_{}'.format(current_step), + np.transpose(np.concatenate((sr_img, hr_img), axis=1), [2, 0, 1]), idx) + # np.concatenate((fake_img, sr_img, hr_img), axis=1), [2, 0, 1]) + avg_psnr += Metrics.calculate_psnr( + sr_img, hr_img) + + if wandb_logger: + wandb_logger.log_image( + f'validation_{idx}', + np.concatenate((sr_img, hr_img), axis=1) + ) + # np.concatenate((fake_img, sr_img, hr_img), axis=1) + + avg_psnr = avg_psnr / idx + diffusion.set_new_noise_schedule( + opt['model']['beta_schedule']['train'], schedule_phase='train') + # log + logger.info('# Validation # PSNR: 
{:.4e}'.format(avg_psnr)) + logger_val = logging.getLogger('val') # validation logger + logger_val.info(' psnr: {:.4e}'.format( + current_epoch, current_step, avg_psnr)) + # tensorboard logger + tb_logger.add_scalar('psnr', avg_psnr, current_step) + + if wandb_logger: + wandb_logger.log_metrics({ + 'validation/val_psnr': avg_psnr, + 'validation/val_step': val_step + }) + val_step += 1 + + if current_step % opt['train']['save_checkpoint_freq'] == 0: + logger.info('Saving models and training states.') + diffusion.save_network(current_epoch, current_step) + + if wandb_logger and opt['log_wandb_ckpt']: + wandb_logger.log_checkpoint(current_epoch, current_step) + + if wandb_logger: + wandb_logger.log_metrics({'epoch': current_epoch-1}) + + # save model + logger.info('End of training.') + else: + logger.info('Begin Model Evaluation.') + avg_psnr = 0.0 + avg_ssim = 0.0 + idx = 0 + result_path = '{}'.format(opt['path']['results']) + os.makedirs(result_path, exist_ok=True) + # pdb.set_trace() + for _, val_data in enumerate(val_loader): + idx += 1 + diffusion.feed_data(val_data) + diffusion.test(continous=True) + visuals = diffusion.get_current_visuals() + + hr_img = Metrics.tensor2img(visuals['HR']) # uint8 + lr_img = Metrics.tensor2img(visuals['LR']) # uint8 + # fake_img = Metrics.tensor2img(visuals['INF']) # uint8 + + sr_img_mode = 'grid' + if sr_img_mode == 'single': + # single img series + sr_img = visuals['SR'] # uint8 + sample_num = sr_img.shape[0] + for iter in range(0, sample_num): + Metrics.save_img( + Metrics.tensor2img(sr_img[iter]), '{}/{}_{}_sr_{}.png'.format(result_path, current_step, idx, iter)) + else: + # grid img + sr_img = Metrics.tensor2img(visuals['SR']) # uint8 + Metrics.save_img( + sr_img, '{}/{}_{}_sr_process.png'.format(result_path, current_step, idx)) + Metrics.save_img( + Metrics.tensor2img(visuals['SR'][-1]), '{}/{}_{}_sr.png'.format(result_path, current_step, idx)) + + Metrics.save_img( + hr_img, '{}/{}_{}_hr.png'.format(result_path, 
current_step, idx)) + Metrics.save_img( + lr_img, '{}/{}_{}_lr.png'.format(result_path, current_step, idx)) + # Metrics.save_img( + # fake_img, '{}/{}_{}_inf.png'.format(result_path, current_step, idx)) + + # generation + eval_psnr = Metrics.calculate_psnr(Metrics.tensor2img(visuals['SR'][-1]), hr_img) + eval_ssim = Metrics.calculate_ssim(Metrics.tensor2img(visuals['SR'][-1]), hr_img) + + avg_psnr += eval_psnr + avg_ssim += eval_ssim + + # if wandb_logger and opt['log_eval']: + # wandb_logger.log_eval_data(fake_img, Metrics.tensor2img(visuals['SR'][-1]), hr_img, eval_psnr, eval_ssim) + + avg_psnr = avg_psnr / idx + avg_ssim = avg_ssim / idx + + # log + logger.info('# Validation # PSNR: {:.4e}'.format(avg_psnr)) + logger.info('# Validation # SSIM: {:.4e}'.format(avg_ssim)) + logger_val = logging.getLogger('val') # validation logger + logger_val.info(' psnr: {:.4e}, ssim:{:.4e}'.format( + current_epoch, current_step, avg_psnr, avg_ssim)) + + if wandb_logger: + if opt['log_eval']: + wandb_logger.log_eval_table() + wandb_logger.log_metrics({ + 'PSNR': float(avg_psnr), + 'SSIM': float(avg_ssim) + })