From 642e71892ad0be4bc5024e3c272c7c16aa7fdf93 Mon Sep 17 00:00:00 2001 From: kahmed10 <15948690+kahmed10@users.noreply.github.com> Date: Tue, 17 Sep 2024 14:38:58 -0500 Subject: [PATCH 01/17] initial progress NOT FUNCTIONAL --- .../python_stable_diffusion_3/export_onnx.py | 50 ++ .../python_stable_diffusion_3/other_impls.py | 515 +++++++++++++++++ .../python_stable_diffusion_3/txt2img.py | 546 ++++++++++++++++++ 3 files changed, 1111 insertions(+) create mode 100644 examples/diffusion/python_stable_diffusion_3/export_onnx.py create mode 100644 examples/diffusion/python_stable_diffusion_3/other_impls.py create mode 100644 examples/diffusion/python_stable_diffusion_3/txt2img.py diff --git a/examples/diffusion/python_stable_diffusion_3/export_onnx.py b/examples/diffusion/python_stable_diffusion_3/export_onnx.py new file mode 100644 index 00000000000..2610b321ee6 --- /dev/null +++ b/examples/diffusion/python_stable_diffusion_3/export_onnx.py @@ -0,0 +1,50 @@ +import torch +from diffusers import StableDiffusion3Pipeline +import os + +pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16) +# pipe = pipe.to("cuda") +# print(pipe) +# print(pipe.text_encoder) +x=torch.randint(1, (1, 77)) +# pipe.text_encoder.eval() +output_path='models' +encoder_path=output_path+'/text_encoder/text_encoder.onnx' +encoder_2_path=output_path+'/text_encoder_2/text_encoder_2.onnx' +encoder_3_path=output_path+'/text_encoder_3/text_encoder_3.onnx' +print(output_path) +# os.makedirs(os.path.dirname(output_path), exist_ok=True) +os.makedirs(os.path.dirname(encoder_path), exist_ok=True) +os.makedirs(os.path.dirname(encoder_2_path), exist_ok=True) +os.makedirs(os.path.dirname(encoder_3_path), exist_ok=True) + +torch.onnx.export(pipe.text_encoder, + x, + encoder_path, + export_params=True, + do_constant_folding=True, + input_names=['input_ids'], + dynamic_axes={'input_ids': { 0: 'batch_size'}}) +torch.onnx.export(pipe.text_encoder_2, + x, + encoder_2_path, + export_params=True, + do_constant_folding=True, + input_names=['input_ids'], + dynamic_axes={'input_ids': { 0: 'batch_size'}}) +torch.onnx.export(pipe.text_encoder_3, + x, + encoder_3_path, + export_params=True, + do_constant_folding=True, + input_names=['input_ids'], + dynamic_axes={'input_ids': { 0: 'batch_size'}}) + + +# export_options = torch.onnx.ExportOptions(dynamic_shapes=True) +# onnx_program = torch.onnx.dynamo_export( +# pipe.text_encoder, +# *x, +# # **kwargs, +# export_options=export_options) +# onnx_program.save("text_encoder.onnx") diff --git a/examples/diffusion/python_stable_diffusion_3/other_impls.py b/examples/diffusion/python_stable_diffusion_3/other_impls.py new file mode 100644 index 00000000000..02291ba4159 --- /dev/null +++ b/examples/diffusion/python_stable_diffusion_3/other_impls.py @@ -0,0 +1,515 @@ +# MIT License + +# Copyright (c) 2024 Stability AI + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+ +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# Some code in `other_impls` originates from HuggingFace and is subject to [the HuggingFace Transformers Apache2 License](https://github.com/huggingface/transformers/blob/main/LICENSE) +### This file contains impls for underlying related models (CLIP, T5, etc) + +import torch, math +from torch import nn +from transformers import CLIPTokenizer, T5TokenizerFast + + +################################################################################################# +### Core/Utility +################################################################################################# + + +def attention(q, k, v, heads, mask=None): + """Convenience wrapper around a basic attention operation""" + b, _, dim_head = q.shape + dim_head //= heads + q, k, v = map(lambda t: t.view(b, -1, heads, dim_head).transpose(1, 2), (q, k, v)) + out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False) + return out.transpose(1, 2).reshape(b, -1, heads * dim_head) + + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks""" + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, dtype=None, device=None): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias, dtype=dtype, device=device) + self.act = act_layer + self.fc2 = nn.Linear(hidden_features, out_features, bias=bias, dtype=dtype, device=device) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + return x + + +################################################################################################# +### CLIP +################################################################################################# + + +class CLIPAttention(torch.nn.Module): + def __init__(self, embed_dim, heads, dtype, device): + super().__init__() + self.heads = heads + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) + + def forward(self, x, mask=None): + q = self.q_proj(x) + k = self.k_proj(x) + v = self.v_proj(x) + out = attention(q, k, v, self.heads, mask) + return self.out_proj(out) + + +ACTIVATIONS = { + "quick_gelu": lambda a: a * torch.sigmoid(1.702 * a), + "gelu": torch.nn.functional.gelu, +} + +class CLIPLayer(torch.nn.Module): + def __init__(self, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device): + super().__init__() + self.layer_norm1 = nn.LayerNorm(embed_dim, dtype=dtype, device=device) + self.self_attn = CLIPAttention(embed_dim, heads, dtype, device) + self.layer_norm2 = nn.LayerNorm(embed_dim, dtype=dtype, device=device) + #self.mlp = CLIPMLP(embed_dim, intermediate_size, 
intermediate_activation, dtype, device) + self.mlp = Mlp(embed_dim, intermediate_size, embed_dim, act_layer=ACTIVATIONS[intermediate_activation], dtype=dtype, device=device) + + def forward(self, x, mask=None): + x += self.self_attn(self.layer_norm1(x), mask) + x += self.mlp(self.layer_norm2(x)) + return x + + +class CLIPEncoder(torch.nn.Module): + def __init__(self, num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device): + super().__init__() + self.layers = torch.nn.ModuleList([CLIPLayer(embed_dim, heads, intermediate_size, intermediate_activation, dtype, device) for i in range(num_layers)]) + + def forward(self, x, mask=None, intermediate_output=None): + if intermediate_output is not None: + if intermediate_output < 0: + intermediate_output = len(self.layers) + intermediate_output + intermediate = None + for i, l in enumerate(self.layers): + x = l(x, mask) + if i == intermediate_output: + intermediate = x.clone() + return x, intermediate + + +class CLIPEmbeddings(torch.nn.Module): + def __init__(self, embed_dim, vocab_size=49408, num_positions=77, dtype=None, device=None): + super().__init__() + self.token_embedding = torch.nn.Embedding(vocab_size, embed_dim, dtype=dtype, device=device) + self.position_embedding = torch.nn.Embedding(num_positions, embed_dim, dtype=dtype, device=device) + + def forward(self, input_tokens): + return self.token_embedding(input_tokens) + self.position_embedding.weight + + +class CLIPTextModel_(torch.nn.Module): + def __init__(self, config_dict, dtype, device): + num_layers = config_dict["num_hidden_layers"] + embed_dim = config_dict["hidden_size"] + heads = config_dict["num_attention_heads"] + intermediate_size = config_dict["intermediate_size"] + intermediate_activation = config_dict["hidden_act"] + super().__init__() + self.embeddings = CLIPEmbeddings(embed_dim, dtype=torch.float32, device=device) + self.encoder = CLIPEncoder(num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device) + self.final_layer_norm = nn.LayerNorm(embed_dim, dtype=dtype, device=device) + + def forward(self, input_tokens, intermediate_output=None, final_layer_norm_intermediate=True): + x = self.embeddings(input_tokens) + causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1) + x, i = self.encoder(x, mask=causal_mask, intermediate_output=intermediate_output) + x = self.final_layer_norm(x) + if i is not None and final_layer_norm_intermediate: + i = self.final_layer_norm(i) + pooled_output = x[torch.arange(x.shape[0], device=x.device), input_tokens.to(dtype=torch.int, device=x.device).argmax(dim=-1),] + return x, i, pooled_output + + +class CLIPTextModel(torch.nn.Module): + def __init__(self, config_dict, dtype, device): + super().__init__() + self.num_layers = config_dict["num_hidden_layers"] + self.text_model = CLIPTextModel_(config_dict, dtype, device) + embed_dim = config_dict["hidden_size"] + self.text_projection = nn.Linear(embed_dim, embed_dim, bias=False, dtype=dtype, device=device) + self.text_projection.weight.copy_(torch.eye(embed_dim)) + self.dtype = dtype + + def get_input_embeddings(self): + return self.text_model.embeddings.token_embedding + + def set_input_embeddings(self, embeddings): + self.text_model.embeddings.token_embedding = embeddings + + def forward(self, *args, **kwargs): + x = self.text_model(*args, **kwargs) + out = self.text_projection(x[2]) + return (x[0], x[1], out, x[2]) + + +class SDTokenizer: + def __init__(self, max_length=77, 
pad_with_end=True, tokenizer=None, has_start_token=True, pad_to_max_length=True, min_length=None): + self.tokenizer = tokenizer + self.max_length = max_length + self.min_length = min_length + empty = self.tokenizer('')["input_ids"] + if has_start_token: + self.tokens_start = 1 + self.start_token = empty[0] + self.end_token = empty[1] + else: + self.tokens_start = 0 + self.start_token = None + self.end_token = empty[0] + self.pad_with_end = pad_with_end + self.pad_to_max_length = pad_to_max_length + vocab = self.tokenizer.get_vocab() + self.inv_vocab = {v: k for k, v in vocab.items()} + self.max_word_length = 8 + + + def tokenize_with_weights(self, text:str): + """Tokenize the text, with weight values - presume 1.0 for all and ignore other features here. The details aren't relevant for a reference impl, and weights themselves has weak effect on SD3.""" + if self.pad_with_end: + pad_token = self.end_token + else: + pad_token = 0 + batch = [] + if self.start_token is not None: + batch.append((self.start_token, 1.0)) + to_tokenize = text.replace("\n", " ").split(' ') + to_tokenize = [x for x in to_tokenize if x != ""] + for word in to_tokenize: + batch.extend([(t, 1) for t in self.tokenizer(word)["input_ids"][self.tokens_start:-1]]) + batch.append((self.end_token, 1.0)) + if self.pad_to_max_length: + batch.extend([(pad_token, 1.0)] * (self.max_length - len(batch))) + if self.min_length is not None and len(batch) < self.min_length: + batch.extend([(pad_token, 1.0)] * (self.min_length - len(batch))) + return [batch] + + +class SDXLClipGTokenizer(SDTokenizer): + def __init__(self, tokenizer): + super().__init__(pad_with_end=False, tokenizer=tokenizer) + + +class SD3Tokenizer: + def __init__(self): + clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") + self.clip_l = SDTokenizer(tokenizer=clip_tokenizer) + self.clip_g = SDXLClipGTokenizer(clip_tokenizer) + self.t5xxl = T5XXLTokenizer() + + def tokenize_with_weights(self, text:str): + out = {} + out["g"] = self.clip_g.tokenize_with_weights(text) + out["l"] = self.clip_l.tokenize_with_weights(text) + out["t5xxl"] = self.t5xxl.tokenize_with_weights(text) + return out + + +class ClipTokenWeightEncoder: + def encode_token_weights(self, token_weight_pairs): + tokens = list(map(lambda a: a[0], token_weight_pairs[0])) + out, pooled = self([tokens]) + if pooled is not None: + first_pooled = pooled[0:1].cpu() + else: + first_pooled = pooled + output = [out[0:1]] + return torch.cat(output, dim=-2).cpu(), first_pooled + + +class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): + """Uses the CLIP transformer encoder for text (from huggingface)""" + LAYERS = ["last", "pooled", "hidden"] + def __init__(self, device="cpu", max_length=77, layer="last", layer_idx=None, textmodel_json_config=None, dtype=None, model_class=CLIPTextModel, + special_tokens={"start": 49406, "end": 49407, "pad": 49407}, layer_norm_hidden_state=True, return_projected_pooled=True): + super().__init__() + assert layer in self.LAYERS + self.transformer = model_class(textmodel_json_config, dtype, device) + self.num_layers = self.transformer.num_layers + self.max_length = max_length + self.transformer = self.transformer.eval() + for param in self.parameters(): + param.requires_grad = False + self.layer = layer + self.layer_idx = None + self.special_tokens = special_tokens + self.logit_scale = torch.nn.Parameter(torch.tensor(4.6055)) + self.layer_norm_hidden_state = layer_norm_hidden_state + self.return_projected_pooled = return_projected_pooled + if layer == 
"hidden": + assert layer_idx is not None + assert abs(layer_idx) < self.num_layers + self.set_clip_options({"layer": layer_idx}) + self.options_default = (self.layer, self.layer_idx, self.return_projected_pooled) + + def set_clip_options(self, options): + layer_idx = options.get("layer", self.layer_idx) + self.return_projected_pooled = options.get("projected_pooled", self.return_projected_pooled) + if layer_idx is None or abs(layer_idx) > self.num_layers: + self.layer = "last" + else: + self.layer = "hidden" + self.layer_idx = layer_idx + + def forward(self, tokens): + backup_embeds = self.transformer.get_input_embeddings() + device = backup_embeds.weight.device + tokens = torch.LongTensor(tokens).to(device) + outputs = self.transformer(tokens, intermediate_output=self.layer_idx, final_layer_norm_intermediate=self.layer_norm_hidden_state) + self.transformer.set_input_embeddings(backup_embeds) + if self.layer == "last": + z = outputs[0] + else: + z = outputs[1] + pooled_output = None + if len(outputs) >= 3: + if not self.return_projected_pooled and len(outputs) >= 4 and outputs[3] is not None: + pooled_output = outputs[3].float() + elif outputs[2] is not None: + pooled_output = outputs[2].float() + return z.float(), pooled_output + + +class SDXLClipG(SDClipModel): + """Wraps the CLIP-G model into the SD-CLIP-Model interface""" + def __init__(self, config, device="cpu", layer="penultimate", layer_idx=None, dtype=None): + if layer == "penultimate": + layer="hidden" + layer_idx=-2 + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=config, dtype=dtype, special_tokens={"start": 49406, "end": 49407, "pad": 0}, layer_norm_hidden_state=False) + + +class T5XXLModel(SDClipModel): + """Wraps the T5-XXL model into the SD-CLIP-Model interface for convenience""" + def __init__(self, config, device="cpu", layer="last", layer_idx=None, dtype=None): + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=T5) + + +################################################################################################# +### T5 implementation, for the T5-XXL text encoder portion, largely pulled from upstream impl +################################################################################################# + + +class T5XXLTokenizer(SDTokenizer): + """Wraps the T5 Tokenizer from HF into the SDTokenizer interface""" + def __init__(self): + super().__init__(pad_with_end=False, tokenizer=T5TokenizerFast.from_pretrained("google/t5-v1_1-xxl"), has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=77) + + +class T5LayerNorm(torch.nn.Module): + def __init__(self, hidden_size, eps=1e-6, dtype=None, device=None): + super().__init__() + self.weight = torch.nn.Parameter(torch.ones(hidden_size, dtype=dtype, device=device)) + self.variance_epsilon = eps + + def forward(self, x): + variance = x.pow(2).mean(-1, keepdim=True) + x = x * torch.rsqrt(variance + self.variance_epsilon) + return self.weight.to(device=x.device, dtype=x.dtype) * x + + +class T5DenseGatedActDense(torch.nn.Module): + def __init__(self, model_dim, ff_dim, dtype, device): + super().__init__() + self.wi_0 = nn.Linear(model_dim, ff_dim, bias=False, dtype=dtype, device=device) + self.wi_1 = nn.Linear(model_dim, ff_dim, bias=False, dtype=dtype, device=device) + self.wo = nn.Linear(ff_dim, model_dim, bias=False, dtype=dtype, device=device) + + def forward(self, x): + hidden_gelu = 
torch.nn.functional.gelu(self.wi_0(x), approximate="tanh") + hidden_linear = self.wi_1(x) + x = hidden_gelu * hidden_linear + x = self.wo(x) + return x + + +class T5LayerFF(torch.nn.Module): + def __init__(self, model_dim, ff_dim, dtype, device): + super().__init__() + self.DenseReluDense = T5DenseGatedActDense(model_dim, ff_dim, dtype, device) + self.layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device) + + def forward(self, x): + forwarded_states = self.layer_norm(x) + forwarded_states = self.DenseReluDense(forwarded_states) + x += forwarded_states + return x + + +class T5Attention(torch.nn.Module): + def __init__(self, model_dim, inner_dim, num_heads, relative_attention_bias, dtype, device): + super().__init__() + # Mesh TensorFlow initialization to avoid scaling before softmax + self.q = nn.Linear(model_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.k = nn.Linear(model_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.v = nn.Linear(model_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.o = nn.Linear(inner_dim, model_dim, bias=False, dtype=dtype, device=device) + self.num_heads = num_heads + self.relative_attention_bias = None + if relative_attention_bias: + self.relative_attention_num_buckets = 32 + self.relative_attention_max_distance = 128 + self.relative_attention_bias = torch.nn.Embedding(self.relative_attention_num_buckets, self.num_heads, device=device) + + @staticmethod + def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): + """ + Adapted from Mesh Tensorflow: + https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 + + Translate relative position to a bucket number for relative attention. The relative position is defined as + memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to + position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for + small absolute relative_position and larger buckets for larger absolute relative_positions. All relative + positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
+ This should allow for more graceful generalization to longer sequences than the model has been trained on + + Args: + relative_position: an int32 Tensor + bidirectional: a boolean - whether the attention is bidirectional + num_buckets: an integer + max_distance: an integer + + Returns: + a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) + """ + relative_buckets = 0 + if bidirectional: + num_buckets //= 2 + relative_buckets += (relative_position > 0).to(torch.long) * num_buckets + relative_position = torch.abs(relative_position) + else: + relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) + # now relative_position is in the range [0, inf) + # half of the buckets are for exact increments in positions + max_exact = num_buckets // 2 + is_small = relative_position < max_exact + # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance + relative_position_if_large = max_exact + ( + torch.log(relative_position.float() / max_exact) + / math.log(max_distance / max_exact) + * (num_buckets - max_exact) + ).to(torch.long) + relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)) + relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) + return relative_buckets + + def compute_bias(self, query_length, key_length, device): + """Compute binned relative position bias""" + context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] + memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] + relative_position = memory_position - context_position # shape (query_length, key_length) + relative_position_bucket = self._relative_position_bucket( + relative_position, # shape (query_length, key_length) + bidirectional=True, + num_buckets=self.relative_attention_num_buckets, + max_distance=self.relative_attention_max_distance, + ) + values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) + values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) + return values + + def forward(self, x, past_bias=None): + q = self.q(x) + k = self.k(x) + v = self.v(x) + if self.relative_attention_bias is not None: + past_bias = self.compute_bias(x.shape[1], x.shape[1], x.device) + if past_bias is not None: + mask = past_bias + out = attention(q, k * ((k.shape[-1] / self.num_heads) ** 0.5), v, self.num_heads, mask) + return self.o(out), past_bias + + +class T5LayerSelfAttention(torch.nn.Module): + def __init__(self, model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias, dtype, device): + super().__init__() + self.SelfAttention = T5Attention(model_dim, inner_dim, num_heads, relative_attention_bias, dtype, device) + self.layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device) + + def forward(self, x, past_bias=None): + output, past_bias = self.SelfAttention(self.layer_norm(x), past_bias=past_bias) + x += output + return x, past_bias + + +class T5Block(torch.nn.Module): + def __init__(self, model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias, dtype, device): + super().__init__() + self.layer = torch.nn.ModuleList() + self.layer.append(T5LayerSelfAttention(model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias, dtype, device)) + self.layer.append(T5LayerFF(model_dim, ff_dim, dtype, device)) + + def forward(self, 
x, past_bias=None): + x, past_bias = self.layer[0](x, past_bias) + x = self.layer[-1](x) + return x, past_bias + + +class T5Stack(torch.nn.Module): + def __init__(self, num_layers, model_dim, inner_dim, ff_dim, num_heads, vocab_size, dtype, device): + super().__init__() + self.embed_tokens = torch.nn.Embedding(vocab_size, model_dim, device=device) + self.block = torch.nn.ModuleList([T5Block(model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias=(i == 0), dtype=dtype, device=device) for i in range(num_layers)]) + self.final_layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device) + + def forward(self, input_ids, intermediate_output=None, final_layer_norm_intermediate=True): + intermediate = None + x = self.embed_tokens(input_ids) + past_bias = None + for i, l in enumerate(self.block): + x, past_bias = l(x, past_bias) + if i == intermediate_output: + intermediate = x.clone() + x = self.final_layer_norm(x) + if intermediate is not None and final_layer_norm_intermediate: + intermediate = self.final_layer_norm(intermediate) + return x, intermediate + + +class T5(torch.nn.Module): + def __init__(self, config_dict, dtype, device): + super().__init__() + self.num_layers = config_dict["num_layers"] + self.encoder = T5Stack(self.num_layers, config_dict["d_model"], config_dict["d_model"], config_dict["d_ff"], config_dict["num_heads"], config_dict["vocab_size"], dtype, device) + self.dtype = dtype + + def get_input_embeddings(self): + return self.encoder.embed_tokens + + def set_input_embeddings(self, embeddings): + self.encoder.embed_tokens = embeddings + + def forward(self, *args, **kwargs): + return self.encoder(*args, **kwargs) diff --git a/examples/diffusion/python_stable_diffusion_3/txt2img.py b/examples/diffusion/python_stable_diffusion_3/txt2img.py new file mode 100644 index 00000000000..c175ea01ce8 --- /dev/null +++ b/examples/diffusion/python_stable_diffusion_3/txt2img.py @@ -0,0 +1,546 @@ +# The MIT License (MIT) +# +# Copyright (c) 2015-2024 Advanced Micro Devices, Inc. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the 'Software'), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
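+
+# A hypothetical invocation of this script (model path, prompt, and step
+# count below are illustrative placeholders; it assumes the ONNX models have
+# already been exported, e.g. with export_onnx.py):
+#
+#   python txt2img.py --onnx-model-path models/ \
+#       --prompt "a photo of an astronaut riding a horse" \
+#       --steps 20 --fp16 all
+#
+# Compiled .mxr files are cached next to the ONNX models (or under
+# --compiled-model-path when given), so subsequent runs skip compilation.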
+ +from argparse import ArgumentParser +from diffusers import EulerDiscreteScheduler + +from other_impls import SD3Tokenizer + +from PIL import Image + +import migraphx as mgx +import os +import sys +import torch +import time +from functools import wraps + +from hip import hip +from collections import namedtuple +HipEventPair = namedtuple('HipEventPair', ['start', 'end']) + + +# measurement helper +def measure(fn): + @wraps(fn) + def measure_ms(*args, **kwargs): + start_time = time.perf_counter_ns() + result = fn(*args, **kwargs) + end_time = time.perf_counter_ns() + print( + f"Elapsed time for {fn.__name__}: {(end_time - start_time) * 1e-6:.4f} ms\n" + ) + return result + + return measure_ms + + +def get_args(): + parser = ArgumentParser() + # Model compile + parser.add_argument( + "--onnx-model-path", + type=str, + default="models/sd21-onnx/", + help="Path to onnx model files.", + ) + + parser.add_argument( + "--compiled-model-path", + type=str, + default=None, + help= + "Path to compiled mxr model files. If not set, it will be saved next to the onnx model.", + ) + + parser.add_argument( + "--fp16", + choices=["all", "vae", "clip", "unet"], + nargs="+", + help="Quantize models with fp16 precision.", + ) + + parser.add_argument( + "--force-compile", + action="store_true", + default=False, + help="Ignore existing .mxr files and override them", + ) + + parser.add_argument( + "--exhaustive-tune", + action="store_true", + default=False, + help="Perform exhaustive tuning when compiling onnx models", + ) + + # Runtime + parser.add_argument( + "-s", + "--seed", + type=int, + default=42, + help="Random seed", + ) + + parser.add_argument( + "-t", + "--steps", + type=int, + default=20, + help="Number of steps", + ) + + parser.add_argument("-b", + "--batch", + type=int, + default=1, + help="Batch count or number of images to produce") + + parser.add_argument( + "-p", + "--prompt", + type=str, + required=True, + help="Prompt", + ) + + parser.add_argument( + "-n", + "--negative-prompt", + type=str, + default="", + help="Negative prompt", + ) + + parser.add_argument( + "--scale", + type=float, + default=7.0, + help="Guidance scale", + ) + + parser.add_argument( + "-o", + "--output", + type=str, + default=None, + help="Output name", + ) + return parser.parse_args() + + +mgx_to_torch_dtype_dict = { + "bool_type": torch.bool, + "uint8_type": torch.uint8, + "int8_type": torch.int8, + "int16_type": torch.int16, + "int32_type": torch.int32, + "int64_type": torch.int64, + "float_type": torch.float32, + "double_type": torch.float64, + "half_type": torch.float16, +} + +torch_to_mgx_dtype_dict = { + value: key + for (key, value) in mgx_to_torch_dtype_dict.items() +} + + +def tensor_to_arg(tensor): + return mgx.argument_from_pointer( + mgx.shape( + **{ + "type": torch_to_mgx_dtype_dict[tensor.dtype], + "lens": list(tensor.size()), + "strides": list(tensor.stride()) + }), tensor.data_ptr()) + + +def tensors_to_args(tensors): + return {name: tensor_to_arg(tensor) for name, tensor in tensors.items()} + + +def get_output_name(idx): + return f"main:#output_{idx}" + + +def copy_tensor_sync(tensor, data): + tensor.copy_(data) + torch.cuda.synchronize() + + +def run_model_sync(model, args): + model.run(args) + mgx.gpu_sync() + + +def allocate_torch_tensors(model): + input_shapes = model.get_parameter_shapes() + data_mapping = { + name: torch.zeros(shape.lens()).to( + mgx_to_torch_dtype_dict[shape.type_string()]).to(device="cuda") + for name, shape in input_shapes.items() + } + return data_mapping + + +class 
StableDiffusionMGX(): + def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, + force_compile, exhaustive_tune): + model_id = "stabilityai/stable-diffusion-2-1" + print(f"Using {model_id}") + + print("Creating EulerDiscreteScheduler scheduler") + self.scheduler = EulerDiscreteScheduler.from_pretrained( + model_id, subfolder="scheduler") + + print("Creating CLIPTokenizer tokenizer...") + self.clip_tokenizer = CLIPTokenizer.from_pretrained(model_id, + subfolder="tokenizer") + if fp16 is None: + fp16 = [] + elif "all" in fp16: + fp16 = ["vae", "clip", "unet"] + + self.batch = batch + + print("Load models...") + self.models = { + "vae": + StableDiffusionMGX.load_mgx_model( + "vae_decoder", {"latent_sample": [self.batch, 16, 128, 128]}, + onnx_model_path, + compiled_model_path=compiled_model_path, + use_fp16="vae" in fp16, + force_compile=force_compile, + exhaustive_tune=exhaustive_tune, + offload_copy=False, + batch=self.batch), + "clip-g": + StableDiffusionMGX.load_mgx_model( + "text_encoder", {"input_ids": [1, 77]}, + onnx_model_path, + compiled_model_path=compiled_model_path, + use_fp16="clip" in fp16, + force_compile=force_compile, + exhaustive_tune=exhaustive_tune, + offload_copy=False), + "clip-l": + StableDiffusionMGX.load_mgx_model( + "text_encoder_2", {"input_ids": [1, 77]}, + onnx_model_path, + compiled_model_path=compiled_model_path, + use_fp16="clip" in fp16, + force_compile=force_compile, + exhaustive_tune=exhaustive_tune, + offload_copy=False), + "t5xxl": + StableDiffusionMGX.load_mgx_model( + "text_encoder_3", {"input_ids": [1, 77]}, + onnx_model_path, + compiled_model_path=compiled_model_path, + use_fp16="clip" in fp16, + force_compile=force_compile, + exhaustive_tune=exhaustive_tune, + offload_copy=False), + "mmdit": + StableDiffusionMGX.load_mgx_model( + "mmdit", { + "sample": [2 * self.batch, 16, 128, 128], + "sigma": [2 * self.batch], + "c_crossattn": [2 * self.batch, 154, 4096] + "y": [2 * self.batch, 2048], + }, + onnx_model_path, + compiled_model_path=compiled_model_path, + use_fp16="mmdit" in fp16, + force_compile=force_compile, + exhaustive_tune=exhaustive_tune, + offload_copy=False, + batch=self.batch) + } + + self.tensors = { + "clip-g": allocate_torch_tensors(self.models["clip-g"]), + "clip-l": allocate_torch_tensors(self.models["clip-l"]), + "t5xxl": allocate_torch_tensors(self.models["t5xxl"]), + "unet": allocate_torch_tensors(self.models["unet"]), + "vae": allocate_torch_tensors(self.models["vae"]), + } + + self.model_args = { + "clip-g": tensors_to_args(self.tensors['clip-g']), + "clip-l": tensors_to_args(self.tensors['clip-l']), + "t5xxl": tensors_to_args(self.tensors['t5xxl']), + "unet": tensors_to_args(self.tensors['unet']), + "vae": tensors_to_args(self.tensors['vae']), + } + + self.events = { + "warmup": + HipEventPair(start=hip.hipEventCreate()[1], + end=hip.hipEventCreate()[1]), + "run": + HipEventPair(start=hip.hipEventCreate()[1], + end=hip.hipEventCreate()[1]), + "clip-g": + HipEventPair(start=hip.hipEventCreate()[1], + end=hip.hipEventCreate()[1]), + "clip-l": + HipEventPair(start=hip.hipEventCreate()[1], + end=hip.hipEventCreate()[1]), + "t5xxl": + HipEventPair(start=hip.hipEventCreate()[1], + end=hip.hipEventCreate()[1]), + "denoise": + HipEventPair(start=hip.hipEventCreate()[1], + end=hip.hipEventCreate()[1]), + "decode": + HipEventPair(start=hip.hipEventCreate()[1], + end=hip.hipEventCreate()[1]), + } + + self.stream = hip.hipStreamCreate()[1] + + def cleanup(self): + for event in self.events.values(): + 
hip.hipEventDestroy(event.start) + hip.hipEventDestroy(event.end) + hip.hipStreamDestroy(self.stream) + + def profile_start(self, name): + if name in self.events: + hip.hipEventRecord(self.events[name].start, None) + + def profile_end(self, name): + if name in self.events: + hip.hipEventRecord(self.events[name].end, None) + + @measure + @torch.no_grad() + def run(self, prompt, negative_prompt, steps, seed, scale): + torch.cuda.synchronize() + self.profile_start("run") + + # need to set this for each run + self.scheduler.set_timesteps(steps, device="cuda") + + print("Tokenizing prompts...") + prompt_tokens = self.tokenize(prompt, negative_prompt) + + print("Creating text embeddings...") + self.profile_start("clip") + text_embeddings = self.get_embeddings(prompt_tokens) + self.profile_end("clip") + + print( + f"Creating random input data ({self.batch}x{16}x{128}x{128}) (latents) with seed={seed}..." + ) + latents = torch.randn( + (self.batch, 16, 128, 128), + generator=torch.manual_seed(seed)).to(device="cuda") + + print("Apply initial noise sigma\n") + latents = latents * self.scheduler.init_noise_sigma + + print("Running denoising loop...") + self.profile_start("denoise") + for step, t in enumerate(self.scheduler.timesteps): + print(f"#{step}/{len(self.scheduler.timesteps)} step") + latents = self.denoise_step(text_embeddings, latents, t, scale) + self.profile_end("denoise") + + print("Scale denoised result...") + latents = 1 / 0.18215 * latents + + self.profile_start("decode") + print("Decode denoised result...") + image = self.decode(latents) + self.profile_end("decode") + + torch.cuda.synchronize() + self.profile_end("run") + return image + + def print_summary(self, denoise_steps): + print('WARMUP\t{:>9.2f} ms'.format( + hip.hipEventElapsedTime(self.events['warmup'].start, + self.events['warmup'].end)[1])) + print('CLIP\t{:>9.2f} ms'.format( + hip.hipEventElapsedTime(self.events['clip'].start, + self.events['clip'].end)[1])) + print('UNetx{}\t{:>9.2f} ms'.format( + str(denoise_steps), + hip.hipEventElapsedTime(self.events['denoise'].start, + self.events['denoise'].end)[1])) + print('VAE-Dec\t{:>9.2f} ms'.format( + hip.hipEventElapsedTime(self.events['decode'].start, + self.events['decode'].end)[1])) + print('RUN\t{:>9.2f} ms'.format( + hip.hipEventElapsedTime(self.events['run'].start, + self.events['run'].end)[1])) + + @staticmethod + @measure + def load_mgx_model(name, + shapes, + onnx_model_path, + compiled_model_path=None, + use_fp16=False, + force_compile=False, + exhaustive_tune=False, + offload_copy=True, + batch=1): + print(f"Loading {name} model...") + if compiled_model_path is None: + compiled_model_path = onnx_model_path + onnx_file = f"{onnx_model_path}/{name}/model.onnx" + mxr_file = f"{compiled_model_path}/{name}/model_{'fp16' if use_fp16 else 'fp32'}_b{batch}_{'gpu' if not offload_copy else 'oc'}.mxr" + if not force_compile and os.path.isfile(mxr_file): + print(f"Found mxr, loading it from {mxr_file}") + model = mgx.load(mxr_file, format="msgpack") + elif os.path.isfile(onnx_file): + print(f"No mxr found at {mxr_file}") + print(f"Parsing from {onnx_file}") + model = mgx.parse_onnx(onnx_file, map_input_dims=shapes) + if use_fp16: + mgx.quantize_fp16(model) + model.compile(mgx.get_target("gpu"), + exhaustive_tune=exhaustive_tune, + offload_copy=offload_copy) + print(f"Saving {name} model to {mxr_file}") + os.makedirs(os.path.dirname(mxr_file), exist_ok=True) + mgx.save(model, mxr_file, format="msgpack") + else: + print( + f"No {name} model found at {onnx_file} or {mxr_file}. 
Please download it and re-try." + ) + sys.exit(1) + return model + + @measure + def tokenize(self, prompt, negative_prompt): + return self.tokenizer([prompt, negative_prompt], + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt") + + @measure + def get_embeddings(self, prompt_tokens): + copy_tensor_sync(self.tensors["clip"]["input_ids"], + prompt_tokens.input_ids.to(torch.int32)) + run_model_sync(self.models["clip"], self.model_args["clip"]) + text_embeds = self.tensors["clip"][get_output_name(0)] + return torch.cat( + [torch.cat([i] * self.batch) for i in text_embeds.split(1)]) + + @staticmethod + def convert_to_rgb_image(image): + image = (image / 2 + 0.5).clamp(0, 1) + image = image.detach().cpu().permute(0, 2, 3, 1).numpy() + images = (image * 255).round().astype("uint8") + return [Image.fromarray(images[i]) for i in range(images.shape[0])] + + @staticmethod + def save_image(pil_image, filename="output.png"): + pil_image.save(filename) + + @measure + def denoise_step(self, text_embeddings, latents, t, scale): + latents_model_input = torch.cat([latents] * 2) + latents_model_input = self.scheduler.scale_model_input( + latents_model_input, t).to(torch.float32).to(device="cuda") + timestep = torch.atleast_1d(t.to(torch.int64)).to( + device="cuda") # convert 0D -> 1D + + copy_tensor_sync(self.tensors["unet"]["sample"], latents_model_input) + copy_tensor_sync(self.tensors["unet"]["encoder_hidden_states"], + text_embeddings) + copy_tensor_sync(self.tensors["unet"]["timestep"], timestep) + run_model_sync(self.models["unet"], self.model_args['unet']) + + noise_pred_text, noise_pred_uncond = torch.tensor_split( + self.tensors["unet"][get_output_name(0)], 2) + + # perform guidance + noise_pred = noise_pred_uncond + scale * (noise_pred_text - + noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + return self.scheduler.step(noise_pred, t, latents).prev_sample + + @measure + def decode(self, latents): + copy_tensor_sync(self.tensors["vae"]["latent_sample"], latents) + run_model_sync(self.models["vae"], self.model_args["vae"]) + return self.tensors["vae"][get_output_name(0)] + + @measure + def warmup(self, num_runs): + self.profile_start("warmup") + copy_tensor_sync(self.tensors["clip"]["input_ids"], + torch.ones((2, 77)).to(torch.int32)) + copy_tensor_sync( + self.tensors["unet"]["sample"], + torch.randn((2 * self.batch, 4, 64, 64)).to(torch.float32)) + copy_tensor_sync( + self.tensors["unet"]["encoder_hidden_states"], + torch.randn((2 * self.batch, 77, 1024)).to(torch.float32)) + copy_tensor_sync(self.tensors["unet"]["timestep"], + torch.atleast_1d(torch.randn(1).to(torch.int64))) + copy_tensor_sync( + self.tensors["vae"]["latent_sample"], + torch.randn((self.batch, 4, 64, 64)).to(torch.float32)) + + for _ in range(num_runs): + run_model_sync(self.models["clip"], self.model_args["clip"]) + run_model_sync(self.models["unet"], self.model_args["unet"]) + run_model_sync(self.models["vae"], self.model_args["vae"]) + self.profile_end("warmup") + + +if __name__ == "__main__": + args = get_args() + + sd = StableDiffusionMGX(args.onnx_model_path, args.compiled_model_path, + args.fp16, args.batch, args.force_compile, + args.exhaustive_tune) + print("Warmup") + sd.warmup(5) + print("Run") + result = sd.run(args.prompt, args.negative_prompt, args.steps, args.seed, + args.scale) + + print("Summary") + sd.print_summary(args.steps) + print("Cleanup") + sd.cleanup() + + print("Convert result to rgb image...") + images = 
StableDiffusionMGX.convert_to_rgb_image(result)
+    for i, image in enumerate(images):
+        filename = f"{args.batch}_{args.output}" if args.output else f"output_s{args.seed}_t{args.steps}_{i}.png"
+        StableDiffusionMGX.save_image(image, filename)
+        print(f"Image saved to {filename}")

From 96712fb556f8e747e2516189087fe7cb3aa32b8c Mon Sep 17 00:00:00 2001
From: kahmed10 <15948690+kahmed10@users.noreply.github.com>
Date: Tue, 17 Sep 2024 14:41:45 -0500
Subject: [PATCH 02/17] add impl file

---
 .../python_stable_diffusion_3/sd3_impls.py | 390 ++++++++++++++++++
 1 file changed, 390 insertions(+)
 create mode 100644 examples/diffusion/python_stable_diffusion_3/sd3_impls.py

diff --git a/examples/diffusion/python_stable_diffusion_3/sd3_impls.py b/examples/diffusion/python_stable_diffusion_3/sd3_impls.py
new file mode 100644
index 00000000000..8b69d58e2da
--- /dev/null
+++ b/examples/diffusion/python_stable_diffusion_3/sd3_impls.py
@@ -0,0 +1,390 @@
+# Copyright (c) 2024 Stability AI
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
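+
+# A quick sketch of the discrete-flow sampling math implemented below
+# (illustrative numbers, not taken from this change): for shift s and
+# normalized timestep t = timestep / 1000, the noise level is
+#
+#   sigma(t) = s * t / (1 + (s - 1) * t)
+#
+# e.g. s = 3.0 and t = 0.5 give sigma = 1.5 / 2.0 = 0.75, while s = 1.0
+# reduces to sigma = t. The denoised estimate is x - model_output * sigma,
+# and sample_euler() steps x along d = (x - denoised) / sigma from each
+# sigma to the next.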
+### Impls of the SD3 core diffusion model and VAE + +import torch, math, einops +from mmdit import MMDiT +from PIL import Image + + +################################################################################################# +### MMDiT Model Wrapping +################################################################################################# + + +class ModelSamplingDiscreteFlow(torch.nn.Module): + """Helper for sampler scheduling (ie timestep/sigma calculations) for Discrete Flow models""" + def __init__(self, shift=1.0): + super().__init__() + self.shift = shift + timesteps = 1000 + ts = self.sigma(torch.arange(1, timesteps + 1, 1)) + self.register_buffer('sigmas', ts) + + @property + def sigma_min(self): + return self.sigmas[0] + + @property + def sigma_max(self): + return self.sigmas[-1] + + def timestep(self, sigma): + return sigma * 1000 + + def sigma(self, timestep: torch.Tensor): + timestep = timestep / 1000.0 + if self.shift == 1.0: + return timestep + return self.shift * timestep / (1 + (self.shift - 1) * timestep) + + def calculate_denoised(self, sigma, model_output, model_input): + sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + return model_input - model_output * sigma + + def noise_scaling(self, sigma, noise, latent_image, max_denoise=False): + return sigma * noise + (1.0 - sigma) * latent_image + + +class BaseModel(torch.nn.Module): + """Wrapper around the core MM-DiT model""" + def __init__(self, shift=1.0, device=None, dtype=torch.float32, file=None, prefix=""): + super().__init__() + # Important configuration values can be quickly determined by checking shapes in the source file + # Some of these will vary between models (eg 2B vs 8B primarily differ in their depth, but also other details change) + patch_size = file.get_tensor(f"{prefix}x_embedder.proj.weight").shape[2] + depth = file.get_tensor(f"{prefix}x_embedder.proj.weight").shape[0] // 64 + num_patches = file.get_tensor(f"{prefix}pos_embed").shape[1] + pos_embed_max_size = round(math.sqrt(num_patches)) + adm_in_channels = file.get_tensor(f"{prefix}y_embedder.mlp.0.weight").shape[1] + context_shape = file.get_tensor(f"{prefix}context_embedder.weight").shape + context_embedder_config = { + "target": "torch.nn.Linear", + "params": { + "in_features": context_shape[1], + "out_features": context_shape[0] + } + } + self.diffusion_model = MMDiT(input_size=None, pos_embed_scaling_factor=None, pos_embed_offset=None, pos_embed_max_size=pos_embed_max_size, patch_size=patch_size, in_channels=16, depth=depth, num_patches=num_patches, adm_in_channels=adm_in_channels, context_embedder_config=context_embedder_config, device=device, dtype=dtype) + self.model_sampling = ModelSamplingDiscreteFlow(shift=shift) + + def apply_model(self, x, sigma, c_crossattn=None, y=None): + dtype = self.get_dtype() + timestep = self.model_sampling.timestep(sigma).float() + model_output = self.diffusion_model(x.to(dtype), timestep, context=c_crossattn.to(dtype), y=y.to(dtype)).float() + return self.model_sampling.calculate_denoised(sigma, model_output, x) + + def forward(self, *args, **kwargs): + return self.apply_model(*args, **kwargs) + + def get_dtype(self): + return self.diffusion_model.dtype + + +class CFGDenoiser(torch.nn.Module): + """Helper for applying CFG Scaling to diffusion outputs""" + def __init__(self, model): + super().__init__() + self.model = model + + def forward(self, x, timestep, cond, uncond, cond_scale): + # Run cond and uncond in a batch together + batched = self.model.apply_model(torch.cat([x, 
x]), torch.cat([timestep, timestep]), c_crossattn=torch.cat([cond["c_crossattn"], uncond["c_crossattn"]]), y=torch.cat([cond["y"], uncond["y"]])) + # Then split and apply CFG Scaling + pos_out, neg_out = batched.chunk(2) + scaled = neg_out + (pos_out - neg_out) * cond_scale + return scaled + + +class SD3LatentFormat: + """Latents are slightly shifted from center - this class must be called after VAE Decode to correct for the shift""" + def __init__(self): + self.scale_factor = 1.5305 + self.shift_factor = 0.0609 + + def process_in(self, latent): + return (latent - self.shift_factor) * self.scale_factor + + def process_out(self, latent): + return (latent / self.scale_factor) + self.shift_factor + + def decode_latent_to_preview(self, x0): + """Quick RGB approximate preview of sd3 latents""" + factors = torch.tensor([ + [-0.0645, 0.0177, 0.1052], [ 0.0028, 0.0312, 0.0650], + [ 0.1848, 0.0762, 0.0360], [ 0.0944, 0.0360, 0.0889], + [ 0.0897, 0.0506, -0.0364], [-0.0020, 0.1203, 0.0284], + [ 0.0855, 0.0118, 0.0283], [-0.0539, 0.0658, 0.1047], + [-0.0057, 0.0116, 0.0700], [-0.0412, 0.0281, -0.0039], + [ 0.1106, 0.1171, 0.1220], [-0.0248, 0.0682, -0.0481], + [ 0.0815, 0.0846, 0.1207], [-0.0120, -0.0055, -0.0867], + [-0.0749, -0.0634, -0.0456], [-0.1418, -0.1457, -0.1259] + ], device="cpu") + latent_image = x0[0].permute(1, 2, 0).cpu() @ factors + + latents_ubyte = (((latent_image + 1) / 2) + .clamp(0, 1) # change scale from -1..1 to 0..1 + .mul(0xFF) # to 0..255 + .byte()).cpu() + + return Image.fromarray(latents_ubyte.numpy()) + + +################################################################################################# +### K-Diffusion Sampling +################################################################################################# + + +def append_dims(x, target_dims): + """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" + dims_to_append = target_dims - x.ndim + return x[(...,) + (None,) * dims_to_append] + + +def to_d(x, sigma, denoised): + """Converts a denoiser output to a Karras ODE derivative.""" + return (x - denoised) / append_dims(sigma, x.ndim) + + +@torch.no_grad() +@torch.autocast("cuda", dtype=torch.float16) +def sample_euler(model, x, sigmas, extra_args=None): + """Implements Algorithm 2 (Euler steps) from Karras et al. 
(2022).""" + extra_args = {} if extra_args is None else extra_args + s_in = x.new_ones([x.shape[0]]) + for i in range(len(sigmas) - 1): + sigma_hat = sigmas[i] + denoised = model(x, sigma_hat * s_in, **extra_args) + d = to_d(x, sigma_hat, denoised) + dt = sigmas[i + 1] - sigma_hat + # Euler method + x = x + d * dt + return x + + +################################################################################################# +### VAE +################################################################################################# + + +def Normalize(in_channels, num_groups=32, dtype=torch.float32, device=None): + return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device) + + +class ResnetBlock(torch.nn.Module): + def __init__(self, *, in_channels, out_channels=None, dtype=torch.float32, device=None): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + + self.norm1 = Normalize(in_channels, dtype=dtype, device=device) + self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device) + self.norm2 = Normalize(out_channels, dtype=dtype, device=device) + self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device) + if self.in_channels != self.out_channels: + self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device) + else: + self.nin_shortcut = None + self.swish = torch.nn.SiLU(inplace=True) + + def forward(self, x): + hidden = x + hidden = self.norm1(hidden) + hidden = self.swish(hidden) + hidden = self.conv1(hidden) + hidden = self.norm2(hidden) + hidden = self.swish(hidden) + hidden = self.conv2(hidden) + if self.in_channels != self.out_channels: + x = self.nin_shortcut(x) + return x + hidden + + +class AttnBlock(torch.nn.Module): + def __init__(self, in_channels, dtype=torch.float32, device=None): + super().__init__() + self.norm = Normalize(in_channels, dtype=dtype, device=device) + self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device) + self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device) + self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device) + self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device) + + def forward(self, x): + hidden = self.norm(x) + q = self.q(hidden) + k = self.k(hidden) + v = self.v(hidden) + b, c, h, w = q.shape + q, k, v = map(lambda x: einops.rearrange(x, "b c h w -> b 1 (h w) c").contiguous(), (q, k, v)) + hidden = torch.nn.functional.scaled_dot_product_attention(q, k, v) # scale is dim ** -0.5 per default + hidden = einops.rearrange(hidden, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b) + hidden = self.proj_out(hidden) + return x + hidden + + +class Downsample(torch.nn.Module): + def __init__(self, in_channels, dtype=torch.float32, device=None): + super().__init__() + self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0, dtype=dtype, device=device) + + def forward(self, x): + pad = (0,1,0,1) + x = torch.nn.functional.pad(x, pad, mode="constant", value=0) + x = self.conv(x) + return x + + +class Upsample(torch.nn.Module): + def 
__init__(self, in_channels, dtype=torch.float32, device=None): + super().__init__() + self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device) + + def forward(self, x): + x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") + x = self.conv(x) + return x + + +class VAEEncoder(torch.nn.Module): + def __init__(self, ch=128, ch_mult=(1,2,4,4), num_res_blocks=2, in_channels=3, z_channels=16, dtype=torch.float32, device=None): + super().__init__() + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + # downsampling + self.conv_in = torch.nn.Conv2d(in_channels, ch, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device) + in_ch_mult = (1,) + tuple(ch_mult) + self.in_ch_mult = in_ch_mult + self.down = torch.nn.ModuleList() + for i_level in range(self.num_resolutions): + block = torch.nn.ModuleList() + attn = torch.nn.ModuleList() + block_in = ch*in_ch_mult[i_level] + block_out = ch*ch_mult[i_level] + for i_block in range(num_res_blocks): + block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, dtype=dtype, device=device)) + block_in = block_out + down = torch.nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions - 1: + down.downsample = Downsample(block_in, dtype=dtype, device=device) + self.down.append(down) + # middle + self.mid = torch.nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device) + self.mid.attn_1 = AttnBlock(block_in, dtype=dtype, device=device) + self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device) + # end + self.norm_out = Normalize(block_in, dtype=dtype, device=device) + self.conv_out = torch.nn.Conv2d(block_in, 2 * z_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device) + self.swish = torch.nn.SiLU(inplace=True) + + def forward(self, x): + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1]) + hs.append(h) + if i_level != self.num_resolutions-1: + hs.append(self.down[i_level].downsample(hs[-1])) + # middle + h = hs[-1] + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + # end + h = self.norm_out(h) + h = self.swish(h) + h = self.conv_out(h) + return h + + +class VAEDecoder(torch.nn.Module): + def __init__(self, ch=128, out_ch=3, ch_mult=(1, 2, 4, 4), num_res_blocks=2, resolution=256, z_channels=16, dtype=torch.float32, device=None): + super().__init__() + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + block_in = ch * ch_mult[self.num_resolutions - 1] + curr_res = resolution // 2 ** (self.num_resolutions - 1) + # z to block_in + self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device) + # middle + self.mid = torch.nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device) + self.mid.attn_1 = AttnBlock(block_in, dtype=dtype, device=device) + self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device) + # upsampling + self.up = torch.nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = torch.nn.ModuleList() + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + 
block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, dtype=dtype, device=device)) + block_in = block_out + up = torch.nn.Module() + up.block = block + if i_level != 0: + up.upsample = Upsample(block_in, dtype=dtype, device=device) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + # end + self.norm_out = Normalize(block_in, dtype=dtype, device=device) + self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device) + self.swish = torch.nn.SiLU(inplace=True) + + def forward(self, z): + # z to block_in + hidden = self.conv_in(z) + # middle + hidden = self.mid.block_1(hidden) + hidden = self.mid.attn_1(hidden) + hidden = self.mid.block_2(hidden) + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + hidden = self.up[i_level].block[i_block](hidden) + if i_level != 0: + hidden = self.up[i_level].upsample(hidden) + # end + hidden = self.norm_out(hidden) + hidden = self.swish(hidden) + hidden = self.conv_out(hidden) + return hidden + + +class SDVAE(torch.nn.Module): + def __init__(self, dtype=torch.float32, device=None): + super().__init__() + self.encoder = VAEEncoder(dtype=dtype, device=device) + self.decoder = VAEDecoder(dtype=dtype, device=device) + + @torch.autocast("cuda", dtype=torch.float16) + def decode(self, latent): + return self.decoder(latent) + + @torch.autocast("cuda", dtype=torch.float16) + def encode(self, image): + hidden = self.encoder(image) + mean, logvar = torch.chunk(hidden, 2, dim=1) + logvar = torch.clamp(logvar, -30.0, 20.0) + std = torch.exp(0.5 * logvar) + return mean + std * torch.randn_like(mean) From 0c01ba130553ecff6fc20038dc00b2a228226485 Mon Sep 17 00:00:00 2001 From: kahmed10 <15948690+kahmed10@users.noreply.github.com> Date: Fri, 20 Sep 2024 00:39:12 -0500 Subject: [PATCH 03/17] continued progress --- .../python_stable_diffusion_3/export_onnx.py | 6 +- .../python_stable_diffusion_3/sd3_impls.py | 68 ++--- .../python_stable_diffusion_3/txt2img.py | 281 +++++++++++++----- 3 files changed, 237 insertions(+), 118 deletions(-) diff --git a/examples/diffusion/python_stable_diffusion_3/export_onnx.py b/examples/diffusion/python_stable_diffusion_3/export_onnx.py index 2610b321ee6..6c3e9fc826e 100644 --- a/examples/diffusion/python_stable_diffusion_3/export_onnx.py +++ b/examples/diffusion/python_stable_diffusion_3/export_onnx.py @@ -9,9 +9,9 @@ x=torch.randint(1, (1, 77)) # pipe.text_encoder.eval() output_path='models' -encoder_path=output_path+'/text_encoder/text_encoder.onnx' -encoder_2_path=output_path+'/text_encoder_2/text_encoder_2.onnx' -encoder_3_path=output_path+'/text_encoder_3/text_encoder_3.onnx' +encoder_path=output_path+'/text_encoder/model.onnx' +encoder_2_path=output_path+'/text_encoder_2/model.onnx' +encoder_3_path=output_path+'/text_encoder_3/model.onnx' print(output_path) # os.makedirs(os.path.dirname(output_path), exist_ok=True) os.makedirs(os.path.dirname(encoder_path), exist_ok=True) diff --git a/examples/diffusion/python_stable_diffusion_3/sd3_impls.py b/examples/diffusion/python_stable_diffusion_3/sd3_impls.py index 8b69d58e2da..9074166f375 100644 --- a/examples/diffusion/python_stable_diffusion_3/sd3_impls.py +++ b/examples/diffusion/python_stable_diffusion_3/sd3_impls.py @@ -20,7 +20,7 @@ ### Impls of the SD3 core diffusion model and VAE import torch, math, einops -from mmdit import MMDiT +# from mmdit import MMDiT from PIL import Image @@ -63,39 +63,39 @@ def 
noise_scaling(self, sigma, noise, latent_image, max_denoise=False): return sigma * noise + (1.0 - sigma) * latent_image -class BaseModel(torch.nn.Module): - """Wrapper around the core MM-DiT model""" - def __init__(self, shift=1.0, device=None, dtype=torch.float32, file=None, prefix=""): - super().__init__() - # Important configuration values can be quickly determined by checking shapes in the source file - # Some of these will vary between models (eg 2B vs 8B primarily differ in their depth, but also other details change) - patch_size = file.get_tensor(f"{prefix}x_embedder.proj.weight").shape[2] - depth = file.get_tensor(f"{prefix}x_embedder.proj.weight").shape[0] // 64 - num_patches = file.get_tensor(f"{prefix}pos_embed").shape[1] - pos_embed_max_size = round(math.sqrt(num_patches)) - adm_in_channels = file.get_tensor(f"{prefix}y_embedder.mlp.0.weight").shape[1] - context_shape = file.get_tensor(f"{prefix}context_embedder.weight").shape - context_embedder_config = { - "target": "torch.nn.Linear", - "params": { - "in_features": context_shape[1], - "out_features": context_shape[0] - } - } - self.diffusion_model = MMDiT(input_size=None, pos_embed_scaling_factor=None, pos_embed_offset=None, pos_embed_max_size=pos_embed_max_size, patch_size=patch_size, in_channels=16, depth=depth, num_patches=num_patches, adm_in_channels=adm_in_channels, context_embedder_config=context_embedder_config, device=device, dtype=dtype) - self.model_sampling = ModelSamplingDiscreteFlow(shift=shift) - - def apply_model(self, x, sigma, c_crossattn=None, y=None): - dtype = self.get_dtype() - timestep = self.model_sampling.timestep(sigma).float() - model_output = self.diffusion_model(x.to(dtype), timestep, context=c_crossattn.to(dtype), y=y.to(dtype)).float() - return self.model_sampling.calculate_denoised(sigma, model_output, x) - - def forward(self, *args, **kwargs): - return self.apply_model(*args, **kwargs) - - def get_dtype(self): - return self.diffusion_model.dtype +# class BaseModel(torch.nn.Module): +# """Wrapper around the core MM-DiT model""" +# def __init__(self, shift=1.0, device=None, dtype=torch.float32, file=None, prefix=""): +# super().__init__() +# # Important configuration values can be quickly determined by checking shapes in the source file +# # Some of these will vary between models (eg 2B vs 8B primarily differ in their depth, but also other details change) +# patch_size = file.get_tensor(f"{prefix}x_embedder.proj.weight").shape[2] +# depth = file.get_tensor(f"{prefix}x_embedder.proj.weight").shape[0] // 64 +# num_patches = file.get_tensor(f"{prefix}pos_embed").shape[1] +# pos_embed_max_size = round(math.sqrt(num_patches)) +# adm_in_channels = file.get_tensor(f"{prefix}y_embedder.mlp.0.weight").shape[1] +# context_shape = file.get_tensor(f"{prefix}context_embedder.weight").shape +# context_embedder_config = { +# "target": "torch.nn.Linear", +# "params": { +# "in_features": context_shape[1], +# "out_features": context_shape[0] +# } +# } +# self.diffusion_model = MMDiT(input_size=None, pos_embed_scaling_factor=None, pos_embed_offset=None, pos_embed_max_size=pos_embed_max_size, patch_size=patch_size, in_channels=16, depth=depth, num_patches=num_patches, adm_in_channels=adm_in_channels, context_embedder_config=context_embedder_config, device=device, dtype=dtype) +# self.model_sampling = ModelSamplingDiscreteFlow(shift=shift) + +# def apply_model(self, x, sigma, c_crossattn=None, y=None): +# dtype = self.get_dtype() +# timestep = self.model_sampling.timestep(sigma).float() +# model_output = 
self.diffusion_model(x.to(dtype), timestep, context=c_crossattn.to(dtype), y=y.to(dtype)).float() +# return self.model_sampling.calculate_denoised(sigma, model_output, x) + +# def forward(self, *args, **kwargs): +# return self.apply_model(*args, **kwargs) + +# def get_dtype(self): +# return self.diffusion_model.dtype class CFGDenoiser(torch.nn.Module): diff --git a/examples/diffusion/python_stable_diffusion_3/txt2img.py b/examples/diffusion/python_stable_diffusion_3/txt2img.py index c175ea01ce8..9befbedc570 100644 --- a/examples/diffusion/python_stable_diffusion_3/txt2img.py +++ b/examples/diffusion/python_stable_diffusion_3/txt2img.py @@ -24,6 +24,7 @@ from diffusers import EulerDiscreteScheduler from other_impls import SD3Tokenizer +from sd3_impls import ModelSamplingDiscreteFlow, SD3LatentFormat from PIL import Image @@ -60,7 +61,7 @@ def get_args(): parser.add_argument( "--onnx-model-path", type=str, - default="models/sd21-onnx/", + default="models/", help="Path to onnx model files.", ) @@ -106,7 +107,7 @@ def get_args(): "-t", "--steps", type=int, - default=20, + default=1, help="Number of steps", ) @@ -211,13 +212,16 @@ def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, model_id = "stabilityai/stable-diffusion-2-1" print(f"Using {model_id}") - print("Creating EulerDiscreteScheduler scheduler") - self.scheduler = EulerDiscreteScheduler.from_pretrained( - model_id, subfolder="scheduler") + self.model_sampling = ModelSamplingDiscreteFlow(shift=1.0) + # print("Creating EulerDiscreteScheduler scheduler") + # self.scheduler = EulerDiscreteScheduler.from_pretrained( + # model_id, subfolder="scheduler") + + # print("Creating CLIPTokenizer tokenizer...") + # self.clip_tokenizer = CLIPTokenizer.from_pretrained(model_id, + # subfolder="tokenizer") + self.tokenizer = SD3Tokenizer() - print("Creating CLIPTokenizer tokenizer...") - self.clip_tokenizer = CLIPTokenizer.from_pretrained(model_id, - subfolder="tokenizer") if fp16 is None: fp16 = [] elif "all" in fp16: @@ -237,7 +241,7 @@ def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, exhaustive_tune=exhaustive_tune, offload_copy=False, batch=self.batch), - "clip-g": + "clip-l": StableDiffusionMGX.load_mgx_model( "text_encoder", {"input_ids": [1, 77]}, onnx_model_path, @@ -246,7 +250,7 @@ def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, force_compile=force_compile, exhaustive_tune=exhaustive_tune, offload_copy=False), - "clip-l": + "clip-g": StableDiffusionMGX.load_mgx_model( "text_encoder_2", {"input_ids": [1, 77]}, onnx_model_path, @@ -269,7 +273,7 @@ def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, "mmdit", { "sample": [2 * self.batch, 16, 128, 128], "sigma": [2 * self.batch], - "c_crossattn": [2 * self.batch, 154, 4096] + "c_crossattn": [2 * self.batch, 154, 4096], "y": [2 * self.batch, 2048], }, onnx_model_path, @@ -285,7 +289,7 @@ def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, "clip-g": allocate_torch_tensors(self.models["clip-g"]), "clip-l": allocate_torch_tensors(self.models["clip-l"]), "t5xxl": allocate_torch_tensors(self.models["t5xxl"]), - "unet": allocate_torch_tensors(self.models["unet"]), + "mmdit": allocate_torch_tensors(self.models["mmdit"]), "vae": allocate_torch_tensors(self.models["vae"]), } @@ -293,26 +297,29 @@ def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, "clip-g": tensors_to_args(self.tensors['clip-g']), "clip-l": tensors_to_args(self.tensors['clip-l']), "t5xxl": tensors_to_args(self.tensors['t5xxl']), 
- "unet": tensors_to_args(self.tensors['unet']), + "mmdit": tensors_to_args(self.tensors['mmdit']), "vae": tensors_to_args(self.tensors['vae']), } self.events = { - "warmup": - HipEventPair(start=hip.hipEventCreate()[1], - end=hip.hipEventCreate()[1]), + # "warmup": + # HipEventPair(start=hip.hipEventCreate()[1], + # end=hip.hipEventCreate()[1]), "run": HipEventPair(start=hip.hipEventCreate()[1], end=hip.hipEventCreate()[1]), - "clip-g": - HipEventPair(start=hip.hipEventCreate()[1], - end=hip.hipEventCreate()[1]), - "clip-l": - HipEventPair(start=hip.hipEventCreate()[1], - end=hip.hipEventCreate()[1]), - "t5xxl": + "clip": HipEventPair(start=hip.hipEventCreate()[1], end=hip.hipEventCreate()[1]), + # "clip-g": + # HipEventPair(start=hip.hipEventCreate()[1], + # end=hip.hipEventCreate()[1]), + # "clip-l": + # HipEventPair(start=hip.hipEventCreate()[1], + # end=hip.hipEventCreate()[1]), + # "t5xxl": + # HipEventPair(start=hip.hipEventCreate()[1], + # end=hip.hipEventCreate()[1]), "denoise": HipEventPair(start=hip.hipEventCreate()[1], end=hip.hipEventCreate()[1]), @@ -344,39 +351,45 @@ def run(self, prompt, negative_prompt, steps, seed, scale): self.profile_start("run") # need to set this for each run - self.scheduler.set_timesteps(steps, device="cuda") + # self.scheduler.set_timesteps(steps, device="cuda") print("Tokenizing prompts...") - prompt_tokens = self.tokenize(prompt, negative_prompt) + prompt_tokens = self.tokenize(prompt) + neg_prompt_tokens = self.tokenize(negative_prompt) print("Creating text embeddings...") self.profile_start("clip") - text_embeddings = self.get_embeddings(prompt_tokens) + prompt_embeddings = self.get_embeddings(prompt_tokens) + neg_prompt_embeddings = self.get_embeddings(neg_prompt_tokens) self.profile_end("clip") - print( - f"Creating random input data ({self.batch}x{16}x{128}x{128}) (latents) with seed={seed}..." - ) - latents = torch.randn( - (self.batch, 16, 128, 128), - generator=torch.manual_seed(seed)).to(device="cuda") + # print( + # f"Creating random input data ({self.batch}x{16}x{128}x{128}) (latents) with seed={seed}..." 
+ # ) + # latents = torch.randn( + # (self.batch, 16, 128, 128), + # generator=torch.manual_seed(seed)).to(device="cuda") + + # print("Apply initial noise sigma\n") + # latents = latents * self.scheduler.init_noise_sigma - print("Apply initial noise sigma\n") - latents = latents * self.scheduler.init_noise_sigma + cfg_scale = 5 + latent = self.get_empty_latent(1024, 1024) print("Running denoising loop...") self.profile_start("denoise") - for step, t in enumerate(self.scheduler.timesteps): - print(f"#{step}/{len(self.scheduler.timesteps)} step") - latents = self.denoise_step(text_embeddings, latents, t, scale) + latent = self.do_sampling(latent, seed, prompt_embeddings, neg_prompt_embeddings, steps, cfg_scale) + # for step, t in enumerate(self.scheduler.timesteps): + # print(f"#{step}/{len(self.scheduler.timesteps)} step") + # latents = self.denoise_step(text_embeddings, latents, t, scale) self.profile_end("denoise") - print("Scale denoised result...") - latents = 1 / 0.18215 * latents + # print("Scale denoised result...") + # latents = 1 / 0.18215 * latents self.profile_start("decode") print("Decode denoised result...") - image = self.decode(latents) + image = self.decode(latent) self.profile_end("decode") torch.cuda.synchronize() @@ -384,13 +397,13 @@ def run(self, prompt, negative_prompt, steps, seed, scale): return image def print_summary(self, denoise_steps): - print('WARMUP\t{:>9.2f} ms'.format( - hip.hipEventElapsedTime(self.events['warmup'].start, - self.events['warmup'].end)[1])) + # print('WARMUP\t{:>9.2f} ms'.format( + # hip.hipEventElapsedTime(self.events['warmup'].start, + # self.events['warmup'].end)[1])) print('CLIP\t{:>9.2f} ms'.format( hip.hipEventElapsedTime(self.events['clip'].start, self.events['clip'].end)[1])) - print('UNetx{}\t{:>9.2f} ms'.format( + print('mmditx{}\t{:>9.2f} ms'.format( str(denoise_steps), hip.hipEventElapsedTime(self.events['denoise'].start, self.events['denoise'].end)[1])) @@ -440,21 +453,38 @@ def load_mgx_model(name, return model @measure - def tokenize(self, prompt, negative_prompt): - return self.tokenizer([prompt, negative_prompt], - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt") + def tokenize(self, prompt): + return self.tokenizer.tokenize_with_weights(prompt) + + def encode_token_weights(self, model_name, token_weight_pairs): + tokens = list(map(lambda a: a[0], token_weight_pairs[0])) + tokens = torch.tensor([tokens], dtype=torch.int64, device=self.device) + copy_tensor_sync(self.tensors[model_name]["input_ids"], + tokens.to(torch.int32)) + run_model_sync(self.models[model_name], self.model_args[model_name]) + encoder_out = self.tensors[model_name][get_output_name(0)] + encoder_out2 = None + if len(self.tensors[model_name] > 1): + encoder_out2 = self.tensors[model_name][get_output_name(1)] + + if encoder_out2 is not None: + first_pooled = encoder_out2[0:1] + else: + first_pooled = encoder_out2 + output = [encoder_out[0:1]] + return torch.cat(output, dim=-2), first_pooled + @measure - def get_embeddings(self, prompt_tokens): - copy_tensor_sync(self.tensors["clip"]["input_ids"], - prompt_tokens.input_ids.to(torch.int32)) - run_model_sync(self.models["clip"], self.model_args["clip"]) - text_embeds = self.tensors["clip"][get_output_name(0)] - return torch.cat( - [torch.cat([i] * self.batch) for i in text_embeds.split(1)]) + def get_embeddings(self, prompt_tokens, neg_prompt_tokens): + l_out, l_pooled = self.encode_token_weights("clip-l", prompt_tokens["l"]) + g_out, g_pooled = 
self.encode_token_weights("clip-g", prompt_tokens["g"]) + t5_out, _ = self.encode_token_weights("t5xxl", prompt_tokens["t5xxl"]) + lg_out = torch.cat([l_out, g_out], dim=-1) + lg_out = torch.nn.functional.pad(lg_out, (0, 4096 - lg_out.shape[-1])) + + return torch.cat([lg_out, t5_out], dim=-2), torch.cat((l_pooled, g_pooled), dim=-1) + @staticmethod def convert_to_rgb_image(image): @@ -467,6 +497,95 @@ def convert_to_rgb_image(image): def save_image(pil_image, filename="output.png"): pil_image.save(filename) + def CFGDenoiser(self, x, timestep, cond, uncond, cond_scale): + # Run cond and uncond in a batch together + x = torch.cat([x, x]) + timestep = torch.cat([timestep, timestep]) + c_crossattn = torch.cat([cond["c_crossattn"], uncond["c_crossattn"]]) + y = torch.cat([cond["y"], uncond["y"]]) + # batched = self.model.apply_model(torch.cat([x, x]), torch.cat([timestep, timestep]), c_crossattn=torch.cat([cond["c_crossattn"], uncond["c_crossattn"]]), y=torch.cat([cond["y"], uncond["y"]])) + copy_tensor_sync(self.tensors["mmdit"]["sample"], x) + copy_tensor_sync(self.tensors["mmdit"]["sigma"], timestep) + copy_tensor_sync(self.tensors["mmdit"]["c_crossattn"], c_crossattn) + copy_tensor_sync(self.tensors["mmdit"]["y"], y) + + run_model_sync(self.models["mmdit"], self.model_args['mmdit']) + + pos_out, neg_out = torch.tensor_split( + self.tensors["mmdit"][get_output_name(0)], 2) + # Then split and apply CFG Scaling + # pos_out, neg_out = batched.chunk(2) + scaled = neg_out + (pos_out - neg_out) * cond_scale + return scaled + + def append_dims(x, target_dims): + """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" + dims_to_append = target_dims - x.ndim + return x[(...,) + (None,) * dims_to_append] + + def to_d(x, sigma, denoised): + """Converts a denoiser output to a Karras ODE derivative.""" + return (x - denoised) / append_dims(sigma, x.ndim) + + + def sample_euler(self, x, sigmas, conditioning, neg_cond, cfg_scale): + """Implements Algorithm 2 (Euler steps) from Karras et al. 
(2022).""" + # extra_args = {} if extra_args is None else extra_args + s_in = x.new_ones([x.shape[0]]) + for i in range(len(sigmas) - 1): + sigma_hat = sigmas[i] + denoised = CFGDenoiser(x, sigma_hat * s_in, conditioning, neg_cond, cfg_scale) + d = to_d(x, sigma_hat, denoised) + dt = sigmas[i + 1] - sigma_hat + # Euler method + x = x + d * dt + return x + + def get_empty_latent(self, width, height): + # print("Prep an empty latent...") + return torch.ones(1, 16, height // 8, width // 8, device="cpu") * 0.0609 + + def get_sigmas(self, sampling, steps): + start = sampling.timestep(sampling.sigma_max) + end = sampling.timestep(sampling.sigma_min) + timesteps = torch.linspace(start, end, steps) + sigs = [] + for x in range(len(timesteps)): + ts = timesteps[x] + sigs.append(sampling.sigma(ts)) + sigs += [0.0] + return torch.FloatTensor(sigs) + + def get_noise(self, seed, latent): + generator = torch.manual_seed(seed) + print(f"dtype = {latent.dtype}, layout = {latent.layout}, device = {latent.device}") + return torch.randn(latent.size(), dtype=torch.float32, layout=latent.layout, generator=generator).to(latent.dtype) + + def max_denoise(self, sigmas): + max_sigma = float(self.model_sampling.sigma_max) + sigma = float(sigmas[0]) + return math.isclose(max_sigma, sigma, rel_tol=1e-05) or sigma > max_sigma + + def fix_cond(self, cond): + cond, pooled = (cond[0].half(), cond[1].half()) + return { "c_crossattn": cond, "y": pooled } + + def do_sampling(self, latent, seed, conditioning, neg_cond, steps, cfg_scale, denoise=1.0) -> torch.Tensor: + latent = latent.half() + # self.sd3.model = self.sd3.model.cuda() + noise = self.get_noise(seed, latent) + sigmas = self.get_sigmas(self.model_sampling, steps) + sigmas = sigmas[int(steps * (1 - denoise)):] + conditioning = self.fix_cond(conditioning) + neg_cond = self.fix_cond(neg_cond) + # extra_args = { "cond": conditioning, "uncond": neg_cond, "cond_scale": cfg_scale } + noise_scaled = self.model_sampling.noise_scaling(sigmas[0], noise, latent, self.max_denoise(sigmas)) + latent = sample_euler(noise_scaled, sigmas, conditioning, neg_cond, cfg_scale) + latent = SD3LatentFormat().process_out(latent) + # self.sd3.model = self.sd3.model.cpu() + print("Sampling done") + return latent + @measure def denoise_step(self, text_embeddings, latents, t, scale): latents_model_input = torch.cat([latents] * 2) @@ -497,28 +616,28 @@ def decode(self, latents): run_model_sync(self.models["vae"], self.model_args["vae"]) return self.tensors["vae"][get_output_name(0)] - @measure - def warmup(self, num_runs): - self.profile_start("warmup") - copy_tensor_sync(self.tensors["clip"]["input_ids"], - torch.ones((2, 77)).to(torch.int32)) - copy_tensor_sync( - self.tensors["unet"]["sample"], - torch.randn((2 * self.batch, 4, 64, 64)).to(torch.float32)) - copy_tensor_sync( - self.tensors["unet"]["encoder_hidden_states"], - torch.randn((2 * self.batch, 77, 1024)).to(torch.float32)) - copy_tensor_sync(self.tensors["unet"]["timestep"], - torch.atleast_1d(torch.randn(1).to(torch.int64))) - copy_tensor_sync( - self.tensors["vae"]["latent_sample"], - torch.randn((self.batch, 4, 64, 64)).to(torch.float32)) - - for _ in range(num_runs): - run_model_sync(self.models["clip"], self.model_args["clip"]) - run_model_sync(self.models["unet"], self.model_args["unet"]) - run_model_sync(self.models["vae"], self.model_args["vae"]) - self.profile_end("warmup") + # @measure + # def warmup(self, num_runs): + # self.profile_start("warmup") + # copy_tensor_sync(self.tensors["clip"]["input_ids"], + # 
torch.ones((2, 77)).to(torch.int32)) + # copy_tensor_sync( + # self.tensors["unet"]["sample"], + # torch.randn((2 * self.batch, 4, 64, 64)).to(torch.float32)) + # copy_tensor_sync( + # self.tensors["unet"]["encoder_hidden_states"], + # torch.randn((2 * self.batch, 77, 1024)).to(torch.float32)) + # copy_tensor_sync(self.tensors["unet"]["timestep"], + # torch.atleast_1d(torch.randn(1).to(torch.int64))) + # copy_tensor_sync( + # self.tensors["vae"]["latent_sample"], + # torch.randn((self.batch, 4, 64, 64)).to(torch.float32)) + + # for _ in range(num_runs): + # run_model_sync(self.models["clip"], self.model_args["clip"]) + # run_model_sync(self.models["unet"], self.model_args["unet"]) + # run_model_sync(self.models["vae"], self.model_args["vae"]) + # self.profile_end("warmup") if __name__ == "__main__": @@ -527,8 +646,8 @@ def warmup(self, num_runs): sd = StableDiffusionMGX(args.onnx_model_path, args.compiled_model_path, args.fp16, args.batch, args.force_compile, args.exhaustive_tune) - print("Warmup") - sd.warmup(5) + # print("Warmup") + # sd.warmup(5) print("Run") result = sd.run(args.prompt, args.negative_prompt, args.steps, args.seed, args.scale) From 6483a962bdb0dc3f9ad71b62b6c4c3e3ecce0421 Mon Sep 17 00:00:00 2001 From: kahmed10 <15948690+kahmed10@users.noreply.github.com> Date: Fri, 20 Sep 2024 01:24:14 -0500 Subject: [PATCH 04/17] continued progress --- .../requirements.txt | 4 ++ .../python_stable_diffusion_3/txt2img.py | 47 +++++++++++-------- 2 files changed, 32 insertions(+), 19 deletions(-) create mode 100644 examples/diffusion/python_stable_diffusion_3/requirements.txt diff --git a/examples/diffusion/python_stable_diffusion_3/requirements.txt b/examples/diffusion/python_stable_diffusion_3/requirements.txt new file mode 100644 index 00000000000..1789f8f4939 --- /dev/null +++ b/examples/diffusion/python_stable_diffusion_3/requirements.txt @@ -0,0 +1,4 @@ +diffusers==0.30.3 +einops==0.8.0 +--extra-index-url https://test.pypi.org/simple +hip-python-as-cuda \ No newline at end of file diff --git a/examples/diffusion/python_stable_diffusion_3/txt2img.py b/examples/diffusion/python_stable_diffusion_3/txt2img.py index 9befbedc570..cf891083567 100644 --- a/examples/diffusion/python_stable_diffusion_3/txt2img.py +++ b/examples/diffusion/python_stable_diffusion_3/txt2img.py @@ -29,6 +29,7 @@ from PIL import Image import migraphx as mgx +import math import os import sys import torch @@ -221,6 +222,7 @@ def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, # self.clip_tokenizer = CLIPTokenizer.from_pretrained(model_id, # subfolder="tokenizer") self.tokenizer = SD3Tokenizer() + self.device = "cuda" if fp16 is None: fp16 = [] @@ -233,7 +235,7 @@ def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, self.models = { "vae": StableDiffusionMGX.load_mgx_model( - "vae_decoder", {"latent_sample": [self.batch, 16, 128, 128]}, + "vae_decoder", {"latent": [self.batch, 16, 128, 128]}, onnx_model_path, compiled_model_path=compiled_model_path, use_fp16="vae" in fp16, @@ -464,8 +466,10 @@ def encode_token_weights(self, model_name, token_weight_pairs): run_model_sync(self.models[model_name], self.model_args[model_name]) encoder_out = self.tensors[model_name][get_output_name(0)] encoder_out2 = None - if len(self.tensors[model_name] > 1): - encoder_out2 = self.tensors[model_name][get_output_name(1)] + if model_name != 't5xxl': + # flipped outputs for clip text encoders... 
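+            # for the CLIP exports, output slot 0 appears to hold the pooled
+            # embedding and slot 1 the per-token hidden states, so the two
+            # are swapped here; the t5xxl export only emits hidden states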
+ encoder_out2 = encoder_out + encoder_out = self.tensors[model_name][get_output_name(1)] if encoder_out2 is not None: first_pooled = encoder_out2[0:1] @@ -476,12 +480,16 @@ def encode_token_weights(self, model_name, token_weight_pairs): @measure - def get_embeddings(self, prompt_tokens, neg_prompt_tokens): + def get_embeddings(self, prompt_tokens): l_out, l_pooled = self.encode_token_weights("clip-l", prompt_tokens["l"]) g_out, g_pooled = self.encode_token_weights("clip-g", prompt_tokens["g"]) t5_out, _ = self.encode_token_weights("t5xxl", prompt_tokens["t5xxl"]) lg_out = torch.cat([l_out, g_out], dim=-1) lg_out = torch.nn.functional.pad(lg_out, (0, 4096 - lg_out.shape[-1])) + print(f"lg_out shape: {lg_out.shape}") + print(f"t5_out shape: {t5_out.shape}") + print(f"l_pooled shape: {l_pooled.shape}") + print(f"g_pooled shape: {g_pooled.shape}") return torch.cat([lg_out, t5_out], dim=-2), torch.cat((l_pooled, g_pooled), dim=-1) @@ -518,23 +526,24 @@ def CFGDenoiser(self, x, timestep, cond, uncond, cond_scale): scaled = neg_out + (pos_out - neg_out) * cond_scale return scaled - def append_dims(x, target_dims): - """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" - dims_to_append = target_dims - x.ndim - return x[(...,) + (None,) * dims_to_append] - - def to_d(x, sigma, denoised): - """Converts a denoiser output to a Karras ODE derivative.""" - return (x - denoised) / append_dims(sigma, x.ndim) def sample_euler(self, x, sigmas, conditioning, neg_cond, cfg_scale): + def append_dims(x, target_dims): + """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" + dims_to_append = target_dims - x.ndim + return x[(...,) + (None,) * dims_to_append] + + def to_d(x, sigma, denoised): + """Converts a denoiser output to a Karras ODE derivative.""" + return (x - denoised) / append_dims(sigma, x.ndim) + """Implements Algorithm 2 (Euler steps) from Karras et al. 
(2022).""" # extra_args = {} if extra_args is None else extra_args s_in = x.new_ones([x.shape[0]]) for i in range(len(sigmas) - 1): sigma_hat = sigmas[i] - denoised = CFGDenoiser(x, sigma_hat * s_in, conditioning, neg_cond, cfg_scale) + denoised = self.CFGDenoiser(x, sigma_hat * s_in, conditioning, neg_cond, cfg_scale) d = to_d(x, sigma_hat, denoised) dt = sigmas[i + 1] - sigma_hat # Euler method @@ -567,20 +576,20 @@ def max_denoise(self, sigmas): return math.isclose(max_sigma, sigma, rel_tol=1e-05) or sigma > max_sigma def fix_cond(self, cond): - cond, pooled = (cond[0].half(), cond[1].half()) + cond, pooled = (cond[0].half().cuda(), cond[1].half().cuda()) return { "c_crossattn": cond, "y": pooled } def do_sampling(self, latent, seed, conditioning, neg_cond, steps, cfg_scale, denoise=1.0) -> torch.Tensor: - latent = latent.half() + latent = latent.half().cuda() # self.sd3.model = self.sd3.model.cuda() - noise = self.get_noise(seed, latent) - sigmas = self.get_sigmas(self.model_sampling, steps) + noise = self.get_noise(seed, latent).cuda() + sigmas = self.get_sigmas(self.model_sampling, steps).cuda() sigmas = sigmas[int(steps * (1 - denoise)):] conditioning = self.fix_cond(conditioning) neg_cond = self.fix_cond(neg_cond) # extra_args = { "cond": conditioning, "uncond": neg_cond, "cond_scale": cfg_scale } noise_scaled = self.model_sampling.noise_scaling(sigmas[0], noise, latent, self.max_denoise(sigmas)) - latent = sample_euler(noise_scaled, sigmas, conditioning, neg_cond, cfg_scale) + latent = self.sample_euler(noise_scaled, sigmas, conditioning, neg_cond, cfg_scale) latent = SD3LatentFormat().process_out(latent) # self.sd3.model = self.sd3.model.cpu() print("Sampling done") @@ -612,7 +621,7 @@ def denoise_step(self, text_embeddings, latents, t, scale): @measure def decode(self, latents): - copy_tensor_sync(self.tensors["vae"]["latent_sample"], latents) + copy_tensor_sync(self.tensors["vae"]["latent"], latents) run_model_sync(self.models["vae"], self.model_args["vae"]) return self.tensors["vae"][get_output_name(0)] From d076d83e01c8b8b3c6aeed4fa147b4f604a24d23 Mon Sep 17 00:00:00 2001 From: kahmed10 <15948690+kahmed10@users.noreply.github.com> Date: Fri, 20 Sep 2024 01:28:52 -0500 Subject: [PATCH 05/17] update steps --- examples/diffusion/python_stable_diffusion_3/txt2img.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/diffusion/python_stable_diffusion_3/txt2img.py b/examples/diffusion/python_stable_diffusion_3/txt2img.py index cf891083567..e5db9f0493f 100644 --- a/examples/diffusion/python_stable_diffusion_3/txt2img.py +++ b/examples/diffusion/python_stable_diffusion_3/txt2img.py @@ -76,7 +76,7 @@ def get_args(): parser.add_argument( "--fp16", - choices=["all", "vae", "clip", "unet"], + choices=["all", "vae", "clip", "mmdit"], nargs="+", help="Quantize models with fp16 precision.", ) @@ -108,7 +108,7 @@ def get_args(): "-t", "--steps", type=int, - default=1, + default=50, help="Number of steps", ) From cecf24e27106d5797a040e9d69ed3c5296819eac Mon Sep 17 00:00:00 2001 From: kahmed10 <15948690+kahmed10@users.noreply.github.com> Date: Fri, 27 Sep 2024 19:33:52 -0500 Subject: [PATCH 06/17] update script --- .../python_stable_diffusion_3/txt2img.py | 83 +++++++++---------- 1 file changed, 41 insertions(+), 42 deletions(-) diff --git a/examples/diffusion/python_stable_diffusion_3/txt2img.py b/examples/diffusion/python_stable_diffusion_3/txt2img.py index e5db9f0493f..fccd474afab 100644 --- a/examples/diffusion/python_stable_diffusion_3/txt2img.py +++ 
b/examples/diffusion/python_stable_diffusion_3/txt2img.py @@ -62,7 +62,7 @@ def get_args(): parser.add_argument( "--onnx-model-path", type=str, - default="models/", + default="models/sd3", help="Path to onnx model files.", ) @@ -210,17 +210,9 @@ def allocate_torch_tensors(model): class StableDiffusionMGX(): def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, force_compile, exhaustive_tune): - model_id = "stabilityai/stable-diffusion-2-1" - print(f"Using {model_id}") self.model_sampling = ModelSamplingDiscreteFlow(shift=1.0) - # print("Creating EulerDiscreteScheduler scheduler") - # self.scheduler = EulerDiscreteScheduler.from_pretrained( - # model_id, subfolder="scheduler") - # print("Creating CLIPTokenizer tokenizer...") - # self.clip_tokenizer = CLIPTokenizer.from_pretrained(model_id, - # subfolder="tokenizer") self.tokenizer = SD3Tokenizer() self.device = "cuda" @@ -365,13 +357,6 @@ def run(self, prompt, negative_prompt, steps, seed, scale): neg_prompt_embeddings = self.get_embeddings(neg_prompt_tokens) self.profile_end("clip") - # print( - # f"Creating random input data ({self.batch}x{16}x{128}x{128}) (latents) with seed={seed}..." - # ) - # latents = torch.randn( - # (self.batch, 16, 128, 128), - # generator=torch.manual_seed(seed)).to(device="cuda") - # print("Apply initial noise sigma\n") # latents = latents * self.scheduler.init_noise_sigma @@ -461,6 +446,7 @@ def tokenize(self, prompt): def encode_token_weights(self, model_name, token_weight_pairs): tokens = list(map(lambda a: a[0], token_weight_pairs[0])) tokens = torch.tensor([tokens], dtype=torch.int64, device=self.device) + # print(f'token val: {tokens.flatten()[0:5]}') copy_tensor_sync(self.tensors[model_name]["input_ids"], tokens.to(torch.int32)) run_model_sync(self.models[model_name], self.model_args[model_name]) @@ -476,6 +462,9 @@ def encode_token_weights(self, model_name, token_weight_pairs): else: first_pooled = encoder_out2 output = [encoder_out[0:1]] + # print(f'token weight output for model {model_name}: {output[0].flatten()[0:5]}') + # if first_pooled is not None: + # print(f'token weight first_pooled for model {model_name}: {first_pooled.flatten()[0:5]}') return torch.cat(output, dim=-2), first_pooled @@ -511,6 +500,11 @@ def CFGDenoiser(self, x, timestep, cond, uncond, cond_scale): timestep = torch.cat([timestep, timestep]) c_crossattn = torch.cat([cond["c_crossattn"], uncond["c_crossattn"]]) y = torch.cat([cond["y"], uncond["y"]]) + # print(f'x out: {x.flatten()[0:5]}') + # print(f'timestep out: {timestep.flatten()[0:5]}') + # print(f'c_crossattn out: {c_crossattn.flatten()[0:5]}') + # print(f'y out: {y.flatten()[0:5]}') + # batched = self.model.apply_model(torch.cat([x, x]), torch.cat([timestep, timestep]), c_crossattn=torch.cat([cond["c_crossattn"], uncond["c_crossattn"]]), y=torch.cat([cond["y"], uncond["y"]])) copy_tensor_sync(self.tensors["mmdit"]["sample"], x) copy_tensor_sync(self.tensors["mmdit"]["sigma"], timestep) @@ -521,6 +515,9 @@ def CFGDenoiser(self, x, timestep, cond, uncond, cond_scale): pos_out, neg_out = torch.tensor_split( self.tensors["mmdit"][get_output_name(0)], 2) + # print(f'mmdit pos out: {pos_out.flatten()[0:5]}') + # print(f'mmdit neg_out out: {neg_out.flatten()[0:5]}') + # Then split and apply CFG Scaling # pos_out, neg_out = batched.chunk(2) scaled = neg_out + (pos_out - neg_out) * cond_scale @@ -581,18 +578,16 @@ def fix_cond(self, cond): def do_sampling(self, latent, seed, conditioning, neg_cond, steps, cfg_scale, denoise=1.0) -> torch.Tensor: latent = 
latent.half().cuda()
-        # self.sd3.model = self.sd3.model.cuda()
+        # print(f'latent vals: {latent.flatten()[0:5]}')
         noise = self.get_noise(seed, latent).cuda()
+        # print(f'noise vals: {noise.flatten()[0:5]}')
         sigmas = self.get_sigmas(self.model_sampling, steps).cuda()
         sigmas = sigmas[int(steps * (1 - denoise)):]
         conditioning = self.fix_cond(conditioning)
         neg_cond = self.fix_cond(neg_cond)
-        # extra_args = { "cond": conditioning, "uncond": neg_cond, "cond_scale": cfg_scale }
         noise_scaled = self.model_sampling.noise_scaling(sigmas[0], noise, latent, self.max_denoise(sigmas))
         latent = self.sample_euler(noise_scaled, sigmas, conditioning, neg_cond, cfg_scale)
         latent = SD3LatentFormat().process_out(latent)
-        # self.sd3.model = self.sd3.model.cpu()
-        print("Sampling done")
         return latent

     @measure
@@ -625,28 +620,37 @@ def decode(self, latents):
         run_model_sync(self.models["vae"], self.model_args["vae"])
         return self.tensors["vae"][get_output_name(0)]

-    # @measure
-    # def warmup(self, num_runs):
-    #     self.profile_start("warmup")
-    #     copy_tensor_sync(self.tensors["clip"]["input_ids"],
-    #                      torch.ones((2, 77)).to(torch.int32))
-    #     copy_tensor_sync(
-    #         self.tensors["unet"]["sample"],
-    #         torch.randn((2 * self.batch, 4, 64, 64)).to(torch.float32))
-    #     copy_tensor_sync(
-    #         self.tensors["unet"]["encoder_hidden_states"],
-    #         torch.randn((2 * self.batch, 77, 1024)).to(torch.float32))
-    #     copy_tensor_sync(self.tensors["unet"]["timestep"],
-    #                      torch.atleast_1d(torch.randn(1).to(torch.int64)))
-    #     copy_tensor_sync(
-    #         self.tensors["vae"]["latent_sample"],
-    #         torch.randn((self.batch, 4, 64, 64)).to(torch.float32))
-
-    #     for _ in range(num_runs):
-    #         run_model_sync(self.models["clip"], self.model_args["clip"])
-    #         run_model_sync(self.models["unet"], self.model_args["unet"])
-    #         run_model_sync(self.models["vae"], self.model_args["vae"])
-    #     self.profile_end("warmup")
+    @measure
+    def warmup(self, num_runs):
+        self.profile_start("warmup")
+        copy_tensor_sync(self.tensors["clip-l"]["input_ids"],
+                         torch.ones((1, 77)).to(torch.int32))
+        copy_tensor_sync(self.tensors["clip-g"]["input_ids"],
+                         torch.ones((1, 77)).to(torch.int32))
+        copy_tensor_sync(self.tensors["t5xxl"]["input_ids"],
+                         torch.ones((1, 77)).to(torch.int32))
+        # match the mmdit/vae input names, shapes and dtypes used in the run path
+        copy_tensor_sync(
+            self.tensors["mmdit"]["sample"],
+            torch.randn((2 * self.batch, 16, 128, 128)).to(torch.float16))
+        copy_tensor_sync(self.tensors["mmdit"]["sigma"],
+                         torch.randn((2 * self.batch, )).to(torch.float16))
+        copy_tensor_sync(
+            self.tensors["mmdit"]["c_crossattn"],
+            torch.randn((2 * self.batch, 154, 4096)).to(torch.float16))
+        copy_tensor_sync(self.tensors["mmdit"]["y"],
+                         torch.randn((2 * self.batch, 2048)).to(torch.float16))
+        copy_tensor_sync(
+            self.tensors["vae"]["latent"],
+            torch.randn((self.batch, 16, 128, 128)).to(torch.float16))
+
+        for _ in range(num_runs):
+            run_model_sync(self.models["clip-l"], self.model_args["clip-l"])
+            run_model_sync(self.models["clip-g"], self.model_args["clip-g"])
+            run_model_sync(self.models["t5xxl"], self.model_args["t5xxl"])
+            run_model_sync(self.models["mmdit"], self.model_args["mmdit"])
+            run_model_sync(self.models["vae"], self.model_args["vae"])
+        self.profile_end("warmup")

 if __name__ == "__main__":
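For reference, the denoising loop wired up in the patch above reduces to a few lines of plain torch. The following is a minimal, self-contained sketch rather than the example's actual code path: it assumes the shift=1.0 discrete-flow schedule (where sigma(t) is simply t/1000) and substitutes a toy denoiser for the compiled mmdit model.

    import torch

    def cfg(pos_out, neg_out, scale):
        # classifier-free guidance as in CFGDenoiser: move from the
        # unconditional prediction toward the conditional one by `scale`
        return neg_out + (pos_out - neg_out) * scale

    def euler_sample(denoise, x, sigmas):
        # Algorithm 2 (Euler steps) from Karras et al. (2022), mirroring
        # sample_euler in txt2img.py
        for i in range(len(sigmas) - 1):
            sigma_hat = sigmas[i]
            denoised = denoise(x, sigma_hat)
            d = (x - denoised) / sigma_hat            # Karras ODE derivative
            x = x + d * (sigmas[i + 1] - sigma_hat)   # explicit Euler step
        return x

    # linear schedule from sigma_max = 1.0 down to sigma_min = 1/1000, then 0.0;
    # this assumes get_sigmas with the shift == 1.0 discrete-flow schedule
    steps = 5
    sigmas = torch.cat([torch.linspace(1.0, 1.0 / 1000, steps), torch.zeros(1)])

    x = torch.randn(1, 16, 128, 128)  # pure noise at sigma_max
    # toy denoiser: both cond and uncond predictions are a zero image
    toy_denoiser = lambda latent, sigma: cfg(
        torch.zeros_like(latent), torch.zeros_like(latent), 5.0)
    out = euler_sample(toy_denoiser, x, sigmas)
    print(out.abs().max())  # ~0: each step scales x by sigma_{i+1} / sigma_i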
From 13ddaf9d736fa9cb89c251dde853d979d6c630bc Mon Sep 17 00:00:00 2001
From: kahmed10 <15948690+kahmed10@users.noreply.github.com>
Date: Thu, 31 Oct 2024 15:45:30 -0500
Subject: [PATCH 07/17] update requirements

---
 examples/diffusion/python_stable_diffusion_3/requirements.txt | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/examples/diffusion/python_stable_diffusion_3/requirements.txt b/examples/diffusion/python_stable_diffusion_3/requirements.txt
index 1789f8f4939..5b930488c29 100644
--- a/examples/diffusion/python_stable_diffusion_3/requirements.txt
+++ b/examples/diffusion/python_stable_diffusion_3/requirements.txt
@@ -1,4 +1,7 @@
 diffusers==0.30.3
 einops==0.8.0
+transformers==4.46.0
+tiktoken==0.8.0
+sentencepiece==0.2.0
 --extra-index-url https://test.pypi.org/simple
 hip-python-as-cuda
\ No newline at end of file

From 1d1b3d80d7922943168239aa338bca2889b37f24 Mon Sep 17 00:00:00 2001
From: kahmed10 <15948690+kahmed10@users.noreply.github.com>
Date: Fri, 1 Nov 2024 18:04:35 -0500
Subject: [PATCH 08/17] update and cleanup files

---
 .../example_output.jpg                        | Bin 0 -> 78161 bytes
 .../python_stable_diffusion_3/export_onnx.py  | 110 +++++++++++-------
 .../requirements.txt                          |  26 +++++
 .../torch_requirements.txt                    |  25 ++++
 .../python_stable_diffusion_3/txt2img.py      | 105 ++++------------
 5 files changed, 143 insertions(+), 123 deletions(-)
 create mode 100644 examples/diffusion/python_stable_diffusion_3/example_output.jpg
 create mode 100644 examples/diffusion/python_stable_diffusion_3/torch_requirements.txt

diff --git a/examples/diffusion/python_stable_diffusion_3/example_output.jpg b/examples/diffusion/python_stable_diffusion_3/example_output.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9d729cca118fc3b1ced96926f2fc0a66303754e9
GIT binary patch
literal 78161
[78,161 bytes of base85-encoded JPEG data omitted]
z0sNFeYzVM7W@iS-V8GaS8c>D-^<@@-i!lJFd=JzsH2_R$5?GtXYz^R)k^k+n0j9In z|BUW{RUK%#0E6BC2R8$`+MNGCC#Mid)cLm||DP}}+X{*54Ufww|Kl=gu>tMSW8w@d z2|=WYr;26wEJ?Snmq({{DId!5JVfyJjU=x8^5>;!%wLFl*SzOQ+>^bnUu+_tpmN8& zFZ~?-*JeI9!`wv3vPi=Onf}2rp}dm+eU(BjO{CC&XQ{Df+Uq=`HFGfm})pci+)liPL4Jy-ix&21AK>p z)?A4^PvSQ3dH)J625KpU4&Q)w78(o>$ixaOEoa!$Bk6PAoBEq{*7PJ4+;>ln(3Zv+ zNw$WL41q|0kNYUz8qtU`w4@5AxaCT3R12AAhD`42&Bzb&l6Qx{-B!V{CX@cQaLX(L ziA?y=n2`n+JT>xKdu}@Z@rt4*?4<|=?`ZU2B;K9x53*(1yXrrf$^TAYmts^Xim9m@ zPZPiV_|hWcV)(k^wk1!8$UYj^520~zhjH8B(z49e-Jj#kL(}y>dmwBJHPIsJfn&`2Lz=sK<&A<8^(j>eZ(#f9=#SxOHYutt%|N#jqXIv<;k zs*PLzE5K7lO$llF9t>yA{XO%(A7}#joWz43$!?ol7Gs zq4leuXuo92mVo?;OrAbCyei3VFyAgbHRL?2<_+Kr$Bc1H|Jx1dT{aCtw4Q3olJn|+JB#l_Q zoCKt3b%9IN5xirlZRhKG_L$`zee$7{y8d4#XEk%MRzFsIxh!vRQw)yoBz64wp?C59 znc+C>nYqNx3k8K9qr)?HTU>tjTs!WpQJuFc_M2mLL&M5hLIZ|)e<0^4Q`*ctQ~oVc z=!(sJZN-3@Y-gLMjx!CTcplNHRhqcPT-9nO&c;iXs?Qg(MGRG2wjE2S-Q&)5ctZHN zL~sL(+U>Un97nh|=5E7!**V)8XdP2%OMuw;1-4jtmAC)8?aF2(?+;8=;}@;Xr+-fZ zV!SFC;4J;|PByw8Z5{|Dcjs3k=JS$uT^aMur*;l>!M^V~)s5-d`%~$A z>ltAMzfiTBQrn0I(_8rz5n)R$P7bmFpoG4tAd+Q;JFOdfIgdgoZSp~aEnCTa$YsgI z^`?5_N@Cut<k>@u}@MK>PuPYArv@CBw{E6N0Ssx~3hO0*Cc8j`ho5Bya_cJct*&Gz zzW;bFcH=wbPgox%2k;Fdzs)xce>kN?Iw%bAvLvZ?2VIU(d)wxpPc$}H{^30w%o;3 z7Ra=4*?5dQuf{w==L-ZD@4#`;sq zaHc70jm7Y`{uKP16=&2x_d-do>f2WK{)~|H52CH|9mg*{t0dI-_*?C$#@^dkHR(J# zM63N@$0>UouRgn$$F04(res~|WZfd_YLt98e{C~NJz(y0WtN~#pCP2z2uoDHGYY?@6O&Wl+ zM4D|x`nLf6SNQ(5T>lb`4S>=Jj5S|K2?58bd;d&n!1z*+6ELc)NxcREnsK1v1InSn zKPb$!57>RjzLpnTqFidYGpL< zrK|2=gmPEjJ*6-18s^)nsU{YTxVy0BbWAb27m0d|j<}bOE>=JN;(7VC?KkxNSZ=m* zvM7&8Ia;dY%NxI6UnvN5cVbic$~?%mvXZM8grAKvZ_7J+pAJ;53HQE7dvBgw7D8 zLB0`LMQ4~b>P!#G9uQ6Qnx~G0@El$A0Mlv+2Pd#%WWR;BX6g7(cgA3Go>SD{wWZI) za}=gsZwP0slnlxKz@hF+9uwrgd(t{N{<|df+Z51&GwVTVCKSSzx2!mh?JTr{TgArC zr)t?i-X^|mYpJtVAqjc~KYCM?U^y$7t)#FEA>5Wy)4x#Oe+idJ;j5Ik(JOhwTjo^FrFDkC(bJDv~JF!g%3-~}&EN&%Df}x(% z*ZI6F@{?(2zsmEEGnbb#@5Dxg%&uG-f4RuVZOS?!!Kx3lqDc>~nC&jGEqwGr`*zI9LfrUc%U_M1)p`R4P~%$e71=23UDlB{Gq8%PiH z(b-H%!!dn%F9Rx&0^d&vP-qU}9I0D%kKrs6Fhmw5+XV;zG^IAsLOTszkE~i!C|c1w zylu%{5s1N^O-jXyg9y)m6MJ{n1f(m}=1h1jKNz3_{G5*icCY(qMhRabDSX>*=MW#` z;oH)HBn%t_U7*scwokNYqLP;@CwnU%d<|aZ?SfyAeSKhi$doO30{OJZtigfON+&+w z?UVId<>FOk*<-6iw>e#{&G-?H&P^Pu0Z%tYOPGX~g5~8R)wYQC_xs?56ET|Z77^6+ zx@$(>Q{ovZCxWWJWIC1}j~br)7z?&eWkmUg!Ka#;A0O3d^G&>bj!$3%lHnsvoXtx5 z4XFyEGO8^>C|t4=rON1N#+6~r5UI!E-tbNRw6qtv9rk_%XzZrg{Zl!~dyPe`uOJKq zuV+>zF@DTa(tfwsXa;dm+%T@^Cz8{LyBBgzV8&8TAWi!JiZ62m8Ui7cO-T-Uu1$1(_zx6gcW*S#Ud3KXzb9OZ2Yy`B zct%ydWx2R_bdAJna*e&AMHU?72qpo*<4!WI$6CRmQN^}=oKZ8VWGU#rv%bnx%915- zXtu8kM=1^@-gW(f#<)~-3C-?@EI=Va3!+C3#EjvnK|^?YZs!ilIb76;{Fc!q*fia{ zs@MYbloQSB=y8dW@^Neg4uS=h-rYoaL2gqe-1C`6CR1poFB*NHqD3^+=p0$EcbkN+ zJ(7w%aJu>;zETFgYk6Xn97B=eEltWLd3!ik;Q|+p(C>?w%x~ti?yN+w(8QROZ|= zVz@4C^&^%A3&goYte{eR5Nu6^kN#S;Pe*M zDP*MKGuz_x-;riBj2f_YQf+a%_EVbM9c}Jdn@=8Es1)&DlROMlmFd@?!#ZspOLN1# z=vy+4iA4SiR!@^~pc2e9ecj%yviC5t4I!HS>?%qRF5j=at}#AuiK_garx3uIF-Gp$ zhSYF4skhGEE_n1|;@~7yK%HQ&=S>h8QAniCi75yaF>)E6LI|)8O$PR6-wI$r%;~?Q z7eJ8%WJ&)p(tyb5U$Lv*gaHrHVE!9s1_Gw?|2e$><^7Q9=>d*zbs$8W#~6Vi0~-V4 zXe=~B$P}Pe0xGA@zgSm*oC|cIg~Ts`)BaO56^Swxv%UzC#$h82{{q(2Bz3+aOnwz= zdhp^elP?aN+Llm-U&SEVw`$3O-+#1`sw&vVaF-a`ZdUI;6FIucel5r`){n=XiS_^jQ_GcR~57Z09? 
z$xXQGF3#_?2?`#njfygR2EnVO<>>#+NG!d=ffM$%mW3e|6C^EHh6MpTO+}NeXedb{ zhAoFm-wx}2Zdaiu%j^o(EZe;{H5Jgjfja2M9TMGFTXi)7jFxgc~Q zlkXCYMWPczZlz=KKANO;MV!OM8ex*rCf^5M^$c=Jl2o9BQ8f^p-N;T`aM0SBNP59g9@%?V`%kQI^AQAS@o#f*s{VI7u6X(XlCb8HQ!&n;!3%u z-kDFl*PA$F+Oo|^YgW&Bt4s*Q`00Lh%Hn5>c%nksk~K}#2&g2WejHVb>Oc|kCt5Pn z`&&PK8Zn%p_?0RsOuR~}WLbZ41xTdCqOcLuP}5TzP*dPQi#UDa&9do6d>xwI93iLJ z4>h-qL1r>Tm2*&hX*#DYp2|A>W&FGNenDfVz1KOaH2iVR z`TpWwoBD#Khnm=${|!frO&#AEx+GjYh$v$^^2EW*#KVwUA^EBuA8!+WUH*%(a$bRz zytZ>lp7>T|b{X|$f4-i+Y>`>u2sxh9obAxjofx_|hG69E{t7QQ3-(3lmo^lt8=tv? zwkCb)7(I96FZA1VA|V>E_>0NQ(&$QCf#NthK789hXwj+7*@knqR(sg~79gh|qXp+( z*Ftt=vFWa_lYe-%p{Bf%L!oK|uyBa4D$e#SY^#VARb&-nk%2$4(VWLd2?fV6S3T0b zrm)S7(2hud^}|*Vv7vlz?wyXd{RJP_HR)ew;lk$%KgK3=ORUSv^*#?`4aW8D@kPN5$q34=K6^$&r!{2~t<__|AHg z2Ri^ki?rFHTU_bLMKS)WEGfyM$><`u-E#$Gmx7asd|7ADS(rdWFj!3s;|rb$zO^Ye z?W4wJu_?Hlh15#lT5l~sog9xGtbT@Td(fd!)Lqxo@i7JVHr}6(_bT)UljO0{?uKxI z`^Jh$=J%)%0VqHWWaYcAV~X#?>mqO&m~x{>5eoFwGUto0X|t9)KAs8<6sUUhi#+uD`{$t=j&=M>XaYjFwb@eqP?ggVn099C5 zdJt+z!e>#h78ysp`|>jchZgO#8_sjTD|~99;hx(F2sVoAk6-OTaeZ;=0CwpIKmy~I z%34$NjqGg+t zUoz^Gn&4f$Hsa`s(ob)RU5OijXOq`reOpuT^m(JTQ*u{{U`fOjTlTT9Q-6Spq+|Mw z)I2oWAb!K`iS>nKRcx!0?4EpfSJ^kSxzvbYKX8CvbP4a<`j-WS##BvwKd105zC$ZB zFJvSURd&T90;CjE#BV+}I>VJkZHvzLBN-76M3P$4bhN4%$A7f>Tfi_?AyP_OfW}(={we0uGqHvk{ntku z$|gzGPImYkGh_=EE9&uRJ+Juqqk+c__5BEuv@KOl~8G$ z44wA#eBb9;{F5M$>%xTE8-wZ15ZnoQ6@5F7V&WoqUDQ*Al^qt-`uih(koSJk8A874btpxTgsV=lo8R9RVa5F`@!Q4XrwMtGh+kS}R$I%b+1tvcer2 z=AtY~+-7de&LqWi_-us99=Uij_of0Vjwh~-S@j`+j^m2x@RrXx_aR$`?r}eEw3E7T z`R9*LjG(axlnrxe}Wz z-Ug~Pq+zB};X>#@TU7ArC#64%ThW$ZD1=cljL!p8GPZQJ(zz+!SRCVw@AUf*Mc1jKi814gvJ z$3OV7vQEb5RR5x{U|@WT0`Jg3>33N5H6GX?MnYz9#GN{=_6+o;VRBcUKUdPmxuiy( zZ!cS83TaZlZ(k{*U9*GyxJyW#t*E~b{@Y%JzmD?gwNZBC^z)o>C}0oqryT?l9C=hx zOYZ53AyFM8ns~W4#b2JnDCM+H!;l-kkKFEQng?k-+2Y?><9@PK@nx44=juxo4V4Pa|JB5_<6xv>>oBVO{(f9iR6T- zwyG)|;4MBQutFI;ylat#ny>G20>Y~|86(cP#PL~&`?l1fH}6BSrNWBCABS!2gzu)q zn2nM|F({zS!Z3q{d~aDbS)V;94ocnLuh%?r{7MaTTk7pgPw6U;I8!-r@hoScm;U&uv?R|umE zPWnvD@?_X66US;A{v}M%)A&9v3|&(px!~`QteUqy<<;H#pfDi0+PyuiodxiVT!_Am z?!1EjsiE{jB?!j*z?yi07kGVYx-WIfjsgE35;veB@L9tavl$^XsYEq1{zjR#aiJ5d z!kYYN?ZedmE=)`k2spKMX&VN(&GI7^3=Sh0t14nyX_a<`DU9Rvz7bBI=Z1 zh?>yRS$mRVG`EQfo70$|^DSy4sNL$pfMSU(nk4~|qD~0}-rW8N!t%;LeWzjLk*1QJ zBt0ZW*W^hI3$5DuvKesyVk55FcHa<1$B*4yTSW->qJaqhSV+aX*hpT%x*L<! 
znDgD&;kRCyL7&?rWiAbrg~{a=YPL988m!P_suA#K4*((CQg5Q92-Wfb*WdgrHvq92 zz&O(rs)Za%K0Rk_Wa!nZ)(rG%o6QVHRmTznM!fVX1~8)as-QTv|sAi|3H(B z|AD5I^e@qcW?fgj;}xj<|8PB<j*7w`r4*UJUZr_;62FGq>%1L+Ra$`N8fiTS&#C%9(jR zNJGHgoGnC!>#T?xn`x2Bg_K7_bX8yQjOlCYAUxw7D^3Qo@NrehmKIX=PsZLyDFVsX zrDEG)CBDSy8PxGH`GiwpWmA4S3j=%;|DePvQj~<_fX6Dg98V~jQyOLQ8o4aIj6t(fknNedpDz#m{8{@Fv-N?qlBjOVPW%%}B9 z!<1y}yUKi_Sb8x*psfNa7hmbP+z26pIiF-ykExiZLe)UQoX1sPiQWO-Gq0Bjl8J*N zO8x1+#XZ1safkDFMO7c=%*&+0TfsrTB3}IEo z7h5wRCOFr0!zl|{UJ<;c*%ntf*a>jEnzAGuq_L#CC?iOAVZzFhJZvL#Pj${|81nzY zRBgE>x-wF-e0YI8P}drdJ1Ri^bK%qbN}evv^`k$xOtbp%obP5&1-WOoA=A78_i~-u z0ZwC=Jf@=`mymkEPj{sWmAY`j(d{W`wT;^1h&u&|_Lh}W> zE@(V8M4Twm?uE$T1|;M3tUiC&81yb2)MZ2GkP`GklwYhz8&5my!~>0aiJBUn+4YWQ zD8*hp?Uxx|uIL&O(;^Ob4U6Ik_vm_wT!{9?B(XATPmT$YScuydmxm@Ma+9B~i z7l)rELe-`)H~rakGvYAV`l^+x`Wl>{7(-x@-KO%MVc~3>Y|3N3Ma#SUd@XhF$6ExF zsyV0JYqUj-eJ*jlCMtGRiUu8e%+(~|rm;~xX|VaXnEO1Y zG?~{{=L;)3UQek3zu{*=s^KCzXjhXT(_+pVq3;Jn+DM6wFiqoC{Qh9pZx^roeSNQ- z`O=#O<$h84A&Cz9C=Z-4W6SzHr?AVVkU21pEyt!fe{q>I7&xT8!RD~#&7)bEA^Q&$ zW(UX}R=$U4|8SAotQh*LxpBOPYZ1U!*u=P-jGLnu7Q^oBD=UJqAFi+`Ifh2V52R37 zPfE}x^^!523@|}a6wYxz2T$BvQCXNyRgYs@;#}f1c&e|lzM9WoQKQ{i&?Ml}sY_0hGzKGg`quSuVV|9BHu@py?DaccIvaiIglp?h}U(VTFC8i>#!RARIWGfjm?`bjr z!6Xf6KRk#(n8aG;N7dG2$^&`|-4{?jW@bQlL3oBd4ba%wISPvk3yFbn0zjJ~&xB-E zm>)f`2zK160Dviu5Wiy76x2>zDFRnkiqmY5nkMpqpo7-ap3^ zPh4~K?k1t_DC5|AyXAX2+M}6BbH3Dlom5|2MJ&N2)$H1&4P8p|m;)D|W#{wR_Can$ za%m{pe-GnZQr_Tbk>7;`1+ui+UE^BJ7x^`=rw@wW?-lHoCX6l#l<+IF@EUiOE%o;1 zMf+g?MdJ ztzpEbLujt`ZK)r8lk~|Nvkfpq?kl*~#p-Jxv=`=E3dmVA7b6WXp&EmZ(O-vBSuIj8 z*hU^AL9qISw75yrq^wQng(ntN+?kkkF3u6UCRZczy+>W%)BkTO1Yd)Dm7KgT;L*!@J~{e-?Kg7a%H|Y)Ro~XS61M zR4s0Uy1(&}0o3=vR(+e>d&T4PKw#z{<7wt?`5*Hr`qPeoS;sRaw7Q;6w0F%jgexcT0Jrek9EllQ;E!eiB`ht;?%ST)Ld~MWg zh-qYV+njf@@&id{A8X;g|BnlG@=%D!8)I>@(L2TFHakyhBDc5AaVm3k5RoYp=QBZ~ z-IK24^==w(pPHa|YZ{i5Bu_D3_5<&thx^f#TmgC@#4{4qb36`ml4fq(C%nkqZbHLm z4U&D`B`-mTT(CoJV;y*6tg4zKP;|rvq$PZhm{UH>@|`P0ld1YpF@d8jz%wFSN97Dn z5fmuh+A9H}+!U&@#3fI4`6q9*s#=2{%1LFLz@Vhrh0?FAAJ}df;TJUH-d{hyk?Hce z>sJV1m2AzRKDOLmMbeftl6KF^_2@zD!I;-rE4M!EMNi8~^?2A|jp_WZ+;2?(D8*%O z?1!TunTv;Xf$D1A-ZU3=_VUxCz9`&89P@fI%!6Cc%XTu|$)S*oNqh|jxy4-t+iH1k z)bR6CVato5Giiy#O!q3;0$EepZHa_ZBsFOea`D72pE!?2BD5lr-HIU*iq38|{+1BK z!;7}UNUz`9{%z+)WrA$0abF1`8B#^|i>=?_$<46U=4A&$#IHDjgpe$Yrill*u9pk!I>t+FsgWR_x*0?Y4zxn~#bet7!_QW% zix(1{5c@{99+QDWO554c*P?EaKb%Czb))H5{UhAxT3BJcJpUuhf1s}MH?X|OBWCL; z+7{_?ak1GzW@~OK5YXeRB2P;ZV(ZCs%SjMK>SgWa$VWhCXaJ6BvUdrhV31Jfg~-eS z)0Ik*3=B;eBkkYg-E%~QI-YN@WQwSauS*{aU?1tPL`$zk%f+nr0_lI9N?)j5DM-9@ zBy=RABt&Arkls{-ObI7C5N3o|ai~c|^5f|NU$M}4ujz+x3V120V?c5V(o#||0~qp7 zqlRwLIFgIvG%QN&+9HsCxNx}G0;WWam0&1Iw5sAL6b_YQ8kLqr@7BN2f-S#}{B0M} zBO~m(=Bqth88BRfcR#Y(wl(+KD3bKF1j$_dY8+1SNEQh%bIQt!WA+fUIC?1tb0exNK&_L!sO8&bs+37IG7SbvM{pi|X;G8w|exXyv{ZA4*5(pYrD>>r;Q1edP% zBeeAoEb4}*zAFJ@HSk82s_SS!yym3Jos{f8luAu!G5AB%M~?u1WdW&5zmA>#`Er(n z^bXluJZu|2*G2sLA&ev|Fp7teb9PG8+p5gPEBnn9osC@y>MAc)$Y+B6Bi7F<-Lt1V zpBVrj={p^yYhHHZBVP6}c%<9?HGuiRpyli%GSt9DmvOWioY1Q|dVO{D(z}IABrfI$ ze~C@mWh|(Yylp=9o1%ZCn^+S&_j3HUCl6Z0?ZvoO76-RBhT*2hoNp+SN|k|8;fR%1 zqm(tZX96FiPuh4O+i?q`yZ;7Um687MT7PivI3D)Ke`M=>s)hFYjVohg63XW{4|U9SZgO@jb+X$k>~Py#g|I9o17&(v{qX$&bF~IFnug{ z>|Cf;;?zOd!KJ+vavuQGFLL}U2;-S`2*dksYM6_i$USb5^J!*55o%pcP~w~xqyH97 zrVxi)YT?u!%Qx&&j`L@S+(z7ilP&BO;^%xJh#sfnr0 zZ9jpeDpgLaK|+flAgu8%2uPH-h@KM0k|HES?v-NX!361X1A}po(%qo9TG-{I{b;fq z(8TB|Vv)4Lb(F+W&#svnzlO`{xJ|LS{d$?E2s%^>}hmhN=EmNY8IZv3Og*5h0?h0{LdAmGygL*!qShs0cv7hkBzMHzEs(hg$R8 zn<5oa3PF>@`_u{5lPZnf5qPZXE8h zUjS|+=!F9C`lv}USdlbBEK;Ka)#P9XQ*St>*sZxG&`9$W1Bv5`82o&+UPDE+zijHi 
zO+Dagkjr7gc)r^HReM)p+_cmYtb!sgU*qg;YxSb7Ees^|P>=R((d~MH7xz`J^4Bm! zd-gmB+>ZD}P1T<=fNak))qCf?Gu=;kS#qj)dfEQ_r5r^guZ6x_waz5lc>&49GKSkH zfBcA&%)6&BhA&VrlnSP;U@9KiS9CuaM99JXbe76l5-OG5_|7y|jJzT(%S9&Ce|4XS7_O%@sLTz4n#$B2 zs`QW)Q+HyNloBq+pP1XIwnaeRtN5f#un1PJ@O(;uH`A!hF7SGDudPy00M(!6uzH6; zR}g?EpuqO*XXWpQK#xOB{Z%W17w@g|XE_{n=3-w7hff8>SRrFzf-nS-^EAohDBKT~ zT9d{iT3U!2n=CjD%6A1bPWj6Np9&KFQ>rjQ*x|8-D7wuYX`~|Qc+W!Oe{L$=zE)VE zWJYVK)Vci!N>6{StO()vuqnV8NWUJ}>9%`kBaCa^T<6dW$5}Ykt0D;TH1F{G(AVi2 z4QoKQP@6%9;eGbE@u~~34yDv{g`* zDke=$()+&@iUwAj82ykx5y5e{O2bZbDKz6S)(zZnHI&`*5hm~Els$D@w2x1uJg*Cy z#r-wf*6|+CCcrlKK#35ylGHb6SX*j~{8!Vl%`4kV)*-<$>VLF*ZRLhnT;8#2a-?*b zTD5X#mb9l;KQbfCy@$XT0=-Xw9ZGD5_(?3lkr$X6m&b)`-Tin~v9C%$Raoxn^H zbX`pj_n~ZR53m~~(Z!0`6L?&rpspdQ_JJNOXEcZ`JJqgaoha~HzJrT;oiH!7;PC8E zkyV#u!UkE%TU_NBU4zBR5*$@W>ZNFI$02ecBc`6@n0}ZN?q1(mEwOr=P0IC0WHV~! zQ?((B7CKP9Xt*6UohiSn~aYF$)?aZPkjj zS-P6E0zP;fLQd6j{!3Otl<-JdJiu)c^;W{ipP|DbXQ=G(_Sn2wPp=dt^RgO@s6wwk z7#hCXXC00wGnn-)^Hi_nrynORwn%a=`m&nD(^h&%#!m)ZYUXCMh|W6X_c~uUj$b?G z_wP}|xYd61IOYmQG8H@C7Wfgr=J>;$amcS`yhmTaOSLV%Efu_zqIHSnjPeF00h-}m z$abtqF7kP({q?O;-rh%2olFNBTr?Lms%;?g%~L*Hqrog0nJ?a}=JkA|x0~Mw1zlVx zE^-P2S?llj57ub%(gJia2f;&GbZCN3LGTN39pSRmjyg)^w20&@rj|LZb~Ci(pp+-u zDYqMD5?|}_pCYC>!%r21TSHFDVqM1pva?N;wC;(^meKrDM^zG&<}I20cgc6Cn)Ma5 zm#(JQ$(X9l)&?0kv`POgo}O6P1%yukBS{#0kcqU!z6d#`uipvqcb))R;YCF? zsrU0y(7u1ent$tLslFbmx6A^CR`$aCD@Zv9s)I z_luVZU46uScz!fU8YH`!FL0-k*r{ZKpVgS7+|)qsvv{iV^mT?-lxBbNx2k3OyKg5Q zc-F!9;PKlz%6_cOKi4ybqIgsYSZTlZZ0u+m&Poo`xY(M$#zkrciJ?n~9(CxGlB7`L z5y31HrRlYb6^bIxT_?NTj?D>;O*mwaHBPIVe^n5URHf1kOHx3RLrLQMOJux7oVk!m zHlnqsvZ-cH?Ri)gbnNM%gi86m6fU?*Ui;x&ai=zLQi>qMPJcnA*ZRR@5S44e1Vu>h zE-nVmpDR5GS9u2&;{|IUyzlv4YqQeO+J7K#tb{og$G+B1*ZJ=3`-aW$ohqC3vv#8j zu68KBw3%UAGz72*zlo9WRJj{_)qM~ONw}U1l~Yb3jm zR0_3cV1|;}>H)hcnY$m6;gXt-QR*TaS9Lx~8jad!BWsrhdI$~`n7YO0WljDq5pMoK zd8tT_Jc6EO!w>1U(olKRq)kGVZ#hd(@r`n#U0EOM->aDOOjSud6lSQ9)q6s>m zJ)Ko`TK|l`j_~la?pl++(-RQ>T){6!FWaJb=rMnLHC#Bt+e)tEgB=|b=d;w1&ZbCP z;)IHwFsP?9n%je35UGq%`RtS8qs^xK>yw9SXbN25!G%RM{Z>3nR$>LJh_i+())X@l z6eMOk!%OqQ?+2zT5*>;_8Hy_+{EbeSY+tNR=;&)Oa-$jjQ{&1rF#*_D#p?Q~~Ssr0+H)sPm^QctovSGl

64O60R_n-otmXD~1=+cPazA{pWw7s0-e0TJkC z=pZpdQ@FF6Ufz^7KogR}S>v+85k*L56#Sw-nr|muvsngMT;ZE=Jw;o!7K~3B5koc< zi9h5I+}Xg8+8O)bj`mbcjMN<_!jFH*_7}eliiDp8k_*Gq^}fXOqFG^+jIEai>$ zKi4{-qxrv&Kj6D0j43}T^exa#prRWP3_54?O%3j5s?+Xn(OXiS@-EGC_c25!gh`SV z@_fCXu74RTW={V~ZL33^(95qXQGY-snU9>Zty(kuF{JvOb$BHatT{8(n3YHJ|aRSGnVA4*Fixg>a7G@A8B&=8VbwQ(k@X2NyllX2 z&>?iEJZl1`qCm<*rS_Z=g`&MY368Us_R8nlN zXIUC*lEKz-bpvPOoA5I{dHpP@x1msBhwE`+PJ`=TxRMm@dTY`>jcE+;V!xxn#xG_q znC(4(vwiy#XBQIZpEBv?Iq=e&*h!L4Z|(DV&olPT)BRIL)kSiuPj`ZSnZ9Boy)qU{ z3!}*PRn=jWFUbhc4IYNqkgqUn4DLnev2O(Di|1{M>;FJz+X=@;yfr!*q858xNRZv+ z*l%Q~b|$uQ%qiYu!~){%KG%1HrpE+X@+&`CepyAvo%FtJ3GJ9d_k9@5-G{40r3~JKm#(!?2D&a? ze`PnsJxkRXm9;wmTb(HH3wb8N{cJ_zB1-hkt5wZ!su)kl3^q+P=3m~RdX`0-YfxY# zYR7hN%j9s;aTH8z=hPY-&e60u|K=P2EHOU(t@XjXQiD3RW<~du;zE-+wb)fDg)T@& zElAT{R8|I@fR2()+7YC6qi|4Zx+s{^eLyrZh#*V$-;vJ<#NOHNTrKO z)D1KC0DehCXy2UhAE?tgLB0PA+hC_T(JQwMWX^q02b;fr`T=7Yoz0UPrsD=P?gDQ6 z_-jito>;*}BraS6G>=+{c4vk@tW>RSlkRA}Em}Q~@V%Mj+408KZ8uj%nOcs!xY|udf zSykDuk9+SI84`M<9WCN4$Xih|;{cZG&ZspW8|pi%qfaq14wV6z(9f^NFgOn*>@}iE zh1!6HaKBr82Y2(9Pttsv6%e;*PeO09?_ynj<`BU>NnbKA!)E9iQmNeV#kE9NnT_|Y z-yzd8FCD5$>(nfwM__{sU###vST={2+FapthT#N$~xi2_1u# zQ#V8%s0WaGLHiVK^vqmn9@kar04Q4*`H&D*ZE246<%7A%ToUQ(MQ23&3XKX0z9;~XFLy} zQ7+i{j4D+;081^vux)s*=A)1nAL{7OetMj5Ec;tVabvTx7O9|vsB&Lp*;eU-n0$Aq z)a>B69{jppW0iEE*LeFHD?GA!2baWy_xjgY*oHsL{nsNQdc8{4j3~}Xuif~&Czf*Q zqn+uLtn|%o6bg6JlJa2hLs=QdUkM5`MXp(%cw0S(>J?4`6D(AP>T_LX*ma~^Rdiq5 z%AH?nX?Cm0vMXj9YkQt=a&JilbfDxM8qjL{p8wn*fW#?iJ)4y03@E7tW!FJfnY#o^ zFi+a{7XeG-|Ch7=+g>RAD;5BDzOQdDF%_zf_0NU~<5u1@2BQ6Vs{d!CXazV2u5vJT zVGOGx9yP$h5tb!u)vA-{X_3Y;jvYZt55cH;Iaf*54{2@w@?3R}*J5*5&u!$pZOCRe zxBbQ|n~Mv4fh)zp<6VcUif&^~YqevwXdJR$uri{@vp>iRes_Lp4nlq?$Vrdo^#giE1aAg3bj8pYu$`E-25v zyy9%V+^6>E`>ppPs4tL}R-efoM89g8VZzx_1${i!T!mBws?C3Tr(2NG=D1I4kjBK)ebKV2ZDljnH0` zG^#2wGB&*?`c}x(_(}?HTtt{`=cnmK1^Lkrm7@yE*hG?=11nXe6BF)3@lx+VE-k)x+4pe*ofMpPg z!!Pet(h0xl3nro63H!Yu%GHmTn?iH3V@3*(Ez5b&@9b^yYOja$#DLvd2+en;f9Bm# z68OS2SC|doVXYOWzeMmhw_ygI^w&!oPT$tc_Z=A#TAOicuR#g^N=X(tBKF-$JKS6N zvvs18cm%KlJvY|dEanj5Fn z#Y1$7^qck0s~5UYTV%5J4i}ZpFLgBXSo71L-iQcVe;dgaZ)do=Wzm#nnyqS@9DyFYv0Xs;1hvgh?kb&n(K&aUZA zJF97*D2&e131Z5EP)|k075&UR;T0_3GO+J)04|#Vlnx72s{t$0f zttc~Gck{_p4=jQ_BxIy6`Z={Fu;P(j!kCdVVvc41*X*5p;+qr0z06Ji072k5KI-6& zsEgjp{Zl_v?V3*U$Jx<&2oqX)(MO^uh&B3R2Rj$9%Vzk?5s^zH(!=H6X);`&Q@tFQ zrLT!{T+WzV|K@P z>X~0y9BdT=9WS(tC#{nS7nwJ_(xHVn$MZ0E zhUTh>T?{UDU1yEnEwS?GA!qU*;&$PcFSF-QGC(Uj-1kdPBG&J$(gigN%SI_O%2&g5 zBw1J(YIHe&GBmdL&`C0phz2LZr`z6H4^&tyw@e#vC~#@&5UpCtAiC7e`r49(D7NSVHnl@}bWhn)n(o#AhI>w5Qz`@82gYpo!#$H4N9Q*LtuM{o0HP;ZWnza+FvoVqB>Di`s0wMhNF zOPxPkvcdd}DdFa7{Ra(s{|~}XasCXsLJdy}>)GaK3S?0TJB})GElgcy2}?aH1vbEW za7scWs?sF0Z%L~2ndhA*Z*OnIJ#pic>?Ct;(^>u5f3D#c`NPWTq$^|3L+4wo&&9$_Y!fFKP3p7MY( z6h_bw;AoryQJ}zF3jEJW{{Q=6_(#|Ps!&M)P-Q%m#}s}~8DC)8FmbhhLG_nkhigf# z+ucgLw#1}e6PVEFB?W4ZNF1QwNW<%{PRy$Hv6RQS&U3o;re9c9?{($-YGJjkT%XUU z{f4nxeB~*=Ea{*kJJdmH@AhnV)HPStQKibR9KmrD9ibnT7=zkT-it4WAEmvuUGYm41a#-l zD{zGaKD%{L3?9eH84uhb+ZZNjr`gqTH{OR79Tz}%01!-t|5NZ`7NrVDLjlBfGDR5& zxX^(@=s85~@&zV2)lf@as=zq(JQRP6NmD8jsi?yADon3=0bn)y-oGw&P;L_9=PMqt;r0JuZobn9{ zM`5aeYDymmr91E_)Zkf=@MR89{9THfkZkxId0gYd6TEg$T{pJxmq$bVjdLsEAA0(@ z2b&(#lO_<+i|f649_E_4n>MD>d$(vYnM? 
zyN`qR-}G+tI`E=|Y8kN|q&AiJYiYrJLf!zPDOq6~eujaaj{|WUxd}=?V8)VbMZM25 zR)aXDvp@g63%C(|>G;Fz`t$Z%h}0R7@E&SWS{{_1!t&bgdxKxy*9enH7iHuIAEnCj z%^YjW#~-Nx$?r`-D|a|Nn(f?DcY1WmWFo=a1`c}(8XA5I-hB6); zs5U}MY#l9AN&`Wv4dQQ)fJU(Ia+lTQ%-hfHGC*8}LeesyVvr zch%$K;Ex)Dwcam^dt$6>xz)a@GD7H?tqbg%<5pf2zm0*;_mj4$i^sD=CyZBE$XYKq zi?5$K{WV3?V$yZ^ZE z=5$c~(-ykKw$At-Lx&N5)I6^CwO@zv@%jHi9!)PSQvzSk^x{>=fL;?(-RhH<|G-)3 zj(}L~mL(2W9`c-XvAp|`Jd^8ioU;!{@c`~2C;a=ZkGqUQKT$l~*s_T#s07phKuUbA zt5+-z8E@8I;fo3bbi{DY?IN;5{(cZ8k^7ITstTjiLvj@jgTgLFI$8~hXFqT7k9;Lg zlYRE;VOjO@MegjODuXS_)im8y*WMXjmz(B`7WxiJ>(n@kpf+Mu*h~)I7dujxwx~P7 zkaDJ8aO;?QM1NY?*rJMN%eW9F-w+`5a!kRWmRew>WTwavN+4Umk5w;pZpan*Jv{YX zf96iq7GY+xy>t9-B~*=XGg9|l!Oqc5jAaY###B5{1u|~SV&D*P)bR5EC;sYl(dhnU?n@~-V z$r}WkeE8GK`FDo%SopH>*Si8S>$5}$`}x=TfgEvV=9LH4@iz;-0W=n<2|%GS^a7YS zJ$#b`(S{0wHnF>cU}gV*yqDsJW^LL42A&8&2Q5K}TuK1B2E{2@c&l0y*;ep6*QLP% zt<`g?%jpV0+i5iFk)42R_2|i_r^KJYmZyf}D;L)xj~%OjO>+xg5^DOSb{PM1{0CY^ zzhm!1y|Ra#;G5we%%wIr8y>l}?jyEDCfb7pix*O|$DhshrEuGogXDA!vnGd_$-x$g zv9cw+Tw*ll&c+yE3G!*%d1aOXE^K-KG27Z*#WdWh8nH4(T1w@XtH5dW4P6RHo_uO! zGsNJ*;GW(?jymwkhW<=)SclpBHpg?NaL~c9k=2H@4uqJ_H-STv5l~w=ZPKoezyY|7 z0-SpQ%+J7!mM%sel2!A{%Jq||-gsf;A(=hE_CWy!Bq@Zru2TuNWdN+y;omt5?aff^ z%YJy@J_1gkI%UVNl#%9_G=-|jscv_c-9p(c+C(Ea%!a&nC0!qrWe=;K6oosLw>x3f ztvM){8u<}btDmuRHJ7ibXVi`(-UiCIg^U+EF24Ah^Ev44Z8;r=mh0CAmiQr! z;n==FQN`YaYSx-0RM=Pb>w*kVEd#z75P#`#~R z>b3n{dq`T}Z?K4z>0qCLZT*nj*;wMd6UWueEllq@2O*gUAW?d^Y0o{dbs}J>?*G0b(m1#;?quqI zyiXLQw|Ks_cZY6tAad*NY3=maogVLN_)VnoWq8nrlkJ)5bm#IW(<~4J9yF+;T{(p; zr`#;da&-+x>mhpn(NRHDw?5;4R644UwLC+IOZA$HSlT^foRGWo{JV&~ZgJ{i{+r=5 z%o>Jo_XtN<9<2Z{ z&n5OLb#$g470`{w3R~p#o`TBBP{$l7#sPWC?5q+*AZ__u^dZ!N8Y>jD6j!-*WIl^| zXjzu!^M>v_&mnuNWw9Fx;j1{){te{w|PgSbR%GFgV3t z&F5Y}2B-GP(D_ckIhWq;$+^bOIj5!MYP45Wfn5AivH5|_JijSOR2T?>!H5U<*)g_E zLT^a>Ha?wx@jByGh{AY!L1)5{yS?-~behW&Da-4-NxSJUBGHMfV0NaOS6c8`(m5wr zGG$NyfJGwyYDu5t@Ml3@n&{M7@ywE^FL*aYf6X?y79aTU|DE*EBfX{TE0;a;cy;*o z)*m+$07vGB<|Ev~;i+`In5EfSSn10D%>1NEKRqA*x!W81Gt*|@(@Z$|Mdm4=-Nu_n z)GqL998~nOPj_38oucj*-{Xcn93*A`i3G<7S~8Uk{$V z42pc%cUt?P(eN2Z=T#ffUlzY3jj1_ngHk^YHFLuQ`I^6Q@KnNT zJ>#@uVB3xiSN>2)4B>^yQmlI${6hs-7HLK_vM&8a z(iuXr+onm`a79o{^ z^$itO)6r+5$c0}rlzFWwZ2jWy4@U0Mgs+dpE~l!0zeUcCv^!;EA{X*xME3VD7w)Qs zhYz%&;HJ!>Y7OdKReRCq&hj=vLoY=~HswLv+Yc9h1bu1eh?zAoihi2;!-?=tO}z+> zNYk#C9evv6ou5S9uuzYbPyL))9_TOZG}vBa>=j@bt=pn)CU$Wq$l(k)X0@@pG4yBX zC+A7T;H!?*>z{{3c}L~F{st&TbB>tHW6J<%U(IZ$_!leM2`tqHBfq9k4m^J#oHs7g z&&VZBQsm%|pAa3;&`&m|Rlpj6iWD<{rdK^Z|LLSrB}o5yBH!jZ%K;_gE;`y`2yH$5 z?IgbvvhQw7eRjyIV&q(7axL|zh1q?cs7AY~^spB;`h8#Ga*Wa%>!y)C4)J0vqzw<$ zH`BiR*QR#5jCD`;?k^Y@?{Iek#xot-;s@j*==lKnZ45>*pG<)Rk(TIy`p1<)V1xyG zrdTV5*~l#X-yv_dRxTslUGW`HYXnkL4{H|Jeg|G1(If=hQ|G<+8wM0j{3bR};&VOX$QEfnn? 
zlZjN-%rRlXz!FUG)f>AeVqJSon#k~U>Vu2xNrTIWEZyg^m|oKo=e3nQBqPDUKsopNf?TK7d=Yk{!siQjqmT&{I@!(z4W5& zuMz@H2L%?*uOL6tP|sRxh~GcEp8&kJvWvq{b@ZLu<{@#8vkeLho~+5H2#d-;@~^mW zR9_T0jypGMkjerO`?;D|B)AWGE23V_hxSJ*cm^G;S8jPMPOP@AG{;TQiL?xYmBQmQ z(58GKlx@Pnz~}bTj9e(w0|Fc-_^%?z!~wz#J?<0evruGhBVJdas_#nUcwzYICl(S%TlRhV;iJ0q(W8cD}J z%%a;>;N$ggn3{%lc)M)+cz zQaIfoNY40CdKgs&4NISYTI3+}K1hzU;p#N+w3OA{ukg7iDDla#v{RdRm7P+U+9|nX zu$7pyuryy!dFdS?G*o}HNO&f`=Qnh%u14E=2M&HkV0`5pWnmP3Tez(|7iGT9fc-!Qd~(19?#Br8d+w0~9}{BA0jmpMan!d(G5 zwSCPW9}76p9AW3+!VN0zaG`kIE}e@~R-3*F69bj^Ae={J{oZ4EiLUTOFQ|t8llRSeIGtXNtEa~Ds?zb%B&uUl)Zw_*{tb~Ww(uImBDt$6{O^C?7O&L!5_txG zE%5ByLfL>zFSk8->n=C#k3t`XeciLtD_kdxHOy#~HU~Yr@+vvV`$f~Jxy$Y4^-#lR z3dsJn1!Sp+I=si15r)h3toAy_R*Fd1SbeP-Z?Cy3lE^?xIz2u0)2J`(XRe#I86&dn z4wd_p-FE{;H=#<})``$hqwQ9FgBwY7}X!rU&3pF zWU^&)`kigJ?8;7iF9R zqY^KB-P7xG-Mwv%g5>8v3wmdBP6^TO%^ac*LJd4J<^BhqZzdUS6`WC_jf3x1K-rCIC zS;0D^|x6P^eu1=8_WH!I;F?5kAJ0>M`d@V-X)tm^t3nG;~^uqgPn~m?+rfJ zxy)dBZ^rQ@=&wco5^k%_L%%;%{e>ZpPo1YwyRe{E-fw-gOHBrebiC~+qB_dbAN+xg z$jSm?af8Gt5|{PL1?KzAi@f7GqwoKOdPsccsdyFl-Q8L%UPD9XP7vM1!!%sD4Re-rg6ZSZjGVkv=KiH=b;>y7 zib*8bFd5-sHV9CMuwZ~81#}#q{V$5K!^kA#|7%A!4MOeFXF*C*KrhRmH@BJ;SfFKa z+Vj3{=J0Rxr-m`8j)I!y`BeKJLU(YiQno=AB=y_pcHx^xl{pEfL#_9zHBq-{bDJ)s z4>K9tdv+=T+LT+2$=%|9m!ow-uP#Sbi@(LI7GGq#pQQfCZT*ZCJsfx-!M|`RF24PW zn!rJDX*TzJwyf=qjl1N*xP0ij>2(%+>514c%RV|rN}u>2r;M!ETbyRj|hEJHPL1;KHNu(D=myZ|xg z-zi2c9jw4rk^QAvlSo$FqYYcRJQ`?l3k|JEZo&_Jmwo6zRblvZs5z>oquxt%o7FAU=;P5z<2MJ5K9`6UJO$x&{w_rcRf>wLArRf2-=e(%Qv4J%lr2 zfx&K70zRUTWq5xe-S0&_sTn`_;*-z&I6QN7z4YTuwN=wj7M;eDSHfRRExy%ZP=anx zUdW~WIQ2(n?<8WyLv-ri$GQ)$t%gCHX>ay&|VjK>H{MFXCid5Hyz+`SsK|)ExF9#RM`Uv+zCBnQ~Tz z!#e}86j5aRpM&Vxf0FT_1;FOh3?v13oP%sI*iNj!ps*Ko)e;95;_tm<`>k#Iz_faP zs9+wt*h9B`ohT7HyWj8w`jDB)o!J>9D4zGl;;ZwjKUi9epAk`E4b?2u^C78bW*vYZ~kkQ%9R-S9)n) ze(_I0HJuysz`!i>M1c00syws#k&~l1n%mrUKkG^s00U#sINF8g_ZC*R@`pOB6;&Ok zw#?gw3zYki4c;tWvN(L4TuQ|ZQ5G+#4F8S6_v8lxy{6TP>f7#v;J0<-pwX4PLtK?}(+so*=HwzcVD@ZbCFFjfazu0p9Z}lhtw?6M;+s1Mm=Zf423bBbAS9XGZtQpVumP_>Z<-iBW%#K6ccFRZjeJ8t1{it7?Un z-MQui{x~tM%@8Zv_t)+3r@u4X$g!GU44zF_B73OhYoJ;E`?hq$y`!&YSLch>lvb-H zRO;PyQ1 z{(ETJWWSY+vOD56*&|HLfqv2!< zh$EU@nr5$xt9XjuLO#?G5pR~`R9PkFsV=Q@k|R0LF^=#(ejHoSk_59wuTc%5C*n2`#C18-Qzj^&Hh5*UdAk8YR2l zMecI85?(T*Z$Y!?k_(AsBA@@uWy{jm@2|3wjKifoCz#eAw{P)b6~ET^O3fa*7NT6>{W~a7z(l zFs^%pbart-h+;v6U;+8(8#7%uR&FH*X$;WP?rk2NMt*5vF)DUFIybAn+O7WErFRzW za&?~#X#tk(1)9{ce?afOnNmRmzV$%f-MWOln=l8wq(g!f9lJo|}V17-8r~Sz0 z7_DAh`ESFr;QqYnY5c*s>$0yq@!)=Z$o1#13q%q4Ya5O&XLn*?h^8Lg2a;)31w9w> zZ%7Fe0tnsz(+~$iOEZAZwW~3(O$SqGOSDqFKoXVjADH%kHI*Lre;YUVV{rV0}3A+0Fz$gYKA7v6J+qQ5(JtS6$Eb_X3y(N<$x-AG`dNcaPmsXsKt(t(sqi zt?QxPJ8kr_u;a9S?B#xCujOk$<3aYXU!LI|&Rg-_`m$J?{o=_;n}7M0#b@0dEt=(bMXl-S#fy2@ zeh+IK3>eP1B%izu(UFP!`)La)1_u_U^u;+>b&6z}K0v6;MJ!Ct;Kj@s2%bQ5w4D5J zddolu8qy?j7?Ah@=M08O0FESP3WX>o@?a*ZIkAr|Lv6+2oRWeuR7|=#r&R0+S^6pA za@SuR#}gby8-lqGqT;!+Hmp5{S=)tSa9c=7RQdTOGUI&wQ5E5QK(0Pk zP)1SS^p+r+&y6|_+CN}DWX?q7lel@l8^--jZ?Fq0y;kWaIr#Z zEinIjs`jSiYz|t0#+=7*LVPYEe#k0TV>}4wTB{#TUzn}=qxop$+w1)QKn8w3$1d4b z!x~#BOrR*tyjcFr;|aN&2mg*5&!0&&Fj&X8m)%7WaR{czS`Hm06lH$~PuzzpM_N9K z#Q{AI^#jych&I5kWm7lb42}Zs1;bcNcrRep16;loA)pn;M3Dz5Kup_aayS^+PMTOt zhzx*%8#=R6T~iiQ*u`uA)C43o%q|%TGwPTRUHuvj{UE(C+E>Ld!?B!NuP31dQLlV5 zk{&E0dX2-NI#}u9V>p7%b&%Jo=MLDaaqzA%Z zt3MA=ctK6~5E$TKJqSlxTY`kS5g0!_$0Sj^ObGbHvMrJsKL2;IpAFQ;R9iyA@XER;xGD>d(Er(OLG2J)E0b#RWZwrMY z{$FJQv2!)G8$z_PG^PRY*ZfYUIu7^`0z*9vKmF{)NHZ>(&AjKa@CK|Y#034 z+8$G!I&k9?%L;q(=Up6OUxaaA9vp_!ZuEX(-{No{!d^ywx^nx4 z?Ozr}IPAek|0+(8tn!Zp&O8ZKWYDcwE%L77Ud4Rvsn5Sj!xyrb$X?X0fJoa^x(gpl 
zfUMm5KI_T8V~Qpgp0_&WhVbYcvbx_pd@alvA^8oiZnJ2<$%yL6j8G4$$N|oRB^n_P zS4VOI2Plg)!B|!f9Hj5ofKSwM8Dj3=_n{AR@B)N?v%lDKiWZ_BI8DI(kHnAzP{WQrRc?gaD80sL}$W4Nyncz2_GSaF#Cd zAt(!ox)GC!M0dZkXahiTao_Gq&ga3*TRM)BNhgxL9+%anog4m zcXi$XCTO=c>02(RFW!);w8}ONh-0X9ZIQisBA)1tvI}BA)br1q?>ASm5BI>8nR_=S zhrZe3zq!p=Ok`3w0cO2oiy8i z(s?6_h4AL$HayaouZ_jFcD(Ct6DkAE`Yd1ij*H~};p_ZofI;sZ)vxTM`7cIP5sac9 zAyUPF#DzNqR|W$c8kmn;ae^#4^)$F}Z>kvVaRDm09y(r}Q#aq52$T~4OVQwfj0_w> zngZ%GJ%nF@HIl0gu|Nfqoh&R4D9YOZb&LQVoh%MlV3-GL%5p%SX_;(=ZYt{Kdh6Sp zS%wmV{o6PPaqmgO?gYMnC~nMm>p|d7M^L?=5&+jXf`|X=Os))Y?}M$A9I_9~!hTWP zn4~IwJ+Y|LRi1Q9J5hV&IO2O!D3XJbaJje81u-R2LSlYCQn81&lqvrJ;Jv^Df|aWT z)0u}o*8yHC*uM+`fWru25)l-L<?5Uigpqq*-k^3z@skW z{MhscU@zp!t@`nJHlpSdxh2Uw(z{(Nnfa?S>rrWcg3U53Nz?;JaKP3^*guKm+!|exkEC&Thf}U!-texF#>I^>BoF&@?vZ! z=TEv3{WBFCKsfU`1n3n>L}h8K5a@tsfH4)I;KnQCnjQiX4jAXj;m8wUD|pi(6a==m zWk|A@Y69#AKsHq03E&k5I?*)^xV<^d5%2b`2Fn%+NK{)4^%^m;8x|AE;7;WuB#uyI zPy;HWuBDMHX|Kfs&fJedavX|i;*w4d`A{23K@Yv-F8=D@=G-Uxif@?JJ=RzALzfpV z7NPfo6npn6*Eti@rBkA-%NSb}K;f}2=cew`0#7FRF{kycHNZZg^;4sWzYw6mJHvwNf7@#(Dz8G);dGN45YO762+h01wR!6fc)d%U_4Kkr*k0 z2oh!r+=QX7;6==IGh#WR!WhenOeDvyF@(2hgquYb^;P7v#R(JvO@~#g(+dWgesm)+Vs{5TNIellDKNXx+eVbW%ojf?C-vAwIKh}Wo*sae7l&;C<7P|8HkCC7Op;A zb}{yFlc`<|&twX18OBD4P-RjA;2t8tcGoa>1FSOE=y_9ehlc@xQ&|QxkI5$f~OVLQR%E|u?Al&6m*nH zCE>VYXxGo0>}C$AsCES%e~b7j|Kk1^lP;G9m&Pdi{qw!Kr~V$9{S6!%Ga=522w8`2 zjnrQj(x=#hQyvmW66qb>9Fo_uMX_vuHp`#MG`1!k$Ko64vpWMPUm8p#vhaE?sLI7? za`0vuf4a^5;*hJBqsUK^fhyllMNUO9c|3+$y9HNXU0$OZ><|77d{v`Sj!A5-SZhbO z>Qlmu`pp*bg(fO(HV7`4m$ZJR@NWQH=|V+ah7d(YNkvVz#h`|Da;4ouls_aeXdtI* zt%ZAg7bzw@lE$eFbkwm4o{k(x)sgm0X&);6Y87d*?Ibo7nG>yq=A?jZgmWV?w?H-l`+w3@-%glhgOhMt(qAF3iT38l+uqJ4l8t)z+pKqsU| zfU6kXCv7-whY|fsI=H4%yzDlzB8mEc4X9H@_bGo83zcu1BB1eoR&0kAnO5q=^!M3!H)wCDPl=w4jzMi3n5l~pB*%c|>nN;+C^mD~ z{^8ridrrTcgG+=$0x>GLxD=i)ixRJYF>x)tx>u(bJf^65BzV?NO;c~&b#}V9>v|EF zVgZ+fNl^gW+?202CTS+SV^8}RO>?2h!gb=o!g}0H+FOU|Hn6i0y_CSWRNp7$fubO! 
z-|H|GHMVve7r{+^eGof}XL#crNlq7yU7H$-$`AVS*dFLK{i#_$oPijHNP1tPj%mAy z!KtIi{Kuj0-WnGM%Y6o1!U-;dd^U2W=WWiL_2f01Y|kR7Keud@+Ju!MY-==MTAnB^ z=r)!?6G83A6g3UfV_Dsc4)QCC;A{gb&?1F1hD|7#1H~a{U7i5So+p|*&3Fz8RY%4y z;Q^LY;>)0k?UgUS#mDS>C(~afPOozyep)zm5C6fRIQ586PjV)ex7EIBNbRkCiW-;L zOb)LKLk$L&eR~pHJ<@p+EiB8ccQJO~LM+hvLsvWWfd0Z;Cr^Bu^zZ@K?U}3UB`ynb z#IR;8!KU1%aopAO5+^}@!1PsagrxXjsUNBN!gj0;Xv86xUUseO&pTnz%U8+M?w0Ti zSzt}T3BXjg0KmYR${XPFE3EnaI2kc?qAfhpogFub>+IejPpI|#K3Xkz<#t@r-^a!w z`5sjUR~fGq6G52~3f(5InUM%^h;W3nMYYpUVEdywh-fW+DueyBLbEe7*;$8lN^&aB zQ31C+!Fc?moMIyUv?FaK4wFz7ZY7MDTO_*EjH?!reDDE%;Rf-d7hoG_dFDa|p4FLh zXVN*8HJXJO!Egpqnvr$00)PV`nwS!(*yrZ3Q_Mhf2*5qfqlYx)U5(iL0TOYj^#C(~ zX@n^bNf)w|Ygdt2(NfH0MH?9?M{+D#h+j_KA_qlcW?nDXE1?*TB6`X8Qh<3xSPX`V zqK9L^^NFtDwabN|%P^h?K+2C#I;>EIua+yTZ&-Rw)&xK*xq=v~kF`U9 z9!{a*Wl#=31vQ1&(D!BVvtV*Vrk%R^#t zj>tUjn2_ukzp7!&HAhZGYTMTU^#_&AHP$eO>imVs3jK#EXvlMQLpy$=xTgxihL3pAcsvNNpyOcKt+eNcl1UaGP2Xt|R zx--6097X;@=Vpb(0^fBSvQ;4rJKb}1=HBW77o{tS}{AkN6Ig0$nrKo9_7u=Be)kkUIaa%)iFRWr^ zV_`MG>nTmYJG@gj-zuBH^-bN`V)Quu!f)$X5 zj)s%Q1w~C99TH1VUscx?9sE5xV9Sa8Za+-V^ObZ&%iX35sykk-7NptGK!1Pk+Pc*I zu5ww2_PZ_)fNOZ7D-$ZD(I3b@bLnU)L=O@^e|H01NjL{76Vy=AI8DRvBTqvHlyRjG z{6X?i>H<|2xQ;G`p-406;ztTBIN;n$?ot|GoE-~42(i3^S9-KkBwWMt z??xF8xIE2z58gwM&4k!8lPEABOEzl^y>uKAJOGiSz%;lvLjpigZP7NB{Fl*|oDAn` z9ZTY!mlUggAE}BC|Q;X%4$|{Ftoigl<6$vRQR^#xc~>dc}p{2fAgp^ zeeBU>3!VwO0=O}5g8bm@w5$hD@RY13K>7ycRk2;oN-sci4#E%s%o+>`n88^GB!$f_ zT#;=*%5^qS!vi1#YwWYTIG1uIklY*-;I-m_BEa#d;jk3~QtF={n4lSgkVd%N1QxDD ztTdksq;Rz%*w?fxh_@YsY@^D<;>)XqhK0!1+47tNZ1GR5Y2J;c+wSSZH94c=$lLdI zC&r-kCO7xN8*W0oVO-1WIDX2ei&}7SUj0)C{pqv11iE~iX#Y_9#n$kPwfxaO#jeqU zkfH!}NkPe*wVHinSywG9o&?EdL^Ep9P|2qcpclxf&m=@B7|1N$2zZUzqJwo0z1mw6$jHTh4m7ZSVx z`A-GJg+qQ1|L=267af&J^zxF5t1oDB4WoHu3T5@yX6_9`(4jO2_!w8H{jmBrl8@Z1 z+b&&Y{$5)njCtXG1^#AgERi&8q@@wI8$TX1l>aQ;CL>x|Gs&;EsWbMV=B1^rPKm4x z;rXd^kJ17j#&=GSa9O+xAP>pm6`mbO>1Z(TzXqtKJvs&oanDX`>SB@BU709@iRPvp zOy~=>=`%#okGgH#ho)$Z`u@n?Dh+FyGj8DbQARyNHuEQyFP6|O_f+2olEg=?)m8{G z3rfn9q<0jvTRPgl>X}t;YiRA8d3CuK-6e3}E^+3iH>W0;A~JG1Ob+MH606;Sde>HVsMXx(n?qzzq|@I02#29+Yrm6z!!QW{N6_ zkKKrA_GFHi?zQR#g#aHz0XBtq}X&SkzukWnqd&2p$iJ zvax3z!NfX{da9W~dUA#8|GK*36@_s8N+Bo6FiZHlB#A9{C09cb@tvA>YL39941)w- z7c8epX$S~Tp?7T&3mBNID-z!0;K>IiVew=N13PRCUS17KC|JrOdg@OJZow$)dm|66D%v2 zAtu0KVBu>0sO@u7H8Xn^5t09=;VKK-mJpRJ7@X2bi)M|h4gb_ za+6f@W)$=~#fqQ;Pjtj$ma%5KvCIHiSI!@d7A!*)R5Lx4hxSI1Z_!JxRfc8&@$sVY zG;>YXp7Ldp*;PifDH%s z&&L>~f4(u9yGQ|_i#W`|pN}QF|C)}-jkVoZH$GQbq#*sCybt0R%>Z#C7;Dd3ou$8g z*j4EG1U_8Tmdta!YyKZ7rsnKn`TpIF*dVQLZKH1NrSK&(_W;Pk1wr zfW&=@$3Ff|D|rbBWTFZkJ%3Ndk|E<(R_H!xLA)eybq1;#x4mU=6{c=UHo1COX?|=F zpd+&chFXZMrN{zKU?c_8ns$m0T7;Uf4u&VBa5JHsZ@CctE+A{rRsUM5%bU&c) z4|>KGU}kezwJKz=xuVPjGX1=gTu9rt0j{1i(C?@vfZ`7;hvw~?-`&7(hWu~5ovV+n z;rY0ZSpJv{vVyPF8uVT$fBGPHA^nnzxIA_H<*PkOi|aI|F}$!Ac&V-WL<+}SsPx9$ zsH3^~d_P5BhfQ>Mmzv_NSoC@(SCqP4UN;BB0n;?9V>0U*05Kz-8KH;gBY4GF#5{_y zzA9_w227|p4E*<*6yqNinQ4{FE#IadRsb_~y9=_|NrM2u>9Xl@&=aqU=L448_Bs@s zrwPqxBJ_0l1#p|;D3xb>Kn=!Fg=7+)0x0y%pDlSMjThlbJ0D!9Fp20j;~J=V_aW%zyv8ZF z^J0ky^%h>#+GoyyBjezLxhU-Sbg@#`<}!FNO47xoN-v34z;guc_d~Xxy%+k+Y1qq? 
zd8gTvhl>lo2kYT0<#JNu3;o79|7pMZ)Y21nUEB6~pb*rY()fx6K^ew1uF( z^y}{zh4|xP{tKDZ*0X7!I-L3U<2g_j%$MKl+)3hfT*79k+hr`DS)F;=po~vTIj&&L zXw|6*;k>Yangwy=IGpHd69wqx9d5e4Z52s&=L-o7QOu$3#$7qhrc5gX8Q1;=vC%jKbY#@uO~U*w5EeycBV8H%G$P_wx|3>y~|V9;#60QU_P0} zF@Z_KxDai!8dc>FkmWhZ3Q~;Ti8ozTzC@ARtf#N!U4tUjmi|Omp3A;42IIpAAgO#l z-Fn-g$hyHcP(&4R)!?{-d1%gDmxfvRKJy}ZhDmE1{CkSGv{lSDz_I;DU85GHUPo@d zp4C}1=ML?GJ8=}Xd9|p@oRoE|lC66Swy;8+^?8YfT=x@i+SP_o!dLzSvE=;4+1B90 zJ|LLJN`;0#IcGR&^gj)q-C3Jjon2$8`sLu+KEEs8G?V5cea8a!Mi3AMdzjzz_WxCM z?(t0he;hw^Crp__7!jM~wv{_mKQs4RC6d&NO%fJD*hrT(L-du!Oxq0KM4?oa$fe6% zMs7*DL~hAtZlT}#{(l~O?DF}XJ2H$@dk{uw+)UU_i8`M`EJ?(J&9XU~4jTX@QP}i=l)CmakWxy3eb(SK&rRmS) z*1I)L&KxOjD$vLh@{kta`|>Mp3B||wx*2OuH44dn=Q4uTelByz%AMsUxpAbccf9e1 z);*Uirsa)8F1Hs}oo$aVc|lVPms}|A{T_kEtyE~9j#{wGi0#e&ll1$1+$Ype!b;e+ z$?CT;>7Cm(@mALld@R{?xT-qcM|r;gQ(0oV!OPc0h^9cdjNoRYU_I3Eh1bv0VqL5W zc7qWIOvjpEDwWs_VM_)(dc2E+hNF5o?P6@w&l6> z&1ha*YF@!|GR(-{HnP_zGh2^X{my=ob zK6>k)E$S}DMykk>qE+mMWDd{{I-gnKwWWP(zw^g?a5AFqRnHJJ?Q}BNclU-$Vu#IG zavyH#UOc%WT$PL(UU!~&=MhJ5F-iIHCzeEaZk3(yq{#Rd~@wd4B!!R0s z{WCsRdAg^iKI2bgp{e+8_YS%+(63Q=NO4FKjx0lc@`&fRb^Y(c1ZQ8F%cxY-=Err$ zjpaD^N-C?`bPBGmyBbhZ`k74!tuiYFzK8ZVp zwkV-!k(}S%<3)uXDugu-%(#V3G6>dVXxSu^({prMPHeFXP^=kU_KPRrF4?OS#i$-S zp5i{ib8YanfM?H7_VM^0zFyuE>cm_5LGr|>3Saf>asi)RVN68rtli}rTsegb>F}o> zeO|H0hNSdF)$C1Ssmf+DRu|&1Jm$-m2U%@25Q(#;n=Pfqgg@C_f2+MFSV^Se%!dp41|1A3Fb9k@`qV za$n99YfoT_mg(k8yd4@wgY{fkB8zM8-&N>;=Fk(y|1xxZ1^|BvRJ-0|wtpW6SP$Z1qseeJU|8!{1=f!Mt`{@!k~uf1MMNDM#YFalQxRhJ>ZN&oa1m z-t)`64|~ujE|^agPC)$5{z>H<-5;)wO5@u{v5Sfi{RWo&c#*$|Tz6!@??JHE1Aecz zY%UtPjdDFEEMiZTHP*&0Dz5TzxzNrfFY&QqrUJv@K=$q1W3V}))CWOVkj~PrDX(f; zYc_UBaH&}Jb}hX+En~!`syDrkj)2?;D{7Ml4>JJQ(QNcq-4zy3xmYW&?P1|_*g5iE zCczq}vHL@d8I7fq8akLaJmmFv_V5_NV9Amj*( z6Pb4L|J-qXv={vyb;A1A*3_^ozV}|rkI3Cg3+B8;CTbJdR>Gefqjk|@+4m$({EE^Q z(y4YbTs^r(kvI1KG{`h-i&&u?t*s;a6KS4~`&aDIRXnv?M?db+o`EiV8TZ7{H~rzF z|D38y9mq!C#lO5Tb_o3`;TSx)uob2xvXMuXtI2eA_kMOr z^xl7U&U{M#F&vQm%6rE;Ffn}jZD_W3j)1T9PxKD<_T!eL+v_5Q>Yu-Ct%*Qas~lRg z*bL$!AnOSq83RUi>FAYX=6Z9gV$m&oOHH#BD}`ly%pgj)tz|k1X7qpzR@oxavpp#$ z%0{}`Eicy5>~Kje%|xX5`9ITBg-~?*Nr@M|C#rJ7 zVrrDRbIGYN(yt;;$2E5H3uBts{lBgAXZ>O|_BeI$N&1w21ouM=L;vL0I>DtQHcO|b zLX~BoZbxT8)86T=#*$>?Dc*-uBls<>rs+3Z@fP+~&ZiLf6+V&Uf0a#f2j59MMYVP_ z9$qu(_{0Fqx(;Alan!c-r!Gc+_{Xx$4E(0FZWE)LY6u58(yMq;n87EB!$HaQuEauX zO^~!Kd!~KDx{NMvMwks1d5YToEhEgEqWP1w=B@nnu|({NLtopaMQTp#W(H9*NXIS+ z#v_}saxS&KHv*v6Rm2L-k=dn90!q}}(bc?zR|02c|s$`r& zzgjkP{(Z(bwZFhCkBf0o4X|_bHStE;#g4C~L02DDT|IWMjepUlIL}ui#15nXW>dQ| zI~)CmyZdZZ#h~ef`=6bRR`O+rRMjg1MXe1+@`_E?>0)v1DgN%Wq})Uw^)g%yfR$>w zcy%qoP0!W|eDUf3b)m;CcCXvcIoyAY-`|S^#*cRy`ck`8qaQL4_V{eS2c~4UkIlAT z!T)M^@qR?LD5ns^;~T5HFI@Z!SW9j#6aAhXcw69XoHOmEqtt%7od2^-Z-SVbxx`py;UFB()_MY3h$1GNWN@Y5vkJA|7U%nIIvY5yUkaI`Zz$o7)uylv-`(%dG- z$HHu-TPN*p0dUQPV4f|M}Uya(9ouwf43`hk|PaI#1`4g$ z%_k|&+F~P4dYv*GCf4wgwsv2K>W-Ho>z~)Iq#quF>NXA=+r5&Z+B}6<*#y{(Glm7Tn{Y-F((IEFl=YiqJ~Cvib81B|D@2^}~yMPy8rn=?#NFhzw=9-V(b$ zd+Fq##y2C5&!EBA_?0{Rmbr^-9UsrQsarMyiPZj#qcUKr*^$BB8CvhV4Q|^cM^$e< z&rHys=rEdb%sq3|>3fI#r$1kupLbMhCLOmehMiTz3j@A z`@PH~5mtu&0@0D4&neeT9XC_$NLwqVbh_>s@o1TX{=e*jDUjx>n$~qKe{w8ha3Ds_ZHKSpE853g>jG;#6;i z@RsO0HL%|%Z^`cn@|9b{Tt`!d+)(Nr-CL_BFGedXY&I)86y82xefjSCU*HK8Uarx*Z#*O?XN4sq8Q=>xM>jsK zv0G9qgbUkzM%EmH)Kr9fZ!2gV(+udEJ7*W%p@ZYxd!iNw5@xHvxAzS8;4j4h!?yiwv20W2}RUDKmELU&k2&52Swk(J_HU(R$04< z!ng1^yj)Mxx%%ynhGBSjzIBYu9M~Iv;^ou{G3c^Ygd|>pJJ)yi4{D<5(pR5l-Xpu& z?R(1~P7Z3WA3O`kHJcB90ZVu5&a5`@=K2#%Cp=2H29Y2TU%@3Y0Q3tE5W{_23<|bX zcSn0r;6Y)ftrxK3{}GBf^Enp`$akoDicIDUyt}eNz|ovLg9VXBjWXGLeYB40a=#nNq{>C_yCi?*%qsu zVH5?V*p51YNPyREG&a-Gx=4G#UM>v7~jlU)eoM>KrKv_p1 
z6S2|Nz!C><9=cTbo;)8QAX`Q07cf-S4BENjwL`WR%+cB5lz1He4^{@%1 zVKF1*ZlZ8fu-zKfQm78D;dJ=#1i^WL0Ev}v%z=jzET$ZFz)Tp|Z+VC3!thqFyq7RD zn4Rw32oYtMe2|nB-_4KeOeM)iz8}*51y6Hf1P*mV`f3myNzq;ky&6z;r8G2vpF|W5 zB;d7YoYl^!S+IeXVzJ{j}0=sCX}LnUAZHVBn@j8`v3 zx|N51keFsCLN~!kNg&O+4eG=Qs69qHs~fGh!j`en&9=>H@OQ=67)rxJn8g1AInav) z2$qDEh~s>vP%)E5Fo5&DYppBOLD!T3iTCQwe!~S;nneL)5|$VrjSfoigye&0!CS88 zXeL7Vuu*R83EQ?u-YPg3?hiPlW*C7vFGlanwZwa;p^+S_O3@1@eADo#_kXDgj9Obp#Fi?h0X>L`WjoKF(3JYb+ndECf2|7_B4rPKBS`@ zE!fH|`{%lu-8TcgH4q{JK@0A!ut5+ht~8;#7s$cL0J!F3!>jSI;7kFehb)i@0L?6@ zfY!wbg2jT&H4sBY@kM#gCKySb)n?GK1$MUj`B(hO)><6++8pzP*K za-o9QGuYYkFmnMkCOQea>?1r3l>P|xB1!O}L!t*HcW2yHJ-6|aR~O?P#dtk<2T+p_?qC{@a4O7^CC zvwEDhr-<=tQGBE`W}0p1ADW$isTnZOb`q*Fe6Sv&Kw!>!21Lk8kO55Q97i_1cee%z z49Lu7V6GNMhPy9^@5{Adi577hR#{6f9qVWSuTd?Ss}&d9$nt3842uJ5tGby?b55Ie z=N{_`GAzV`r;+dD_>p1i#1i+JaRGy_={Lx6AZV&Fz_F@o0a$9w+;MtNgKKslHSnt` zK^t78DVZ{C`qg5!)IRiBwo@vz8pzX$;mN9+t4~RS=>ZXzK2G8N>`O?|Vemwp$y?;n zt-M9SVLu=*wahu)RAVT`)frP}fu(jdAE+8N&}o7;Rtf{cIXd18bSchYq5~jK5@bS{ zZbl%p#{wwJtix8Sfi?&=W)47U0Uw@56H6;9<%#y|k%RZ&b#$YH)AKC&R?5>T+|jsc zWCYa9O9InjFdZDs%=sEjvBNa8Pq}Y{S$HMgmFma?-Po zKXxCpa zz$$w%H=@y?v8^r!tDuKLIuSe}Xat+Nz_tQW5qiN- zP?Hc-H^EG~(1|E8Xyzf^4KNea_?B)2KHNGDgD6QCHLxCve&Y{>V_*u>tP(ud#8tdq zPD|UmMlu_`b-OrT>yCiNTgv|h3IH4weLa7fBVN^v6zhawBM6hqoc)w>Xk+G%p042s zwah!tJ3_dCRVJc%(E~Ka0ymjGL#dewUQ7!XIMgyfN9(p{=Yz=n4AuV<&<#n-_rfL; zB?yoPuT-k2d`baEwgtgA^1)iL#JHShLlj0vP9odpStkL4crW}fIOM_82*OZ3q9uNr z1H$ysF{%k|T!2>-c#1G{6sK@|)sZ0XB5281_Hj}N-gBG4^&2ZD$fmeSWb0gEiRx@2 z*cLDr+!xum)b-j^_E!=gc-(tO@Ap7>ReTKijGTvHxwiX`DoEo+c^^#D3{ z%z`_9g=5=7my8Dj)Hb@h8J^~RttpITV2)A;$t7!$8nlllPc%N2-(8}jP6X0CxH7Mo zXYZQWcZ32`VykR=es@x6j2vLR80ie7uS?{@3(Vya5H>Ynizv0qitHuB3Z28poKHr- zZ`KSVKTAB5G-mx`k*=@WhF;eBU7bIX{s|h0fZ+B~112)_J%Knb0J_v)iSgi6oE@uhhN;)o_kXfDcW zaeg8>W(O@1DLqDwk&g%c_JMEck%pNyaLRh*8Hs5FF8_I|Yl0`y zLV_&2gmo--Gqksh$=NbzNwvoUWm3OL9_D)f_8+6c5rJ|^(2Uud8gea*Q2Q65`o}!7INp+-g zWHa(0PORO^t1vWR15^+pd}VcHgo)dJ^632?^ks$Q8`=3jm5$*5fr15_8f`3OiGL@# z>7jXCO=XR0as>JXo8~QF4bJ35*EQ1x^Qy1$xPj{w9t`F;ZA@ z6|OD=!xC>>Wls~$JT1E+SB2a$nBXc09pl054j*0+U z>y`ft@~h=c(6XGNFreV)hW9QKUu78vurj#niBg;iI;I$s#gLw)BEbE8UNF8fL6ec$ zQ1gLE<2!v0=L<`tBm=7>9023xw;W|j&^^P5!=hOlkvLIiPY4rCbN>m9)o2CAGhAhR zC}bF9XA7~3djJ5(R$z2NEi7gV>rRxm`3Z@1a$zE^Iw7eHD2(K$2`U$0_=AL%H%N>} zYwgR{Q{mWF1HY=HKvAf>2;C54L$uV8AWU0u`I<6k%5@_LX$!96+EmwbIayQ)4Dxs2 E|L3^AQ2+n{ literal 0 HcmV?d00001 diff --git a/examples/diffusion/python_stable_diffusion_3/export_onnx.py b/examples/diffusion/python_stable_diffusion_3/export_onnx.py index 6c3e9fc826e..48658876de7 100644 --- a/examples/diffusion/python_stable_diffusion_3/export_onnx.py +++ b/examples/diffusion/python_stable_diffusion_3/export_onnx.py @@ -1,50 +1,74 @@ +# The MIT License (MIT) +# +# Copyright (c) 2015-2024 Advanced Micro Devices, Inc. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the 'Software'), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +from argparse import ArgumentParser import torch from diffusers import StableDiffusion3Pipeline import os -pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16) -# pipe = pipe.to("cuda") -# print(pipe) -# print(pipe.text_encoder) -x=torch.randint(1, (1, 77)) -# pipe.text_encoder.eval() -output_path='models' -encoder_path=output_path+'/text_encoder/model.onnx' -encoder_2_path=output_path+'/text_encoder_2/model.onnx' -encoder_3_path=output_path+'/text_encoder_3/model.onnx' -print(output_path) -# os.makedirs(os.path.dirname(output_path), exist_ok=True) -os.makedirs(os.path.dirname(encoder_path), exist_ok=True) -os.makedirs(os.path.dirname(encoder_2_path), exist_ok=True) -os.makedirs(os.path.dirname(encoder_3_path), exist_ok=True) +def argparser(): + parser = ArgumentParser() + parser.add_argument( + "-o", + "--output_path", + type=str, + default="models/sd3", + help= + "Path to save the onnx model. Use it to override the default models/sd3 path." + ) + return parser.parse_args() -torch.onnx.export(pipe.text_encoder, - x, - encoder_path, - export_params=True, - do_constant_folding=True, - input_names=['input_ids'], - dynamic_axes={'input_ids': { 0: 'batch_size'}}) -torch.onnx.export(pipe.text_encoder_2, - x, - encoder_2_path, - export_params=True, - do_constant_folding=True, - input_names=['input_ids'], - dynamic_axes={'input_ids': { 0: 'batch_size'}}) -torch.onnx.export(pipe.text_encoder_3, - x, - encoder_3_path, - export_params=True, - do_constant_folding=True, - input_names=['input_ids'], - dynamic_axes={'input_ids': { 0: 'batch_size'}}) +def export_encoders(output_path): + pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16) + x=torch.randint(1, (1, 77)) + encoder_path=output_path+'/text_encoder/model.onnx' + encoder_2_path=output_path+'/text_encoder_2/model.onnx' + encoder_3_path=output_path+'/text_encoder_3/model.onnx' + os.makedirs(os.path.dirname(encoder_path), exist_ok=True) + os.makedirs(os.path.dirname(encoder_2_path), exist_ok=True) + os.makedirs(os.path.dirname(encoder_3_path), exist_ok=True) + torch.onnx.export(pipe.text_encoder, + x, + encoder_path, + export_params=True, + do_constant_folding=True, + input_names=['input_ids'], + dynamic_axes={'input_ids': { 0: 'batch_size'}}) + torch.onnx.export(pipe.text_encoder_2, + x, + encoder_2_path, + export_params=True, + do_constant_folding=True, + input_names=['input_ids'], + dynamic_axes={'input_ids': { 0: 'batch_size'}}) + torch.onnx.export(pipe.text_encoder_3, + x, + encoder_3_path, + export_params=True, + do_constant_folding=True, + input_names=['input_ids'], + dynamic_axes={'input_ids': { 0: 'batch_size'}}) -# export_options = torch.onnx.ExportOptions(dynamic_shapes=True) -# onnx_program = torch.onnx.dynamo_export( -# pipe.text_encoder, -# *x, -# # **kwargs, -# export_options=export_options) -# onnx_program.save("text_encoder.onnx") +if __name__ == "__main__": + args = argparser() + 
export_encoders(**vars(args)) diff --git a/examples/diffusion/python_stable_diffusion_3/requirements.txt b/examples/diffusion/python_stable_diffusion_3/requirements.txt index 5b930488c29..678bd7d749e 100644 --- a/examples/diffusion/python_stable_diffusion_3/requirements.txt +++ b/examples/diffusion/python_stable_diffusion_3/requirements.txt @@ -1,5 +1,31 @@ +##################################################################################### +# The MIT License (MIT) +# +# Copyright (c) 2015-2024 Advanced Micro Devices, Inc. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +##################################################################################### + diffusers==0.30.3 einops==0.8.0 +onnx==1.17.0 +protobuf==5.28.3 transformers==4.46.0 tiktoken==0.8.0 sentencepiece==0.2.0 diff --git a/examples/diffusion/python_stable_diffusion_3/torch_requirements.txt b/examples/diffusion/python_stable_diffusion_3/torch_requirements.txt new file mode 100644 index 00000000000..bbd3939d39d --- /dev/null +++ b/examples/diffusion/python_stable_diffusion_3/torch_requirements.txt @@ -0,0 +1,25 @@ +##################################################################################### +# The MIT License (MIT) +# +# Copyright (c) 2015-2024 Advanced Micro Devices, Inc. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+##################################################################################### +--index-url https://download.pytorch.org/whl/rocm6.2/ +torch diff --git a/examples/diffusion/python_stable_diffusion_3/txt2img.py b/examples/diffusion/python_stable_diffusion_3/txt2img.py index fccd474afab..39bad46de81 100644 --- a/examples/diffusion/python_stable_diffusion_3/txt2img.py +++ b/examples/diffusion/python_stable_diffusion_3/txt2img.py @@ -296,24 +296,15 @@ def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, } self.events = { - # "warmup": - # HipEventPair(start=hip.hipEventCreate()[1], - # end=hip.hipEventCreate()[1]), + "warmup": + HipEventPair(start=hip.hipEventCreate()[1], + end=hip.hipEventCreate()[1]), "run": HipEventPair(start=hip.hipEventCreate()[1], end=hip.hipEventCreate()[1]), "clip": HipEventPair(start=hip.hipEventCreate()[1], end=hip.hipEventCreate()[1]), - # "clip-g": - # HipEventPair(start=hip.hipEventCreate()[1], - # end=hip.hipEventCreate()[1]), - # "clip-l": - # HipEventPair(start=hip.hipEventCreate()[1], - # end=hip.hipEventCreate()[1]), - # "t5xxl": - # HipEventPair(start=hip.hipEventCreate()[1], - # end=hip.hipEventCreate()[1]), "denoise": HipEventPair(start=hip.hipEventCreate()[1], end=hip.hipEventCreate()[1]), @@ -344,9 +335,6 @@ def run(self, prompt, negative_prompt, steps, seed, scale): torch.cuda.synchronize() self.profile_start("run") - # need to set this for each run - # self.scheduler.set_timesteps(steps, device="cuda") - print("Tokenizing prompts...") prompt_tokens = self.tokenize(prompt) neg_prompt_tokens = self.tokenize(negative_prompt) @@ -357,23 +345,15 @@ def run(self, prompt, negative_prompt, steps, seed, scale): neg_prompt_embeddings = self.get_embeddings(neg_prompt_tokens) self.profile_end("clip") - # print("Apply initial noise sigma\n") - # latents = latents * self.scheduler.init_noise_sigma - cfg_scale = 5 latent = self.get_empty_latent(1024, 1024) print("Running denoising loop...") self.profile_start("denoise") latent = self.do_sampling(latent, seed, prompt_embeddings, neg_prompt_embeddings, steps, cfg_scale) - # for step, t in enumerate(self.scheduler.timesteps): - # print(f"#{step}/{len(self.scheduler.timesteps)} step") - # latents = self.denoise_step(text_embeddings, latents, t, scale) + self.profile_end("denoise") - # print("Scale denoised result...") - # latents = 1 / 0.18215 * latents - self.profile_start("decode") print("Decode denoised result...") image = self.decode(latent) @@ -384,9 +364,9 @@ def run(self, prompt, negative_prompt, steps, seed, scale): return image def print_summary(self, denoise_steps): - # print('WARMUP\t{:>9.2f} ms'.format( - # hip.hipEventElapsedTime(self.events['warmup'].start, - # self.events['warmup'].end)[1])) + print('WARMUP\t{:>9.2f} ms'.format( + hip.hipEventElapsedTime(self.events['warmup'].start, + self.events['warmup'].end)[1])) print('CLIP\t{:>9.2f} ms'.format( hip.hipEventElapsedTime(self.events['clip'].start, self.events['clip'].end)[1])) @@ -446,7 +426,6 @@ def tokenize(self, prompt): def encode_token_weights(self, model_name, token_weight_pairs): tokens = list(map(lambda a: a[0], token_weight_pairs[0])) tokens = torch.tensor([tokens], dtype=torch.int64, device=self.device) - # print(f'token val: {tokens.flatten()[0:5]}') copy_tensor_sync(self.tensors[model_name]["input_ids"], tokens.to(torch.int32)) run_model_sync(self.models[model_name], self.model_args[model_name]) @@ -462,9 +441,7 @@ def encode_token_weights(self, model_name, token_weight_pairs): else: first_pooled = encoder_out2 output 
= [encoder_out[0:1]] - # print(f'token weight output for model {model_name}: {output[0].flatten()[0:5]}') - # if first_pooled is not None: - # print(f'token weight first_pooled for model {model_name}: {first_pooled.flatten()[0:5]}') + return torch.cat(output, dim=-2), first_pooled @@ -475,10 +452,6 @@ def get_embeddings(self, prompt_tokens): t5_out, _ = self.encode_token_weights("t5xxl", prompt_tokens["t5xxl"]) lg_out = torch.cat([l_out, g_out], dim=-1) lg_out = torch.nn.functional.pad(lg_out, (0, 4096 - lg_out.shape[-1])) - print(f"lg_out shape: {lg_out.shape}") - print(f"t5_out shape: {t5_out.shape}") - print(f"l_pooled shape: {l_pooled.shape}") - print(f"g_pooled shape: {g_pooled.shape}") return torch.cat([lg_out, t5_out], dim=-2), torch.cat((l_pooled, g_pooled), dim=-1) @@ -500,12 +473,7 @@ def CFGDenoiser(self, x, timestep, cond, uncond, cond_scale): timestep = torch.cat([timestep, timestep]) c_crossattn = torch.cat([cond["c_crossattn"], uncond["c_crossattn"]]) y = torch.cat([cond["y"], uncond["y"]]) - # print(f'x out: {x.flatten()[0:5]}') - # print(f'timestep out: {timestep.flatten()[0:5]}') - # print(f'c_crossattn out: {c_crossattn.flatten()[0:5]}') - # print(f'y out: {y.flatten()[0:5]}') - # batched = self.model.apply_model(torch.cat([x, x]), torch.cat([timestep, timestep]), c_crossattn=torch.cat([cond["c_crossattn"], uncond["c_crossattn"]]), y=torch.cat([cond["y"], uncond["y"]])) copy_tensor_sync(self.tensors["mmdit"]["sample"], x) copy_tensor_sync(self.tensors["mmdit"]["sigma"], timestep) copy_tensor_sync(self.tensors["mmdit"]["c_crossattn"], c_crossattn) @@ -513,13 +481,10 @@ def CFGDenoiser(self, x, timestep, cond, uncond, cond_scale): run_model_sync(self.models["mmdit"], self.model_args['mmdit']) + # Then split and apply CFG Scaling pos_out, neg_out = torch.tensor_split( self.tensors["mmdit"][get_output_name(0)], 2) - # print(f'mmdit pos out: {pos_out.flatten()[0:5]}') - # print(f'mmdit neg_out out: {neg_out.flatten()[0:5]}') - # Then split and apply CFG Scaling - # pos_out, neg_out = batched.chunk(2) scaled = neg_out + (pos_out - neg_out) * cond_scale return scaled @@ -548,7 +513,6 @@ def to_d(x, sigma, denoised): return x def get_empty_latent(self, width, height): - # print("Prep an empty latent...") return torch.ones(1, 16, height // 8, width // 8, device="cpu") * 0.0609 def get_sigmas(self, sampling, steps): @@ -578,9 +542,7 @@ def fix_cond(self, cond): def do_sampling(self, latent, seed, conditioning, neg_cond, steps, cfg_scale, denoise=1.0) -> torch.Tensor: latent = latent.half().cuda() - # print(f'latent vals: {latent.flatten()[0:5]}') noise = self.get_noise(seed, latent).cuda() - # print(f'noise vals: {noise.flatten()[0:5]}') sigmas = self.get_sigmas(self.model_sampling, steps).cuda() sigmas = sigmas[int(steps * (1 - denoise)):] conditioning = self.fix_cond(conditioning) @@ -590,29 +552,6 @@ def do_sampling(self, latent, seed, conditioning, neg_cond, steps, cfg_scale, de latent = SD3LatentFormat().process_out(latent) return latent - @measure - def denoise_step(self, text_embeddings, latents, t, scale): - latents_model_input = torch.cat([latents] * 2) - latents_model_input = self.scheduler.scale_model_input( - latents_model_input, t).to(torch.float32).to(device="cuda") - timestep = torch.atleast_1d(t.to(torch.int64)).to( - device="cuda") # convert 0D -> 1D - - copy_tensor_sync(self.tensors["unet"]["sample"], latents_model_input) - copy_tensor_sync(self.tensors["unet"]["encoder_hidden_states"], - text_embeddings) - copy_tensor_sync(self.tensors["unet"]["timestep"], 
timestep) - run_model_sync(self.models["unet"], self.model_args['unet']) - - noise_pred_text, noise_pred_uncond = torch.tensor_split( - self.tensors["unet"][get_output_name(0)], 2) - - # perform guidance - noise_pred = noise_pred_uncond + scale * (noise_pred_text - - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - return self.scheduler.step(noise_pred, t, latents).prev_sample @measure def decode(self, latents): @@ -633,17 +572,23 @@ def warmup(self, num_runs): self.tensors["mmdit"]["sample"], torch.randn((2 * self.batch, 16, 128, 128)).to(torch.float16)) copy_tensor_sync( - self.tensors["unet"]["encoder_hidden_states"], - torch.randn((2 * self.batch, 77, 1024)).to(torch.float32)) - copy_tensor_sync(self.tensors["unet"]["timestep"], - torch.atleast_1d(torch.randn(1).to(torch.int64))) + self.tensors["mmdit"]["sigma"], + torch.randn((2 * self.batch)).to(torch.float16)) + copy_tensor_sync( + self.tensors["mmdit"]["c_crossattn"], + torch.randn((2 * self.batch, 154, 4096)).to(torch.float16)) + copy_tensor_sync( + self.tensors["mmdit"]["y"], + torch.randn((2 * self.batch, 2048)).to(torch.float16)) copy_tensor_sync( - self.tensors["vae"]["latent_sample"], - torch.randn((self.batch, 4, 64, 64)).to(torch.float32)) + self.tensors["vae"]["latent"], + torch.randn((self.batch, 16, 128, 128)).to(torch.float16)) for _ in range(num_runs): - run_model_sync(self.models["clip"], self.model_args["clip"]) - run_model_sync(self.models["unet"], self.model_args["unet"]) + run_model_sync(self.models["clip-l"], self.model_args["clip-l"]) + run_model_sync(self.models["clip-g"], self.model_args["clip-g"]) + run_model_sync(self.models["t5xxl"], self.model_args["t5xxl"]) + run_model_sync(self.models["mmdit"], self.model_args["mmdit"]) run_model_sync(self.models["vae"], self.model_args["vae"]) self.profile_end("warmup") @@ -654,8 +599,8 @@ def warmup(self, num_runs): sd = StableDiffusionMGX(args.onnx_model_path, args.compiled_model_path, args.fp16, args.batch, args.force_compile, args.exhaustive_tune) - # print("Warmup") - # sd.warmup(5) + print("Warmup") + sd.warmup(5) print("Run") result = sd.run(args.prompt, args.negative_prompt, args.steps, args.seed, args.scale) From e0082544ab47c459fd153e2d795f2982460abc50 Mon Sep 17 00:00:00 2001 From: kahmed10 <15948690+kahmed10@users.noreply.github.com> Date: Fri, 1 Nov 2024 18:04:44 -0500 Subject: [PATCH 09/17] formatting --- .../python_stable_diffusion_3/export_onnx.py | 57 ++++++++------ .../python_stable_diffusion_3/txt2img.py | 74 +++++++++++-------- 2 files changed, 78 insertions(+), 53 deletions(-) diff --git a/examples/diffusion/python_stable_diffusion_3/export_onnx.py b/examples/diffusion/python_stable_diffusion_3/export_onnx.py index 48658876de7..65f6f2477d4 100644 --- a/examples/diffusion/python_stable_diffusion_3/export_onnx.py +++ b/examples/diffusion/python_stable_diffusion_3/export_onnx.py @@ -25,6 +25,7 @@ from diffusers import StableDiffusion3Pipeline import os + def argparser(): parser = ArgumentParser() parser.add_argument( @@ -37,37 +38,47 @@ def argparser(): ) return parser.parse_args() + def export_encoders(output_path): - pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16) - x=torch.randint(1, (1, 77)) - encoder_path=output_path+'/text_encoder/model.onnx' - encoder_2_path=output_path+'/text_encoder_2/model.onnx' - encoder_3_path=output_path+'/text_encoder_3/model.onnx' + pipe = StableDiffusion3Pipeline.from_pretrained( + 
"stabilityai/stable-diffusion-3-medium-diffusers", + torch_dtype=torch.float16) + x = torch.randint(1, (1, 77)) + encoder_path = output_path + '/text_encoder/model.onnx' + encoder_2_path = output_path + '/text_encoder_2/model.onnx' + encoder_3_path = output_path + '/text_encoder_3/model.onnx' os.makedirs(os.path.dirname(encoder_path), exist_ok=True) os.makedirs(os.path.dirname(encoder_2_path), exist_ok=True) os.makedirs(os.path.dirname(encoder_3_path), exist_ok=True) torch.onnx.export(pipe.text_encoder, - x, - encoder_path, - export_params=True, - do_constant_folding=True, - input_names=['input_ids'], - dynamic_axes={'input_ids': { 0: 'batch_size'}}) + x, + encoder_path, + export_params=True, + do_constant_folding=True, + input_names=['input_ids'], + dynamic_axes={'input_ids': { + 0: 'batch_size' + }}) torch.onnx.export(pipe.text_encoder_2, - x, - encoder_2_path, - export_params=True, - do_constant_folding=True, - input_names=['input_ids'], - dynamic_axes={'input_ids': { 0: 'batch_size'}}) + x, + encoder_2_path, + export_params=True, + do_constant_folding=True, + input_names=['input_ids'], + dynamic_axes={'input_ids': { + 0: 'batch_size' + }}) torch.onnx.export(pipe.text_encoder_3, - x, - encoder_3_path, - export_params=True, - do_constant_folding=True, - input_names=['input_ids'], - dynamic_axes={'input_ids': { 0: 'batch_size'}}) + x, + encoder_3_path, + export_params=True, + do_constant_folding=True, + input_names=['input_ids'], + dynamic_axes={'input_ids': { + 0: 'batch_size' + }}) + if __name__ == "__main__": args = argparser() diff --git a/examples/diffusion/python_stable_diffusion_3/txt2img.py b/examples/diffusion/python_stable_diffusion_3/txt2img.py index 39bad46de81..5c15f1bb834 100644 --- a/examples/diffusion/python_stable_diffusion_3/txt2img.py +++ b/examples/diffusion/python_stable_diffusion_3/txt2img.py @@ -350,8 +350,9 @@ def run(self, prompt, negative_prompt, steps, seed, scale): print("Running denoising loop...") self.profile_start("denoise") - latent = self.do_sampling(latent, seed, prompt_embeddings, neg_prompt_embeddings, steps, cfg_scale) - + latent = self.do_sampling(latent, seed, prompt_embeddings, + neg_prompt_embeddings, steps, cfg_scale) + self.profile_end("denoise") self.profile_start("decode") @@ -441,20 +442,21 @@ def encode_token_weights(self, model_name, token_weight_pairs): else: first_pooled = encoder_out2 output = [encoder_out[0:1]] - - return torch.cat(output, dim=-2), first_pooled + return torch.cat(output, dim=-2), first_pooled @measure def get_embeddings(self, prompt_tokens): - l_out, l_pooled = self.encode_token_weights("clip-l", prompt_tokens["l"]) - g_out, g_pooled = self.encode_token_weights("clip-g", prompt_tokens["g"]) + l_out, l_pooled = self.encode_token_weights("clip-l", + prompt_tokens["l"]) + g_out, g_pooled = self.encode_token_weights("clip-g", + prompt_tokens["g"]) t5_out, _ = self.encode_token_weights("t5xxl", prompt_tokens["t5xxl"]) lg_out = torch.cat([l_out, g_out], dim=-1) lg_out = torch.nn.functional.pad(lg_out, (0, 4096 - lg_out.shape[-1])) - return torch.cat([lg_out, t5_out], dim=-2), torch.cat((l_pooled, g_pooled), dim=-1) - + return torch.cat([lg_out, t5_out], dim=-2), torch.cat( + (l_pooled, g_pooled), dim=-1) @staticmethod def convert_to_rgb_image(image): @@ -478,7 +480,7 @@ def CFGDenoiser(self, x, timestep, cond, uncond, cond_scale): copy_tensor_sync(self.tensors["mmdit"]["sigma"], timestep) copy_tensor_sync(self.tensors["mmdit"]["c_crossattn"], c_crossattn) copy_tensor_sync(self.tensors["mmdit"]["y"], y) - + 
run_model_sync(self.models["mmdit"], self.model_args['mmdit']) # Then split and apply CFG Scaling @@ -488,24 +490,23 @@ def CFGDenoiser(self, x, timestep, cond, uncond, cond_scale): scaled = neg_out + (pos_out - neg_out) * cond_scale return scaled - - def sample_euler(self, x, sigmas, conditioning, neg_cond, cfg_scale): def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim - return x[(...,) + (None,) * dims_to_append] + return x[(..., ) + (None, ) * dims_to_append] def to_d(x, sigma, denoised): """Converts a denoiser output to a Karras ODE derivative.""" return (x - denoised) / append_dims(sigma, x.ndim) - + """Implements Algorithm 2 (Euler steps) from Karras et al. (2022).""" # extra_args = {} if extra_args is None else extra_args s_in = x.new_ones([x.shape[0]]) for i in range(len(sigmas) - 1): sigma_hat = sigmas[i] - denoised = self.CFGDenoiser(x, sigma_hat * s_in, conditioning, neg_cond, cfg_scale) + denoised = self.CFGDenoiser(x, sigma_hat * s_in, conditioning, + neg_cond, cfg_scale) d = to_d(x, sigma_hat, denoised) dt = sigmas[i + 1] - sigma_hat # Euler method @@ -513,7 +514,8 @@ def to_d(x, sigma, denoised): return x def get_empty_latent(self, width, height): - return torch.ones(1, 16, height // 8, width // 8, device="cpu") * 0.0609 + return torch.ones(1, 16, height // 8, width // 8, + device="cpu") * 0.0609 def get_sigmas(self, sampling, steps): start = sampling.timestep(sampling.sigma_max) @@ -528,31 +530,45 @@ def get_sigmas(self, sampling, steps): def get_noise(self, seed, latent): generator = torch.manual_seed(seed) - print(f"dtype = {latent.dtype}, layout = {latent.layout}, device = {latent.device}") - return torch.randn(latent.size(), dtype=torch.float32, layout=latent.layout, generator=generator).to(latent.dtype) + print( + f"dtype = {latent.dtype}, layout = {latent.layout}, device = {latent.device}" + ) + return torch.randn(latent.size(), + dtype=torch.float32, + layout=latent.layout, + generator=generator).to(latent.dtype) def max_denoise(self, sigmas): max_sigma = float(self.model_sampling.sigma_max) sigma = float(sigmas[0]) - return math.isclose(max_sigma, sigma, rel_tol=1e-05) or sigma > max_sigma + return math.isclose(max_sigma, sigma, + rel_tol=1e-05) or sigma > max_sigma def fix_cond(self, cond): cond, pooled = (cond[0].half().cuda(), cond[1].half().cuda()) - return { "c_crossattn": cond, "y": pooled } - - def do_sampling(self, latent, seed, conditioning, neg_cond, steps, cfg_scale, denoise=1.0) -> torch.Tensor: + return {"c_crossattn": cond, "y": pooled} + + def do_sampling(self, + latent, + seed, + conditioning, + neg_cond, + steps, + cfg_scale, + denoise=1.0) -> torch.Tensor: latent = latent.half().cuda() noise = self.get_noise(seed, latent).cuda() sigmas = self.get_sigmas(self.model_sampling, steps).cuda() sigmas = sigmas[int(steps * (1 - denoise)):] conditioning = self.fix_cond(conditioning) neg_cond = self.fix_cond(neg_cond) - noise_scaled = self.model_sampling.noise_scaling(sigmas[0], noise, latent, self.max_denoise(sigmas)) - latent = self.sample_euler(noise_scaled, sigmas, conditioning, neg_cond, cfg_scale) + noise_scaled = self.model_sampling.noise_scaling( + sigmas[0], noise, latent, self.max_denoise(sigmas)) + latent = self.sample_euler(noise_scaled, sigmas, conditioning, + neg_cond, cfg_scale) latent = SD3LatentFormat().process_out(latent) return latent - @measure def decode(self, latents): copy_tensor_sync(self.tensors["vae"]["latent"], latents) @@ 
-571,15 +587,13 @@ def warmup(self, num_runs): copy_tensor_sync( self.tensors["mmdit"]["sample"], torch.randn((2 * self.batch, 16, 128, 128)).to(torch.float16)) - copy_tensor_sync( - self.tensors["mmdit"]["sigma"], - torch.randn((2 * self.batch)).to(torch.float16)) + copy_tensor_sync(self.tensors["mmdit"]["sigma"], + torch.randn((2 * self.batch)).to(torch.float16)) copy_tensor_sync( self.tensors["mmdit"]["c_crossattn"], torch.randn((2 * self.batch, 154, 4096)).to(torch.float16)) - copy_tensor_sync( - self.tensors["mmdit"]["y"], - torch.randn((2 * self.batch, 2048)).to(torch.float16)) + copy_tensor_sync(self.tensors["mmdit"]["y"], + torch.randn((2 * self.batch, 2048)).to(torch.float16)) copy_tensor_sync( self.tensors["vae"]["latent"], torch.randn((self.batch, 16, 128, 128)).to(torch.float16)) From 5665e2e352d95db2529768ba4540d8b56d7b6da6 Mon Sep 17 00:00:00 2001 From: kahmed10 <15948690+kahmed10@users.noreply.github.com> Date: Wed, 6 Nov 2024 17:06:40 -0600 Subject: [PATCH 10/17] updated version of script using optimum transformer --- .../python_stable_diffusion_3/other_impls.py | 515 ------------------ .../python_stable_diffusion_3/txt2img.py | 174 +++--- 2 files changed, 67 insertions(+), 622 deletions(-) delete mode 100644 examples/diffusion/python_stable_diffusion_3/other_impls.py diff --git a/examples/diffusion/python_stable_diffusion_3/other_impls.py b/examples/diffusion/python_stable_diffusion_3/other_impls.py deleted file mode 100644 index 02291ba4159..00000000000 --- a/examples/diffusion/python_stable_diffusion_3/other_impls.py +++ /dev/null @@ -1,515 +0,0 @@ -# MIT License - -# Copyright (c) 2024 Stability AI - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
- -# Some code in `other_impls` originates from HuggingFace and is subject to [the HuggingFace Transformers Apache2 License](https://github.com/huggingface/transformers/blob/main/LICENSE) -### This file contains impls for underlying related models (CLIP, T5, etc) - -import torch, math -from torch import nn -from transformers import CLIPTokenizer, T5TokenizerFast - - -################################################################################################# -### Core/Utility -################################################################################################# - - -def attention(q, k, v, heads, mask=None): - """Convenience wrapper around a basic attention operation""" - b, _, dim_head = q.shape - dim_head //= heads - q, k, v = map(lambda t: t.view(b, -1, heads, dim_head).transpose(1, 2), (q, k, v)) - out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False) - return out.transpose(1, 2).reshape(b, -1, heads * dim_head) - - -class Mlp(nn.Module): - """ MLP as used in Vision Transformer, MLP-Mixer and related networks""" - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, dtype=None, device=None): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - - self.fc1 = nn.Linear(in_features, hidden_features, bias=bias, dtype=dtype, device=device) - self.act = act_layer - self.fc2 = nn.Linear(hidden_features, out_features, bias=bias, dtype=dtype, device=device) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.fc2(x) - return x - - -################################################################################################# -### CLIP -################################################################################################# - - -class CLIPAttention(torch.nn.Module): - def __init__(self, embed_dim, heads, dtype, device): - super().__init__() - self.heads = heads - self.q_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) - self.k_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) - self.v_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) - self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) - - def forward(self, x, mask=None): - q = self.q_proj(x) - k = self.k_proj(x) - v = self.v_proj(x) - out = attention(q, k, v, self.heads, mask) - return self.out_proj(out) - - -ACTIVATIONS = { - "quick_gelu": lambda a: a * torch.sigmoid(1.702 * a), - "gelu": torch.nn.functional.gelu, -} - -class CLIPLayer(torch.nn.Module): - def __init__(self, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device): - super().__init__() - self.layer_norm1 = nn.LayerNorm(embed_dim, dtype=dtype, device=device) - self.self_attn = CLIPAttention(embed_dim, heads, dtype, device) - self.layer_norm2 = nn.LayerNorm(embed_dim, dtype=dtype, device=device) - #self.mlp = CLIPMLP(embed_dim, intermediate_size, intermediate_activation, dtype, device) - self.mlp = Mlp(embed_dim, intermediate_size, embed_dim, act_layer=ACTIVATIONS[intermediate_activation], dtype=dtype, device=device) - - def forward(self, x, mask=None): - x += self.self_attn(self.layer_norm1(x), mask) - x += self.mlp(self.layer_norm2(x)) - return x - - -class CLIPEncoder(torch.nn.Module): - def __init__(self, num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device): - super().__init__() - 
self.layers = torch.nn.ModuleList([CLIPLayer(embed_dim, heads, intermediate_size, intermediate_activation, dtype, device) for i in range(num_layers)]) - - def forward(self, x, mask=None, intermediate_output=None): - if intermediate_output is not None: - if intermediate_output < 0: - intermediate_output = len(self.layers) + intermediate_output - intermediate = None - for i, l in enumerate(self.layers): - x = l(x, mask) - if i == intermediate_output: - intermediate = x.clone() - return x, intermediate - - -class CLIPEmbeddings(torch.nn.Module): - def __init__(self, embed_dim, vocab_size=49408, num_positions=77, dtype=None, device=None): - super().__init__() - self.token_embedding = torch.nn.Embedding(vocab_size, embed_dim, dtype=dtype, device=device) - self.position_embedding = torch.nn.Embedding(num_positions, embed_dim, dtype=dtype, device=device) - - def forward(self, input_tokens): - return self.token_embedding(input_tokens) + self.position_embedding.weight - - -class CLIPTextModel_(torch.nn.Module): - def __init__(self, config_dict, dtype, device): - num_layers = config_dict["num_hidden_layers"] - embed_dim = config_dict["hidden_size"] - heads = config_dict["num_attention_heads"] - intermediate_size = config_dict["intermediate_size"] - intermediate_activation = config_dict["hidden_act"] - super().__init__() - self.embeddings = CLIPEmbeddings(embed_dim, dtype=torch.float32, device=device) - self.encoder = CLIPEncoder(num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device) - self.final_layer_norm = nn.LayerNorm(embed_dim, dtype=dtype, device=device) - - def forward(self, input_tokens, intermediate_output=None, final_layer_norm_intermediate=True): - x = self.embeddings(input_tokens) - causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1) - x, i = self.encoder(x, mask=causal_mask, intermediate_output=intermediate_output) - x = self.final_layer_norm(x) - if i is not None and final_layer_norm_intermediate: - i = self.final_layer_norm(i) - pooled_output = x[torch.arange(x.shape[0], device=x.device), input_tokens.to(dtype=torch.int, device=x.device).argmax(dim=-1),] - return x, i, pooled_output - - -class CLIPTextModel(torch.nn.Module): - def __init__(self, config_dict, dtype, device): - super().__init__() - self.num_layers = config_dict["num_hidden_layers"] - self.text_model = CLIPTextModel_(config_dict, dtype, device) - embed_dim = config_dict["hidden_size"] - self.text_projection = nn.Linear(embed_dim, embed_dim, bias=False, dtype=dtype, device=device) - self.text_projection.weight.copy_(torch.eye(embed_dim)) - self.dtype = dtype - - def get_input_embeddings(self): - return self.text_model.embeddings.token_embedding - - def set_input_embeddings(self, embeddings): - self.text_model.embeddings.token_embedding = embeddings - - def forward(self, *args, **kwargs): - x = self.text_model(*args, **kwargs) - out = self.text_projection(x[2]) - return (x[0], x[1], out, x[2]) - - -class SDTokenizer: - def __init__(self, max_length=77, pad_with_end=True, tokenizer=None, has_start_token=True, pad_to_max_length=True, min_length=None): - self.tokenizer = tokenizer - self.max_length = max_length - self.min_length = min_length - empty = self.tokenizer('')["input_ids"] - if has_start_token: - self.tokens_start = 1 - self.start_token = empty[0] - self.end_token = empty[1] - else: - self.tokens_start = 0 - self.start_token = None - self.end_token = empty[0] - self.pad_with_end = pad_with_end - self.pad_to_max_length = 
pad_to_max_length - vocab = self.tokenizer.get_vocab() - self.inv_vocab = {v: k for k, v in vocab.items()} - self.max_word_length = 8 - - - def tokenize_with_weights(self, text:str): - """Tokenize the text, with weight values - presume 1.0 for all and ignore other features here. The details aren't relevant for a reference impl, and weights themselves has weak effect on SD3.""" - if self.pad_with_end: - pad_token = self.end_token - else: - pad_token = 0 - batch = [] - if self.start_token is not None: - batch.append((self.start_token, 1.0)) - to_tokenize = text.replace("\n", " ").split(' ') - to_tokenize = [x for x in to_tokenize if x != ""] - for word in to_tokenize: - batch.extend([(t, 1) for t in self.tokenizer(word)["input_ids"][self.tokens_start:-1]]) - batch.append((self.end_token, 1.0)) - if self.pad_to_max_length: - batch.extend([(pad_token, 1.0)] * (self.max_length - len(batch))) - if self.min_length is not None and len(batch) < self.min_length: - batch.extend([(pad_token, 1.0)] * (self.min_length - len(batch))) - return [batch] - - -class SDXLClipGTokenizer(SDTokenizer): - def __init__(self, tokenizer): - super().__init__(pad_with_end=False, tokenizer=tokenizer) - - -class SD3Tokenizer: - def __init__(self): - clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") - self.clip_l = SDTokenizer(tokenizer=clip_tokenizer) - self.clip_g = SDXLClipGTokenizer(clip_tokenizer) - self.t5xxl = T5XXLTokenizer() - - def tokenize_with_weights(self, text:str): - out = {} - out["g"] = self.clip_g.tokenize_with_weights(text) - out["l"] = self.clip_l.tokenize_with_weights(text) - out["t5xxl"] = self.t5xxl.tokenize_with_weights(text) - return out - - -class ClipTokenWeightEncoder: - def encode_token_weights(self, token_weight_pairs): - tokens = list(map(lambda a: a[0], token_weight_pairs[0])) - out, pooled = self([tokens]) - if pooled is not None: - first_pooled = pooled[0:1].cpu() - else: - first_pooled = pooled - output = [out[0:1]] - return torch.cat(output, dim=-2).cpu(), first_pooled - - -class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): - """Uses the CLIP transformer encoder for text (from huggingface)""" - LAYERS = ["last", "pooled", "hidden"] - def __init__(self, device="cpu", max_length=77, layer="last", layer_idx=None, textmodel_json_config=None, dtype=None, model_class=CLIPTextModel, - special_tokens={"start": 49406, "end": 49407, "pad": 49407}, layer_norm_hidden_state=True, return_projected_pooled=True): - super().__init__() - assert layer in self.LAYERS - self.transformer = model_class(textmodel_json_config, dtype, device) - self.num_layers = self.transformer.num_layers - self.max_length = max_length - self.transformer = self.transformer.eval() - for param in self.parameters(): - param.requires_grad = False - self.layer = layer - self.layer_idx = None - self.special_tokens = special_tokens - self.logit_scale = torch.nn.Parameter(torch.tensor(4.6055)) - self.layer_norm_hidden_state = layer_norm_hidden_state - self.return_projected_pooled = return_projected_pooled - if layer == "hidden": - assert layer_idx is not None - assert abs(layer_idx) < self.num_layers - self.set_clip_options({"layer": layer_idx}) - self.options_default = (self.layer, self.layer_idx, self.return_projected_pooled) - - def set_clip_options(self, options): - layer_idx = options.get("layer", self.layer_idx) - self.return_projected_pooled = options.get("projected_pooled", self.return_projected_pooled) - if layer_idx is None or abs(layer_idx) > self.num_layers: - self.layer = "last" - 
else: - self.layer = "hidden" - self.layer_idx = layer_idx - - def forward(self, tokens): - backup_embeds = self.transformer.get_input_embeddings() - device = backup_embeds.weight.device - tokens = torch.LongTensor(tokens).to(device) - outputs = self.transformer(tokens, intermediate_output=self.layer_idx, final_layer_norm_intermediate=self.layer_norm_hidden_state) - self.transformer.set_input_embeddings(backup_embeds) - if self.layer == "last": - z = outputs[0] - else: - z = outputs[1] - pooled_output = None - if len(outputs) >= 3: - if not self.return_projected_pooled and len(outputs) >= 4 and outputs[3] is not None: - pooled_output = outputs[3].float() - elif outputs[2] is not None: - pooled_output = outputs[2].float() - return z.float(), pooled_output - - -class SDXLClipG(SDClipModel): - """Wraps the CLIP-G model into the SD-CLIP-Model interface""" - def __init__(self, config, device="cpu", layer="penultimate", layer_idx=None, dtype=None): - if layer == "penultimate": - layer="hidden" - layer_idx=-2 - super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=config, dtype=dtype, special_tokens={"start": 49406, "end": 49407, "pad": 0}, layer_norm_hidden_state=False) - - -class T5XXLModel(SDClipModel): - """Wraps the T5-XXL model into the SD-CLIP-Model interface for convenience""" - def __init__(self, config, device="cpu", layer="last", layer_idx=None, dtype=None): - super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=T5) - - -################################################################################################# -### T5 implementation, for the T5-XXL text encoder portion, largely pulled from upstream impl -################################################################################################# - - -class T5XXLTokenizer(SDTokenizer): - """Wraps the T5 Tokenizer from HF into the SDTokenizer interface""" - def __init__(self): - super().__init__(pad_with_end=False, tokenizer=T5TokenizerFast.from_pretrained("google/t5-v1_1-xxl"), has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=77) - - -class T5LayerNorm(torch.nn.Module): - def __init__(self, hidden_size, eps=1e-6, dtype=None, device=None): - super().__init__() - self.weight = torch.nn.Parameter(torch.ones(hidden_size, dtype=dtype, device=device)) - self.variance_epsilon = eps - - def forward(self, x): - variance = x.pow(2).mean(-1, keepdim=True) - x = x * torch.rsqrt(variance + self.variance_epsilon) - return self.weight.to(device=x.device, dtype=x.dtype) * x - - -class T5DenseGatedActDense(torch.nn.Module): - def __init__(self, model_dim, ff_dim, dtype, device): - super().__init__() - self.wi_0 = nn.Linear(model_dim, ff_dim, bias=False, dtype=dtype, device=device) - self.wi_1 = nn.Linear(model_dim, ff_dim, bias=False, dtype=dtype, device=device) - self.wo = nn.Linear(ff_dim, model_dim, bias=False, dtype=dtype, device=device) - - def forward(self, x): - hidden_gelu = torch.nn.functional.gelu(self.wi_0(x), approximate="tanh") - hidden_linear = self.wi_1(x) - x = hidden_gelu * hidden_linear - x = self.wo(x) - return x - - -class T5LayerFF(torch.nn.Module): - def __init__(self, model_dim, ff_dim, dtype, device): - super().__init__() - self.DenseReluDense = T5DenseGatedActDense(model_dim, ff_dim, dtype, device) - self.layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device) - - def forward(self, x): - forwarded_states = self.layer_norm(x) - 
forwarded_states = self.DenseReluDense(forwarded_states) - x += forwarded_states - return x - - -class T5Attention(torch.nn.Module): - def __init__(self, model_dim, inner_dim, num_heads, relative_attention_bias, dtype, device): - super().__init__() - # Mesh TensorFlow initialization to avoid scaling before softmax - self.q = nn.Linear(model_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.k = nn.Linear(model_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.v = nn.Linear(model_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.o = nn.Linear(inner_dim, model_dim, bias=False, dtype=dtype, device=device) - self.num_heads = num_heads - self.relative_attention_bias = None - if relative_attention_bias: - self.relative_attention_num_buckets = 32 - self.relative_attention_max_distance = 128 - self.relative_attention_bias = torch.nn.Embedding(self.relative_attention_num_buckets, self.num_heads, device=device) - - @staticmethod - def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): - """ - Adapted from Mesh Tensorflow: - https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 - - Translate relative position to a bucket number for relative attention. The relative position is defined as - memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to - position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for - small absolute relative_position and larger buckets for larger absolute relative_positions. All relative - positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
- This should allow for more graceful generalization to longer sequences than the model has been trained on - - Args: - relative_position: an int32 Tensor - bidirectional: a boolean - whether the attention is bidirectional - num_buckets: an integer - max_distance: an integer - - Returns: - a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) - """ - relative_buckets = 0 - if bidirectional: - num_buckets //= 2 - relative_buckets += (relative_position > 0).to(torch.long) * num_buckets - relative_position = torch.abs(relative_position) - else: - relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) - # now relative_position is in the range [0, inf) - # half of the buckets are for exact increments in positions - max_exact = num_buckets // 2 - is_small = relative_position < max_exact - # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance - relative_position_if_large = max_exact + ( - torch.log(relative_position.float() / max_exact) - / math.log(max_distance / max_exact) - * (num_buckets - max_exact) - ).to(torch.long) - relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)) - relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) - return relative_buckets - - def compute_bias(self, query_length, key_length, device): - """Compute binned relative position bias""" - context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] - memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] - relative_position = memory_position - context_position # shape (query_length, key_length) - relative_position_bucket = self._relative_position_bucket( - relative_position, # shape (query_length, key_length) - bidirectional=True, - num_buckets=self.relative_attention_num_buckets, - max_distance=self.relative_attention_max_distance, - ) - values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) - values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) - return values - - def forward(self, x, past_bias=None): - q = self.q(x) - k = self.k(x) - v = self.v(x) - if self.relative_attention_bias is not None: - past_bias = self.compute_bias(x.shape[1], x.shape[1], x.device) - if past_bias is not None: - mask = past_bias - out = attention(q, k * ((k.shape[-1] / self.num_heads) ** 0.5), v, self.num_heads, mask) - return self.o(out), past_bias - - -class T5LayerSelfAttention(torch.nn.Module): - def __init__(self, model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias, dtype, device): - super().__init__() - self.SelfAttention = T5Attention(model_dim, inner_dim, num_heads, relative_attention_bias, dtype, device) - self.layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device) - - def forward(self, x, past_bias=None): - output, past_bias = self.SelfAttention(self.layer_norm(x), past_bias=past_bias) - x += output - return x, past_bias - - -class T5Block(torch.nn.Module): - def __init__(self, model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias, dtype, device): - super().__init__() - self.layer = torch.nn.ModuleList() - self.layer.append(T5LayerSelfAttention(model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias, dtype, device)) - self.layer.append(T5LayerFF(model_dim, ff_dim, dtype, device)) - - def forward(self, 
x, past_bias=None): - x, past_bias = self.layer[0](x, past_bias) - x = self.layer[-1](x) - return x, past_bias - - -class T5Stack(torch.nn.Module): - def __init__(self, num_layers, model_dim, inner_dim, ff_dim, num_heads, vocab_size, dtype, device): - super().__init__() - self.embed_tokens = torch.nn.Embedding(vocab_size, model_dim, device=device) - self.block = torch.nn.ModuleList([T5Block(model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias=(i == 0), dtype=dtype, device=device) for i in range(num_layers)]) - self.final_layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device) - - def forward(self, input_ids, intermediate_output=None, final_layer_norm_intermediate=True): - intermediate = None - x = self.embed_tokens(input_ids) - past_bias = None - for i, l in enumerate(self.block): - x, past_bias = l(x, past_bias) - if i == intermediate_output: - intermediate = x.clone() - x = self.final_layer_norm(x) - if intermediate is not None and final_layer_norm_intermediate: - intermediate = self.final_layer_norm(intermediate) - return x, intermediate - - -class T5(torch.nn.Module): - def __init__(self, config_dict, dtype, device): - super().__init__() - self.num_layers = config_dict["num_layers"] - self.encoder = T5Stack(self.num_layers, config_dict["d_model"], config_dict["d_model"], config_dict["d_ff"], config_dict["num_heads"], config_dict["vocab_size"], dtype, device) - self.dtype = dtype - - def get_input_embeddings(self): - return self.encoder.embed_tokens - - def set_input_embeddings(self, embeddings): - self.encoder.embed_tokens = embeddings - - def forward(self, *args, **kwargs): - return self.encoder(*args, **kwargs) diff --git a/examples/diffusion/python_stable_diffusion_3/txt2img.py b/examples/diffusion/python_stable_diffusion_3/txt2img.py index 5c15f1bb834..e3b1de63ce1 100644 --- a/examples/diffusion/python_stable_diffusion_3/txt2img.py +++ b/examples/diffusion/python_stable_diffusion_3/txt2img.py @@ -1,6 +1,7 @@ # The MIT License (MIT) # # Copyright (c) 2015-2024 Advanced Micro Devices, Inc. All rights reserved. +# Copyright (c) 2024 Stability AI # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the 'Software'), to deal @@ -21,10 +22,9 @@ # THE SOFTWARE. 
from argparse import ArgumentParser -from diffusers import EulerDiscreteScheduler +from diffusers import FlowMatchEulerDiscreteScheduler from other_impls import SD3Tokenizer -from sd3_impls import ModelSamplingDiscreteFlow, SD3LatentFormat from PIL import Image @@ -137,7 +137,7 @@ def get_args(): parser.add_argument( "--scale", type=float, - default=7.0, + default=5.0, help="Guidance scale", ) @@ -211,7 +211,9 @@ class StableDiffusionMGX(): def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, force_compile, exhaustive_tune): - self.model_sampling = ModelSamplingDiscreteFlow(shift=1.0) + self.scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained( + "stabilityai/stable-diffusion-3-medium-diffusers", + subfolder="scheduler") self.tokenizer = SD3Tokenizer() self.device = "cuda" @@ -219,7 +221,7 @@ def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, if fp16 is None: fp16 = [] elif "all" in fp16: - fp16 = ["vae", "clip", "unet"] + fp16 = ["vae", "clip", "mmdit"] self.batch = batch @@ -227,7 +229,7 @@ def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, self.models = { "vae": StableDiffusionMGX.load_mgx_model( - "vae_decoder", {"latent": [self.batch, 16, 128, 128]}, + "vae_decoder", {"latent_sample": [self.batch, 16, 128, 128]}, onnx_model_path, compiled_model_path=compiled_model_path, use_fp16="vae" in fp16, @@ -264,11 +266,11 @@ def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, offload_copy=False), "mmdit": StableDiffusionMGX.load_mgx_model( - "mmdit", { - "sample": [2 * self.batch, 16, 128, 128], - "sigma": [2 * self.batch], - "c_crossattn": [2 * self.batch, 154, 4096], - "y": [2 * self.batch, 2048], + "transformer", { + "hidden_states": [2 * self.batch, 16, 128, 128], + "timestep": [2 * self.batch], + "encoder_hidden_states": [2 * self.batch, 154, 4096], + "pooled_projections": [2 * self.batch, 2048], }, onnx_model_path, compiled_model_path=compiled_model_path, @@ -345,16 +347,33 @@ def run(self, prompt, negative_prompt, steps, seed, scale): neg_prompt_embeddings = self.get_embeddings(neg_prompt_tokens) self.profile_end("clip") - cfg_scale = 5 - latent = self.get_empty_latent(1024, 1024) + # fix height and width for now + # TODO: check for valid height/width combinations + # and make them member variables + height = 1024 + width = 1024 + latent = torch.empty(1, 16, height // 8, width // 8, + device="cpu") + + generator = torch.manual_seed(seed) + latent = torch.randn(latent.size(), + dtype=torch.float32, + layout=latent.layout, + generator=generator).to(latent.dtype) + + self.scheduler.set_timesteps(steps) + timesteps=self.scheduler.timesteps print("Running denoising loop...") self.profile_start("denoise") - latent = self.do_sampling(latent, seed, prompt_embeddings, - neg_prompt_embeddings, steps, cfg_scale) + for step in timesteps: + latent = self.denoise(latent, prompt_embeddings, + neg_prompt_embeddings, step, scale) self.profile_end("denoise") + latent = (latent / 1.5305) + 0.0609 + self.profile_start("decode") print("Decode denoised result...") image = self.decode(latent) @@ -471,107 +490,47 @@ def save_image(pil_image, filename="output.png"): def CFGDenoiser(self, x, timestep, cond, uncond, cond_scale): # Run cond and uncond in a batch together - x = torch.cat([x, x]) - timestep = torch.cat([timestep, timestep]) + x_concat = torch.cat([x, x]) + timestep_concat = timestep.expand([2]) c_crossattn = torch.cat([cond["c_crossattn"], uncond["c_crossattn"]]) y = torch.cat([cond["y"], uncond["y"]]) - 
copy_tensor_sync(self.tensors["mmdit"]["sample"], x)
-        copy_tensor_sync(self.tensors["mmdit"]["sigma"], timestep)
-        copy_tensor_sync(self.tensors["mmdit"]["c_crossattn"], c_crossattn)
-        copy_tensor_sync(self.tensors["mmdit"]["y"], y)
+        copy_tensor_sync(self.tensors["mmdit"]["hidden_states"], x_concat)
+        copy_tensor_sync(self.tensors["mmdit"]["timestep"], timestep_concat)
+        copy_tensor_sync(self.tensors["mmdit"]["encoder_hidden_states"], c_crossattn)
+        copy_tensor_sync(self.tensors["mmdit"]["pooled_projections"], y)
 
         run_model_sync(self.models["mmdit"], self.model_args['mmdit'])
 
+        mmdit_out = self.tensors["mmdit"][get_output_name(0)]
+
         # Then split and apply CFG Scaling
-        pos_out, neg_out = torch.tensor_split(
-            self.tensors["mmdit"][get_output_name(0)], 2)
+        pos_out, neg_out = torch.tensor_split(mmdit_out, 2)
 
         scaled = neg_out + (pos_out - neg_out) * cond_scale
-        return scaled
-
-    def sample_euler(self, x, sigmas, conditioning, neg_cond, cfg_scale):
-        def append_dims(x, target_dims):
-            """Appends dimensions to the end of a tensor until it has target_dims dimensions."""
-            dims_to_append = target_dims - x.ndim
-            return x[(..., ) + (None, ) * dims_to_append]
-
-        def to_d(x, sigma, denoised):
-            """Converts a denoiser output to a Karras ODE derivative."""
-            return (x - denoised) / append_dims(sigma, x.ndim)
-
-        """Implements Algorithm 2 (Euler steps) from Karras et al. (2022)."""
-        # extra_args = {} if extra_args is None else extra_args
-        s_in = x.new_ones([x.shape[0]])
-        for i in range(len(sigmas) - 1):
-            sigma_hat = sigmas[i]
-            denoised = self.CFGDenoiser(x, sigma_hat * s_in, conditioning,
-                                        neg_cond, cfg_scale)
-            d = to_d(x, sigma_hat, denoised)
-            dt = sigmas[i + 1] - sigma_hat
-            # Euler method
-            x = x + d * dt
-        return x
-
-    def get_empty_latent(self, width, height):
-        return torch.ones(1, 16, height // 8, width // 8,
-                          device="cpu") * 0.0609
-
-    def get_sigmas(self, sampling, steps):
-        start = sampling.timestep(sampling.sigma_max)
-        end = sampling.timestep(sampling.sigma_min)
-        timesteps = torch.linspace(start, end, steps)
-        sigs = []
-        for x in range(len(timesteps)):
-            ts = timesteps[x]
-            sigs.append(sampling.sigma(ts))
-        sigs += [0.0]
-        return torch.FloatTensor(sigs)
-
-    def get_noise(self, seed, latent):
-        generator = torch.manual_seed(seed)
-        print(
-            f"dtype = {latent.dtype}, layout = {latent.layout}, device = {latent.device}"
-        )
-        return torch.randn(latent.size(),
-                           dtype=torch.float32,
-                           layout=latent.layout,
-                           generator=generator).to(latent.dtype)
-
-    def max_denoise(self, sigmas):
-        max_sigma = float(self.model_sampling.sigma_max)
-        sigma = float(sigmas[0])
-        return math.isclose(max_sigma, sigma,
-                            rel_tol=1e-05) or sigma > max_sigma
+        # scheduler step function requires all tensors be on the CPU
+        scaled = scaled.detach().clone().cpu()
+        scheduler_out = self.scheduler.step(
+            model_output=scaled, timestep=timestep, sample=x, return_dict=False
+        )[0]
+        return scheduler_out
+
 
     def fix_cond(self, cond):
-        cond, pooled = (cond[0].half().cuda(), cond[1].half().cuda())
+        cond, pooled = (cond[0].cuda(), cond[1].cuda())
         return {"c_crossattn": cond, "y": pooled}
+    
 
-    def do_sampling(self,
-                    latent,
-                    seed,
-                    conditioning,
-                    neg_cond,
-                    steps,
-                    cfg_scale,
-                    denoise=1.0) -> torch.Tensor:
-        latent = latent.half().cuda()
-        noise = self.get_noise(seed, latent).cuda()
-        sigmas = self.get_sigmas(self.model_sampling, steps).cuda()
-        sigmas = sigmas[int(steps * (1 - denoise)):]
+    def denoise(self, latent, conditioning, neg_cond, step, cfg_scale):
         conditioning = self.fix_cond(conditioning)
         neg_cond = 
self.fix_cond(neg_cond) - noise_scaled = self.model_sampling.noise_scaling( - sigmas[0], noise, latent, self.max_denoise(sigmas)) - latent = self.sample_euler(noise_scaled, sigmas, conditioning, - neg_cond, cfg_scale) - latent = SD3LatentFormat().process_out(latent) - return latent + return self.CFGDenoiser(latent, step, conditioning, neg_cond, cfg_scale) + @measure def decode(self, latents): - copy_tensor_sync(self.tensors["vae"]["latent"], latents) + copy_tensor_sync(self.tensors["vae"]["latent_sample"], latents) run_model_sync(self.models["vae"], self.model_args["vae"]) return self.tensors["vae"][get_output_name(0)] @@ -585,18 +544,19 @@ def warmup(self, num_runs): copy_tensor_sync(self.tensors["t5xxl"]["input_ids"], torch.ones((1, 77)).to(torch.int32)) copy_tensor_sync( - self.tensors["mmdit"]["sample"], - torch.randn((2 * self.batch, 16, 128, 128)).to(torch.float16)) - copy_tensor_sync(self.tensors["mmdit"]["sigma"], - torch.randn((2 * self.batch)).to(torch.float16)) + self.tensors["mmdit"]["hidden_states"], + torch.randn((2 * self.batch, 16, 128, 128)).to(torch.float)) + copy_tensor_sync(self.tensors["mmdit"]["timestep"], + torch.randn((2 * self.batch)).to(torch.float)) copy_tensor_sync( - self.tensors["mmdit"]["c_crossattn"], - torch.randn((2 * self.batch, 154, 4096)).to(torch.float16)) - copy_tensor_sync(self.tensors["mmdit"]["y"], - torch.randn((2 * self.batch, 2048)).to(torch.float16)) + self.tensors["mmdit"]["encoder_hidden_states"], + torch.randn((2 * self.batch, 154, 4096)).to(torch.float)) + copy_tensor_sync(self.tensors["mmdit"]["pooled_projections"], + torch.randn((2 * self.batch, 2048)).to(torch.float)) copy_tensor_sync( - self.tensors["vae"]["latent"], - torch.randn((self.batch, 16, 128, 128)).to(torch.float16)) + self.tensors["vae"]["latent_sample"], + torch.randn((self.batch, 16, 128, 128)).to(torch.float)) + for _ in range(num_runs): run_model_sync(self.models["clip-l"], self.model_args["clip-l"]) From e6350940b090a7bdad251a54bf3079478995f020 Mon Sep 17 00:00:00 2001 From: kahmed10 <15948690+kahmed10@users.noreply.github.com> Date: Wed, 6 Nov 2024 17:06:53 -0600 Subject: [PATCH 11/17] formatting --- .../python_stable_diffusion_3/txt2img.py | 38 +++++++++---------- 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/examples/diffusion/python_stable_diffusion_3/txt2img.py b/examples/diffusion/python_stable_diffusion_3/txt2img.py index e3b1de63ce1..aec6e9d92ed 100644 --- a/examples/diffusion/python_stable_diffusion_3/txt2img.py +++ b/examples/diffusion/python_stable_diffusion_3/txt2img.py @@ -212,8 +212,8 @@ def __init__(self, onnx_model_path, compiled_model_path, fp16, batch, force_compile, exhaustive_tune): self.scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained( - "stabilityai/stable-diffusion-3-medium-diffusers", - subfolder="scheduler") + "stabilityai/stable-diffusion-3-medium-diffusers", + subfolder="scheduler") self.tokenizer = SD3Tokenizer() self.device = "cuda" @@ -352,22 +352,21 @@ def run(self, prompt, negative_prompt, steps, seed, scale): # and make them member variables height = 1024 width = 1024 - latent = torch.empty(1, 16, height // 8, width // 8, - device="cpu") - + latent = torch.empty(1, 16, height // 8, width // 8, device="cpu") + generator = torch.manual_seed(seed) latent = torch.randn(latent.size(), - dtype=torch.float32, - layout=latent.layout, - generator=generator).to(latent.dtype) + dtype=torch.float32, + layout=latent.layout, + generator=generator).to(latent.dtype) self.scheduler.set_timesteps(steps) - 
timesteps=self.scheduler.timesteps
+        timesteps = self.scheduler.timesteps
 
         print("Running denoising loop...")
         self.profile_start("denoise")
         for step in timesteps:
-            latent = self.denoise(latent, prompt_embeddings, 
+            latent = self.denoise(latent, prompt_embeddings,
                                   neg_prompt_embeddings, step, scale)
 
         self.profile_end("denoise")
@@ -497,13 +496,14 @@ def CFGDenoiser(self, x, timestep, cond, uncond, cond_scale):
 
         copy_tensor_sync(self.tensors["mmdit"]["hidden_states"], x_concat)
         copy_tensor_sync(self.tensors["mmdit"]["timestep"], timestep_concat)
-        copy_tensor_sync(self.tensors["mmdit"]["encoder_hidden_states"], c_crossattn)
+        copy_tensor_sync(self.tensors["mmdit"]["encoder_hidden_states"],
+                         c_crossattn)
         copy_tensor_sync(self.tensors["mmdit"]["pooled_projections"], y)
 
         run_model_sync(self.models["mmdit"], self.model_args['mmdit'])
 
         mmdit_out = self.tensors["mmdit"][get_output_name(0)]
-        
+
         # Then split and apply CFG Scaling
         pos_out, neg_out = torch.tensor_split(mmdit_out, 2)
 
@@ -511,22 +511,21 @@ def CFGDenoiser(self, x, timestep, cond, uncond, cond_scale):
 
         # scheduler step function requires all tensors be on the CPU
         scaled = scaled.detach().clone().cpu()
-        scheduler_out = self.scheduler.step(
-            model_output=scaled, timestep=timestep, sample=x, return_dict=False
-        )[0]
+        scheduler_out = self.scheduler.step(model_output=scaled,
+                                            timestep=timestep,
+                                            sample=x,
+                                            return_dict=False)[0]
         return scheduler_out
 
-
     def fix_cond(self, cond):
         cond, pooled = (cond[0].cuda(), cond[1].cuda())
         return {"c_crossattn": cond, "y": pooled}
 
-
     def denoise(self, latent, conditioning, neg_cond, step, cfg_scale):
         conditioning = self.fix_cond(conditioning)
         neg_cond = self.fix_cond(neg_cond)
-        return self.CFGDenoiser(latent, step, conditioning, neg_cond, cfg_scale)
-
+        return self.CFGDenoiser(latent, step, conditioning, neg_cond,
+                                cfg_scale)
 
     @measure
     def decode(self, latents):
@@ -556,7 +555,6 @@ def warmup(self, num_runs):
         copy_tensor_sync(
             self.tensors["vae"]["latent_sample"],
             torch.randn((self.batch, 16, 128, 128)).to(torch.float))
-
         for _ in range(num_runs):
             run_model_sync(self.models["clip-l"], self.model_args["clip-l"])

From 991e016883c8500401439603a3a4046e4588c1b6 Mon Sep 17 00:00:00 2001
From: kahmed10 <15948690+kahmed10@users.noreply.github.com>
Date: Thu, 7 Nov 2024 12:22:41 -0600
Subject: [PATCH 12/17] fix external files

---
 .../python_stable_diffusion_3/other_impls.py | 516 ++++++++++++++++++
 .../python_stable_diffusion_3/sd3_impls.py   | 390 -------------
 2 files changed, 516 insertions(+), 390 deletions(-)
 create mode 100644 examples/diffusion/python_stable_diffusion_3/other_impls.py
 delete mode 100644 examples/diffusion/python_stable_diffusion_3/sd3_impls.py

diff --git a/examples/diffusion/python_stable_diffusion_3/other_impls.py b/examples/diffusion/python_stable_diffusion_3/other_impls.py
new file mode 100644
index 00000000000..5322c4b4231
--- /dev/null
+++ b/examples/diffusion/python_stable_diffusion_3/other_impls.py
@@ -0,0 +1,516 @@
+# MIT License
+
+# Copyright (c) 2024 Stability AI
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in 
all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# Some code in `other_impls` originates from HuggingFace and is subject to [the HuggingFace Transformers Apache2 License](https://github.com/huggingface/transformers/blob/main/LICENSE) +### This file contains impls for underlying related models (CLIP, T5, etc) + +import torch, math +from torch import nn +from transformers import CLIPTokenizer, T5TokenizerFast + + +################################################################################################# +### Core/Utility +################################################################################################# + + +def attention(q, k, v, heads, mask=None): + """Convenience wrapper around a basic attention operation""" + b, _, dim_head = q.shape + dim_head //= heads + q, k, v = map(lambda t: t.view(b, -1, heads, dim_head).transpose(1, 2), (q, k, v)) + out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False) + return out.transpose(1, 2).reshape(b, -1, heads * dim_head) + + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks""" + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, dtype=None, device=None): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias, dtype=dtype, device=device) + self.act = act_layer + self.fc2 = nn.Linear(hidden_features, out_features, bias=bias, dtype=dtype, device=device) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + return x + + +################################################################################################# +### CLIP +################################################################################################# + + +class CLIPAttention(torch.nn.Module): + def __init__(self, embed_dim, heads, dtype, device): + super().__init__() + self.heads = heads + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) + + def forward(self, x, mask=None): + q = self.q_proj(x) + k = self.k_proj(x) + v = self.v_proj(x) + out = attention(q, k, v, self.heads, mask) + return self.out_proj(out) + + +ACTIVATIONS = { + "quick_gelu": lambda a: a * torch.sigmoid(1.702 * a), + "gelu": torch.nn.functional.gelu, +} + +class CLIPLayer(torch.nn.Module): + def __init__(self, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device): + super().__init__() + self.layer_norm1 = nn.LayerNorm(embed_dim, dtype=dtype, device=device) + self.self_attn = CLIPAttention(embed_dim, heads, dtype, device) + self.layer_norm2 = nn.LayerNorm(embed_dim, dtype=dtype, device=device) 
+ #self.mlp = CLIPMLP(embed_dim, intermediate_size, intermediate_activation, dtype, device) + self.mlp = Mlp(embed_dim, intermediate_size, embed_dim, act_layer=ACTIVATIONS[intermediate_activation], dtype=dtype, device=device) + + def forward(self, x, mask=None): + x += self.self_attn(self.layer_norm1(x), mask) + x += self.mlp(self.layer_norm2(x)) + return x + + +class CLIPEncoder(torch.nn.Module): + def __init__(self, num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device): + super().__init__() + self.layers = torch.nn.ModuleList([CLIPLayer(embed_dim, heads, intermediate_size, intermediate_activation, dtype, device) for i in range(num_layers)]) + + def forward(self, x, mask=None, intermediate_output=None): + if intermediate_output is not None: + if intermediate_output < 0: + intermediate_output = len(self.layers) + intermediate_output + intermediate = None + for i, l in enumerate(self.layers): + x = l(x, mask) + if i == intermediate_output: + intermediate = x.clone() + return x, intermediate + + +class CLIPEmbeddings(torch.nn.Module): + def __init__(self, embed_dim, vocab_size=49408, num_positions=77, dtype=None, device=None): + super().__init__() + self.token_embedding = torch.nn.Embedding(vocab_size, embed_dim, dtype=dtype, device=device) + self.position_embedding = torch.nn.Embedding(num_positions, embed_dim, dtype=dtype, device=device) + + def forward(self, input_tokens): + return self.token_embedding(input_tokens) + self.position_embedding.weight + + +class CLIPTextModel_(torch.nn.Module): + def __init__(self, config_dict, dtype, device): + num_layers = config_dict["num_hidden_layers"] + embed_dim = config_dict["hidden_size"] + heads = config_dict["num_attention_heads"] + intermediate_size = config_dict["intermediate_size"] + intermediate_activation = config_dict["hidden_act"] + super().__init__() + self.embeddings = CLIPEmbeddings(embed_dim, dtype=torch.float32, device=device) + self.encoder = CLIPEncoder(num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device) + self.final_layer_norm = nn.LayerNorm(embed_dim, dtype=dtype, device=device) + + def forward(self, input_tokens, intermediate_output=None, final_layer_norm_intermediate=True): + x = self.embeddings(input_tokens) + causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1) + x, i = self.encoder(x, mask=causal_mask, intermediate_output=intermediate_output) + x = self.final_layer_norm(x) + if i is not None and final_layer_norm_intermediate: + i = self.final_layer_norm(i) + pooled_output = x[torch.arange(x.shape[0], device=x.device), input_tokens.to(dtype=torch.int, device=x.device).argmax(dim=-1),] + return x, i, pooled_output + + +class CLIPTextModel(torch.nn.Module): + def __init__(self, config_dict, dtype, device): + super().__init__() + self.num_layers = config_dict["num_hidden_layers"] + self.text_model = CLIPTextModel_(config_dict, dtype, device) + embed_dim = config_dict["hidden_size"] + self.text_projection = nn.Linear(embed_dim, embed_dim, bias=False, dtype=dtype, device=device) + self.text_projection.weight.copy_(torch.eye(embed_dim)) + self.dtype = dtype + + def get_input_embeddings(self): + return self.text_model.embeddings.token_embedding + + def set_input_embeddings(self, embeddings): + self.text_model.embeddings.token_embedding = embeddings + + def forward(self, *args, **kwargs): + x = self.text_model(*args, **kwargs) + out = self.text_projection(x[2]) + return (x[0], x[1], out, x[2]) + + +class 
SDTokenizer:
+    def __init__(self, max_length=77, pad_with_end=True, tokenizer=None, has_start_token=True, pad_to_max_length=True, min_length=None):
+        self.tokenizer = tokenizer
+        self.max_length = max_length
+        self.min_length = min_length
+        empty = self.tokenizer('')["input_ids"]
+        if has_start_token:
+            self.tokens_start = 1
+            self.start_token = empty[0]
+            self.end_token = empty[1]
+        else:
+            self.tokens_start = 0
+            self.start_token = None
+            self.end_token = empty[0]
+        self.pad_with_end = pad_with_end
+        self.pad_to_max_length = pad_to_max_length
+        vocab = self.tokenizer.get_vocab()
+        self.inv_vocab = {v: k for k, v in vocab.items()}
+        self.max_word_length = 8
+
+
+    def tokenize_with_weights(self, text:str):
+        """Tokenize the text, with weight values - presume 1.0 for all and ignore other features here. The details aren't relevant for a reference impl, and weights themselves have a weak effect on SD3."""
+        if self.pad_with_end:
+            pad_token = self.end_token
+        else:
+            pad_token = 0
+        batch = []
+        if self.start_token is not None:
+            batch.append((self.start_token, 1.0))
+        to_tokenize = text.replace("\n", " ").split(' ')
+        to_tokenize = [x for x in to_tokenize if x != ""]
+        for word in to_tokenize:
+            batch.extend([(t, 1) for t in self.tokenizer(word)["input_ids"][self.tokens_start:-1]])
+        batch.append((self.end_token, 1.0))
+        if self.pad_to_max_length:
+            batch.extend([(pad_token, 1.0)] * (self.max_length - len(batch)))
+        if self.min_length is not None and len(batch) < self.min_length:
+            batch.extend([(pad_token, 1.0)] * (self.min_length - len(batch)))
+        return [batch]
+
+
+class SDXLClipGTokenizer(SDTokenizer):
+    def __init__(self, tokenizer):
+        super().__init__(pad_with_end=False, tokenizer=tokenizer)
+
+
+class SD3Tokenizer:
+    def __init__(self):
+        clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
+        self.clip_l = SDTokenizer(tokenizer=clip_tokenizer)
+        self.clip_g = SDXLClipGTokenizer(clip_tokenizer)
+        self.t5xxl = T5XXLTokenizer()
+
+    def tokenize_with_weights(self, text:str):
+        out = {}
+        out["g"] = self.clip_g.tokenize_with_weights(text)
+        out["l"] = self.clip_l.tokenize_with_weights(text)
+        out["t5xxl"] = self.t5xxl.tokenize_with_weights(text)
+        return out
+
+
+class ClipTokenWeightEncoder:
+    def encode_token_weights(self, token_weight_pairs):
+        tokens = list(map(lambda a: a[0], token_weight_pairs[0]))
+        out, pooled = self([tokens])
+        if pooled is not None:
+            first_pooled = pooled[0:1].cpu()
+        else:
+            first_pooled = pooled
+        output = [out[0:1]]
+        return torch.cat(output, dim=-2).cpu(), first_pooled
+
+
+class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):
+    """Uses the CLIP transformer encoder for text (from huggingface)"""
+    LAYERS = ["last", "pooled", "hidden"]
+    def __init__(self, device="cpu", max_length=77, layer="last", layer_idx=None, textmodel_json_config=None, dtype=None, model_class=CLIPTextModel,
+                 special_tokens={"start": 49406, "end": 49407, "pad": 49407}, layer_norm_hidden_state=True, return_projected_pooled=True):
+        super().__init__()
+        assert layer in self.LAYERS
+        self.transformer = model_class(textmodel_json_config, dtype, device)
+        self.num_layers = self.transformer.num_layers
+        self.max_length = max_length
+        self.transformer = self.transformer.eval()
+        for param in self.parameters():
+            param.requires_grad = False
+        self.layer = layer
+        self.layer_idx = None
+        self.special_tokens = special_tokens
+        self.logit_scale = torch.nn.Parameter(torch.tensor(4.6055))
+        self.layer_norm_hidden_state = layer_norm_hidden_state
+        
self.return_projected_pooled = return_projected_pooled + if layer == "hidden": + assert layer_idx is not None + assert abs(layer_idx) < self.num_layers + self.set_clip_options({"layer": layer_idx}) + self.options_default = (self.layer, self.layer_idx, self.return_projected_pooled) + + def set_clip_options(self, options): + layer_idx = options.get("layer", self.layer_idx) + self.return_projected_pooled = options.get("projected_pooled", self.return_projected_pooled) + if layer_idx is None or abs(layer_idx) > self.num_layers: + self.layer = "last" + else: + self.layer = "hidden" + self.layer_idx = layer_idx + + def forward(self, tokens): + backup_embeds = self.transformer.get_input_embeddings() + device = backup_embeds.weight.device + tokens = torch.LongTensor(tokens).to(device) + outputs = self.transformer(tokens, intermediate_output=self.layer_idx, final_layer_norm_intermediate=self.layer_norm_hidden_state) + self.transformer.set_input_embeddings(backup_embeds) + if self.layer == "last": + z = outputs[0] + else: + z = outputs[1] + pooled_output = None + if len(outputs) >= 3: + if not self.return_projected_pooled and len(outputs) >= 4 and outputs[3] is not None: + pooled_output = outputs[3].float() + elif outputs[2] is not None: + pooled_output = outputs[2].float() + return z.float(), pooled_output + + +class SDXLClipG(SDClipModel): + """Wraps the CLIP-G model into the SD-CLIP-Model interface""" + def __init__(self, config, device="cpu", layer="penultimate", layer_idx=None, dtype=None): + if layer == "penultimate": + layer="hidden" + layer_idx=-2 + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=config, dtype=dtype, special_tokens={"start": 49406, "end": 49407, "pad": 0}, layer_norm_hidden_state=False) + + +class T5XXLModel(SDClipModel): + """Wraps the T5-XXL model into the SD-CLIP-Model interface for convenience""" + def __init__(self, config, device="cpu", layer="last", layer_idx=None, dtype=None): + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=T5) + + +################################################################################################# +### T5 implementation, for the T5-XXL text encoder portion, largely pulled from upstream impl +################################################################################################# + + +class T5XXLTokenizer(SDTokenizer): + """Wraps the T5 Tokenizer from HF into the SDTokenizer interface""" + def __init__(self): + super().__init__(pad_with_end=False, tokenizer=T5TokenizerFast.from_pretrained("google/t5-v1_1-xxl"), has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=77) + + +class T5LayerNorm(torch.nn.Module): + def __init__(self, hidden_size, eps=1e-6, dtype=None, device=None): + super().__init__() + self.weight = torch.nn.Parameter(torch.ones(hidden_size, dtype=dtype, device=device)) + self.variance_epsilon = eps + + def forward(self, x): + variance = x.pow(2).mean(-1, keepdim=True) + x = x * torch.rsqrt(variance + self.variance_epsilon) + return self.weight.to(device=x.device, dtype=x.dtype) * x + + +class T5DenseGatedActDense(torch.nn.Module): + def __init__(self, model_dim, ff_dim, dtype, device): + super().__init__() + self.wi_0 = nn.Linear(model_dim, ff_dim, bias=False, dtype=dtype, device=device) + self.wi_1 = nn.Linear(model_dim, ff_dim, bias=False, dtype=dtype, device=device) + self.wo = nn.Linear(ff_dim, model_dim, bias=False, dtype=dtype, 
device=device) + + def forward(self, x): + hidden_gelu = torch.nn.functional.gelu(self.wi_0(x), approximate="tanh") + hidden_linear = self.wi_1(x) + x = hidden_gelu * hidden_linear + x = self.wo(x) + return x + + +class T5LayerFF(torch.nn.Module): + def __init__(self, model_dim, ff_dim, dtype, device): + super().__init__() + self.DenseReluDense = T5DenseGatedActDense(model_dim, ff_dim, dtype, device) + self.layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device) + + def forward(self, x): + forwarded_states = self.layer_norm(x) + forwarded_states = self.DenseReluDense(forwarded_states) + x += forwarded_states + return x + + +class T5Attention(torch.nn.Module): + def __init__(self, model_dim, inner_dim, num_heads, relative_attention_bias, dtype, device): + super().__init__() + # Mesh TensorFlow initialization to avoid scaling before softmax + self.q = nn.Linear(model_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.k = nn.Linear(model_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.v = nn.Linear(model_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.o = nn.Linear(inner_dim, model_dim, bias=False, dtype=dtype, device=device) + self.num_heads = num_heads + self.relative_attention_bias = None + if relative_attention_bias: + self.relative_attention_num_buckets = 32 + self.relative_attention_max_distance = 128 + self.relative_attention_bias = torch.nn.Embedding(self.relative_attention_num_buckets, self.num_heads, device=device) + + @staticmethod + def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): + """ + Adapted from Mesh Tensorflow: + https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 + + Translate relative position to a bucket number for relative attention. The relative position is defined as + memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to + position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for + small absolute relative_position and larger buckets for larger absolute relative_positions. All relative + positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
+ This should allow for more graceful generalization to longer sequences than the model has been trained on + + Args: + relative_position: an int32 Tensor + bidirectional: a boolean - whether the attention is bidirectional + num_buckets: an integer + max_distance: an integer + + Returns: + a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) + """ + relative_buckets = 0 + if bidirectional: + num_buckets //= 2 + relative_buckets += (relative_position > 0).to(torch.long) * num_buckets + relative_position = torch.abs(relative_position) + else: + relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) + # now relative_position is in the range [0, inf) + # half of the buckets are for exact increments in positions + max_exact = num_buckets // 2 + is_small = relative_position < max_exact + # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance + relative_position_if_large = max_exact + ( + torch.log(relative_position.float() / max_exact) + / math.log(max_distance / max_exact) + * (num_buckets - max_exact) + ).to(torch.long) + relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)) + relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) + return relative_buckets + + def compute_bias(self, query_length, key_length, device): + """Compute binned relative position bias""" + context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] + memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] + relative_position = memory_position - context_position # shape (query_length, key_length) + relative_position_bucket = self._relative_position_bucket( + relative_position, # shape (query_length, key_length) + bidirectional=True, + num_buckets=self.relative_attention_num_buckets, + max_distance=self.relative_attention_max_distance, + ) + values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) + values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) + return values + + def forward(self, x, past_bias=None): + q = self.q(x) + k = self.k(x) + v = self.v(x) + if self.relative_attention_bias is not None: + past_bias = self.compute_bias(x.shape[1], x.shape[1], x.device) + if past_bias is not None: + mask = past_bias + out = attention(q, k * ((k.shape[-1] / self.num_heads) ** 0.5), v, self.num_heads, mask) + return self.o(out), past_bias + + +class T5LayerSelfAttention(torch.nn.Module): + def __init__(self, model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias, dtype, device): + super().__init__() + self.SelfAttention = T5Attention(model_dim, inner_dim, num_heads, relative_attention_bias, dtype, device) + self.layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device) + + def forward(self, x, past_bias=None): + output, past_bias = self.SelfAttention(self.layer_norm(x), past_bias=past_bias) + x += output + return x, past_bias + + +class T5Block(torch.nn.Module): + def __init__(self, model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias, dtype, device): + super().__init__() + self.layer = torch.nn.ModuleList() + self.layer.append(T5LayerSelfAttention(model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias, dtype, device)) + self.layer.append(T5LayerFF(model_dim, ff_dim, dtype, device)) + + def forward(self, 
x, past_bias=None): + x, past_bias = self.layer[0](x, past_bias) + x = self.layer[-1](x) + return x, past_bias + + +class T5Stack(torch.nn.Module): + def __init__(self, num_layers, model_dim, inner_dim, ff_dim, num_heads, vocab_size, dtype, device): + super().__init__() + self.embed_tokens = torch.nn.Embedding(vocab_size, model_dim, device=device) + self.block = torch.nn.ModuleList([T5Block(model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias=(i == 0), dtype=dtype, device=device) for i in range(num_layers)]) + self.final_layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device) + + def forward(self, input_ids, intermediate_output=None, final_layer_norm_intermediate=True): + intermediate = None + x = self.embed_tokens(input_ids) + past_bias = None + for i, l in enumerate(self.block): + x, past_bias = l(x, past_bias) + if i == intermediate_output: + intermediate = x.clone() + x = self.final_layer_norm(x) + if intermediate is not None and final_layer_norm_intermediate: + intermediate = self.final_layer_norm(intermediate) + return x, intermediate + + +class T5(torch.nn.Module): + def __init__(self, config_dict, dtype, device): + super().__init__() + self.num_layers = config_dict["num_layers"] + self.encoder = T5Stack(self.num_layers, config_dict["d_model"], config_dict["d_model"], config_dict["d_ff"], config_dict["num_heads"], config_dict["vocab_size"], dtype, device) + self.dtype = dtype + + def get_input_embeddings(self): + return self.encoder.embed_tokens + + def set_input_embeddings(self, embeddings): + self.encoder.embed_tokens = embeddings + + def forward(self, *args, **kwargs): + return self.encoder(*args, **kwargs) + diff --git a/examples/diffusion/python_stable_diffusion_3/sd3_impls.py b/examples/diffusion/python_stable_diffusion_3/sd3_impls.py deleted file mode 100644 index 9074166f375..00000000000 --- a/examples/diffusion/python_stable_diffusion_3/sd3_impls.py +++ /dev/null @@ -1,390 +0,0 @@ -# Copyright (c) 2024 Stability AI - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-### Impls of the SD3 core diffusion model and VAE - -import torch, math, einops -# from mmdit import MMDiT -from PIL import Image - - -################################################################################################# -### MMDiT Model Wrapping -################################################################################################# - - -class ModelSamplingDiscreteFlow(torch.nn.Module): - """Helper for sampler scheduling (ie timestep/sigma calculations) for Discrete Flow models""" - def __init__(self, shift=1.0): - super().__init__() - self.shift = shift - timesteps = 1000 - ts = self.sigma(torch.arange(1, timesteps + 1, 1)) - self.register_buffer('sigmas', ts) - - @property - def sigma_min(self): - return self.sigmas[0] - - @property - def sigma_max(self): - return self.sigmas[-1] - - def timestep(self, sigma): - return sigma * 1000 - - def sigma(self, timestep: torch.Tensor): - timestep = timestep / 1000.0 - if self.shift == 1.0: - return timestep - return self.shift * timestep / (1 + (self.shift - 1) * timestep) - - def calculate_denoised(self, sigma, model_output, model_input): - sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) - return model_input - model_output * sigma - - def noise_scaling(self, sigma, noise, latent_image, max_denoise=False): - return sigma * noise + (1.0 - sigma) * latent_image - - -# class BaseModel(torch.nn.Module): -# """Wrapper around the core MM-DiT model""" -# def __init__(self, shift=1.0, device=None, dtype=torch.float32, file=None, prefix=""): -# super().__init__() -# # Important configuration values can be quickly determined by checking shapes in the source file -# # Some of these will vary between models (eg 2B vs 8B primarily differ in their depth, but also other details change) -# patch_size = file.get_tensor(f"{prefix}x_embedder.proj.weight").shape[2] -# depth = file.get_tensor(f"{prefix}x_embedder.proj.weight").shape[0] // 64 -# num_patches = file.get_tensor(f"{prefix}pos_embed").shape[1] -# pos_embed_max_size = round(math.sqrt(num_patches)) -# adm_in_channels = file.get_tensor(f"{prefix}y_embedder.mlp.0.weight").shape[1] -# context_shape = file.get_tensor(f"{prefix}context_embedder.weight").shape -# context_embedder_config = { -# "target": "torch.nn.Linear", -# "params": { -# "in_features": context_shape[1], -# "out_features": context_shape[0] -# } -# } -# self.diffusion_model = MMDiT(input_size=None, pos_embed_scaling_factor=None, pos_embed_offset=None, pos_embed_max_size=pos_embed_max_size, patch_size=patch_size, in_channels=16, depth=depth, num_patches=num_patches, adm_in_channels=adm_in_channels, context_embedder_config=context_embedder_config, device=device, dtype=dtype) -# self.model_sampling = ModelSamplingDiscreteFlow(shift=shift) - -# def apply_model(self, x, sigma, c_crossattn=None, y=None): -# dtype = self.get_dtype() -# timestep = self.model_sampling.timestep(sigma).float() -# model_output = self.diffusion_model(x.to(dtype), timestep, context=c_crossattn.to(dtype), y=y.to(dtype)).float() -# return self.model_sampling.calculate_denoised(sigma, model_output, x) - -# def forward(self, *args, **kwargs): -# return self.apply_model(*args, **kwargs) - -# def get_dtype(self): -# return self.diffusion_model.dtype - - -class CFGDenoiser(torch.nn.Module): - """Helper for applying CFG Scaling to diffusion outputs""" - def __init__(self, model): - super().__init__() - self.model = model - - def forward(self, x, timestep, cond, uncond, cond_scale): - # Run cond and uncond in a batch together - batched = 
self.model.apply_model(torch.cat([x, x]), torch.cat([timestep, timestep]), c_crossattn=torch.cat([cond["c_crossattn"], uncond["c_crossattn"]]), y=torch.cat([cond["y"], uncond["y"]])) - # Then split and apply CFG Scaling - pos_out, neg_out = batched.chunk(2) - scaled = neg_out + (pos_out - neg_out) * cond_scale - return scaled - - -class SD3LatentFormat: - """Latents are slightly shifted from center - this class must be called after VAE Decode to correct for the shift""" - def __init__(self): - self.scale_factor = 1.5305 - self.shift_factor = 0.0609 - - def process_in(self, latent): - return (latent - self.shift_factor) * self.scale_factor - - def process_out(self, latent): - return (latent / self.scale_factor) + self.shift_factor - - def decode_latent_to_preview(self, x0): - """Quick RGB approximate preview of sd3 latents""" - factors = torch.tensor([ - [-0.0645, 0.0177, 0.1052], [ 0.0028, 0.0312, 0.0650], - [ 0.1848, 0.0762, 0.0360], [ 0.0944, 0.0360, 0.0889], - [ 0.0897, 0.0506, -0.0364], [-0.0020, 0.1203, 0.0284], - [ 0.0855, 0.0118, 0.0283], [-0.0539, 0.0658, 0.1047], - [-0.0057, 0.0116, 0.0700], [-0.0412, 0.0281, -0.0039], - [ 0.1106, 0.1171, 0.1220], [-0.0248, 0.0682, -0.0481], - [ 0.0815, 0.0846, 0.1207], [-0.0120, -0.0055, -0.0867], - [-0.0749, -0.0634, -0.0456], [-0.1418, -0.1457, -0.1259] - ], device="cpu") - latent_image = x0[0].permute(1, 2, 0).cpu() @ factors - - latents_ubyte = (((latent_image + 1) / 2) - .clamp(0, 1) # change scale from -1..1 to 0..1 - .mul(0xFF) # to 0..255 - .byte()).cpu() - - return Image.fromarray(latents_ubyte.numpy()) - - -################################################################################################# -### K-Diffusion Sampling -################################################################################################# - - -def append_dims(x, target_dims): - """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" - dims_to_append = target_dims - x.ndim - return x[(...,) + (None,) * dims_to_append] - - -def to_d(x, sigma, denoised): - """Converts a denoiser output to a Karras ODE derivative.""" - return (x - denoised) / append_dims(sigma, x.ndim) - - -@torch.no_grad() -@torch.autocast("cuda", dtype=torch.float16) -def sample_euler(model, x, sigmas, extra_args=None): - """Implements Algorithm 2 (Euler steps) from Karras et al. 
(2022).""" - extra_args = {} if extra_args is None else extra_args - s_in = x.new_ones([x.shape[0]]) - for i in range(len(sigmas) - 1): - sigma_hat = sigmas[i] - denoised = model(x, sigma_hat * s_in, **extra_args) - d = to_d(x, sigma_hat, denoised) - dt = sigmas[i + 1] - sigma_hat - # Euler method - x = x + d * dt - return x - - -################################################################################################# -### VAE -################################################################################################# - - -def Normalize(in_channels, num_groups=32, dtype=torch.float32, device=None): - return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device) - - -class ResnetBlock(torch.nn.Module): - def __init__(self, *, in_channels, out_channels=None, dtype=torch.float32, device=None): - super().__init__() - self.in_channels = in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - - self.norm1 = Normalize(in_channels, dtype=dtype, device=device) - self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device) - self.norm2 = Normalize(out_channels, dtype=dtype, device=device) - self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device) - if self.in_channels != self.out_channels: - self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device) - else: - self.nin_shortcut = None - self.swish = torch.nn.SiLU(inplace=True) - - def forward(self, x): - hidden = x - hidden = self.norm1(hidden) - hidden = self.swish(hidden) - hidden = self.conv1(hidden) - hidden = self.norm2(hidden) - hidden = self.swish(hidden) - hidden = self.conv2(hidden) - if self.in_channels != self.out_channels: - x = self.nin_shortcut(x) - return x + hidden - - -class AttnBlock(torch.nn.Module): - def __init__(self, in_channels, dtype=torch.float32, device=None): - super().__init__() - self.norm = Normalize(in_channels, dtype=dtype, device=device) - self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device) - self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device) - self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device) - self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device) - - def forward(self, x): - hidden = self.norm(x) - q = self.q(hidden) - k = self.k(hidden) - v = self.v(hidden) - b, c, h, w = q.shape - q, k, v = map(lambda x: einops.rearrange(x, "b c h w -> b 1 (h w) c").contiguous(), (q, k, v)) - hidden = torch.nn.functional.scaled_dot_product_attention(q, k, v) # scale is dim ** -0.5 per default - hidden = einops.rearrange(hidden, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b) - hidden = self.proj_out(hidden) - return x + hidden - - -class Downsample(torch.nn.Module): - def __init__(self, in_channels, dtype=torch.float32, device=None): - super().__init__() - self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0, dtype=dtype, device=device) - - def forward(self, x): - pad = (0,1,0,1) - x = torch.nn.functional.pad(x, pad, mode="constant", value=0) - x = self.conv(x) - return x - - -class Upsample(torch.nn.Module): - def 
__init__(self, in_channels, dtype=torch.float32, device=None): - super().__init__() - self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device) - - def forward(self, x): - x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") - x = self.conv(x) - return x - - -class VAEEncoder(torch.nn.Module): - def __init__(self, ch=128, ch_mult=(1,2,4,4), num_res_blocks=2, in_channels=3, z_channels=16, dtype=torch.float32, device=None): - super().__init__() - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, ch, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device) - in_ch_mult = (1,) + tuple(ch_mult) - self.in_ch_mult = in_ch_mult - self.down = torch.nn.ModuleList() - for i_level in range(self.num_resolutions): - block = torch.nn.ModuleList() - attn = torch.nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] - for i_block in range(num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, dtype=dtype, device=device)) - block_in = block_out - down = torch.nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions - 1: - down.downsample = Downsample(block_in, dtype=dtype, device=device) - self.down.append(down) - # middle - self.mid = torch.nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device) - self.mid.attn_1 = AttnBlock(block_in, dtype=dtype, device=device) - self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device) - # end - self.norm_out = Normalize(block_in, dtype=dtype, device=device) - self.conv_out = torch.nn.Conv2d(block_in, 2 * z_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device) - self.swish = torch.nn.SiLU(inplace=True) - - def forward(self, x): - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1]) - hs.append(h) - if i_level != self.num_resolutions-1: - hs.append(self.down[i_level].downsample(hs[-1])) - # middle - h = hs[-1] - h = self.mid.block_1(h) - h = self.mid.attn_1(h) - h = self.mid.block_2(h) - # end - h = self.norm_out(h) - h = self.swish(h) - h = self.conv_out(h) - return h - - -class VAEDecoder(torch.nn.Module): - def __init__(self, ch=128, out_ch=3, ch_mult=(1, 2, 4, 4), num_res_blocks=2, resolution=256, z_channels=16, dtype=torch.float32, device=None): - super().__init__() - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - block_in = ch * ch_mult[self.num_resolutions - 1] - curr_res = resolution // 2 ** (self.num_resolutions - 1) - # z to block_in - self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device) - # middle - self.mid = torch.nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device) - self.mid.attn_1 = AttnBlock(block_in, dtype=dtype, device=device) - self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device) - # upsampling - self.up = torch.nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = torch.nn.ModuleList() - block_out = ch * ch_mult[i_level] - for i_block in range(self.num_res_blocks + 1): - 
block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, dtype=dtype, device=device))
-                block_in = block_out
-            up = torch.nn.Module()
-            up.block = block
-            if i_level != 0:
-                up.upsample = Upsample(block_in, dtype=dtype, device=device)
-                curr_res = curr_res * 2
-            self.up.insert(0, up) # prepend to get consistent order
-        # end
-        self.norm_out = Normalize(block_in, dtype=dtype, device=device)
-        self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)
-        self.swish = torch.nn.SiLU(inplace=True)
-
-    def forward(self, z):
-        # z to block_in
-        hidden = self.conv_in(z)
-        # middle
-        hidden = self.mid.block_1(hidden)
-        hidden = self.mid.attn_1(hidden)
-        hidden = self.mid.block_2(hidden)
-        # upsampling
-        for i_level in reversed(range(self.num_resolutions)):
-            for i_block in range(self.num_res_blocks + 1):
-                hidden = self.up[i_level].block[i_block](hidden)
-            if i_level != 0:
-                hidden = self.up[i_level].upsample(hidden)
-        # end
-        hidden = self.norm_out(hidden)
-        hidden = self.swish(hidden)
-        hidden = self.conv_out(hidden)
-        return hidden
-
-
-class SDVAE(torch.nn.Module):
-    def __init__(self, dtype=torch.float32, device=None):
-        super().__init__()
-        self.encoder = VAEEncoder(dtype=dtype, device=device)
-        self.decoder = VAEDecoder(dtype=dtype, device=device)
-
-    @torch.autocast("cuda", dtype=torch.float16)
-    def decode(self, latent):
-        return self.decoder(latent)
-
-    @torch.autocast("cuda", dtype=torch.float16)
-    def encode(self, image):
-        hidden = self.encoder(image)
-        mean, logvar = torch.chunk(hidden, 2, dim=1)
-        logvar = torch.clamp(logvar, -30.0, 20.0)
-        std = torch.exp(0.5 * logvar)
-        return mean + std * torch.randn_like(mean)

From cf1ffad302f66519c7a940e282f88040ebb32ce7 Mon Sep 17 00:00:00 2001
From: kahmed10 <15948690+kahmed10@users.noreply.github.com>
Date: Wed, 13 Nov 2024 14:10:25 -0600
Subject: [PATCH 13/17] update readme

---
 .../python_stable_diffusion_3/README.md       | 64 +++++++++++++++++++
 1 file changed, 64 insertions(+)
 create mode 100644 examples/diffusion/python_stable_diffusion_3/README.md

diff --git a/examples/diffusion/python_stable_diffusion_3/README.md b/examples/diffusion/python_stable_diffusion_3/README.md
new file mode 100644
index 00000000000..be0b9fa3a12
--- /dev/null
+++ b/examples/diffusion/python_stable_diffusion_3/README.md
@@ -0,0 +1,64 @@
+# Stable Diffusion 3
+
+This example was tested with the [rocm 6.2](https://github.com/ROCmSoftwarePlatform/AMDMIGraphX/tree/rocm-6.2.0) revision of MIGraphX.
+
+## Console application
+
+To run the console application, follow the steps below.
+
+Set up the Python environment
+
+```bash
+# this requires the python venv module to be installed (e.g. apt install python3.8-venv)
+python3 -m venv sd_venv
+. sd_venv/bin/activate
+```
+
+Install dependencies
+
+```bash
+pip install -r torch_requirements.txt
+pip install -r requirements.txt
+```
+
+Use the MIGraphX Python module
+
+```bash
+export PYTHONPATH=/opt/rocm/lib:$PYTHONPATH
+```
+
+Get the models:
+
+Make sure you have permission to download and use stabilityai/stable-diffusion-3.
+```bash
+huggingface-cli login
+```
+
+Export the models to ONNX.
+The latest optimum release does not yet include the required export changes, so build optimum from source:
+```bash
+git clone --single-branch --branch diffusers-transformer-export https://github.com/huggingface/optimum.git
+cd optimum
+make build_dist_install_tools
+make build_dist
+cd dist
+pip install *.whl
+cd ../..
+``` +Once optimum is built, use the following command to export the models: +```bash +optimum-cli export onnx --model stabilityai/stable-diffusion-3-medium-diffusers models/sd3 +``` + +Run the text-to-image script with the following example prompt and seed (optionally, you can change the batch size / number of images generated for that prompt) + +```bash +MIGRAPHX_DISABLE_REDUCE_FUSION=1 python txt2img.py --prompt "a photograph of an astronaut riding a horse" --steps 50 --output astro_horse.jpg +``` +> [!NOTE] +> The first run will compile the models and cache them to make subsequent runs faster. New batch sizes will result in the models re-compiling.* + +The result should look like this: + +![example_output.jpg](./example_output.jpg) + From f247f38bf263efbcbd12a24c6dcc206f6f8f8fd2 Mon Sep 17 00:00:00 2001 From: kahmed10 <15948690+kahmed10@users.noreply.github.com> Date: Wed, 13 Nov 2024 14:12:04 -0600 Subject: [PATCH 14/17] update reference image --- .../example_output.jpg | Bin 78161 -> 79465 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/examples/diffusion/python_stable_diffusion_3/example_output.jpg b/examples/diffusion/python_stable_diffusion_3/example_output.jpg index 9d729cca118fc3b1ced96926f2fc0a66303754e9..ec1eb12f1b1ece41340c76229994b888868a8149 100644 GIT binary patch literal 79465 zcmbTecUV(P^e!B_G_eGwgA`3b(9k;whTakskSYPB3DT=d6Vx1f7ij{ah%^mNL=gxG z2+{&zC=wuoF5O+8Qtl3K)z6 z{0I9p3R8z2qot*zr8!1NM@LV8jDZQx%*4pZ#D3~D3!IySmxr5!i;GWC>;fPEMFB1@ zgbecHWt60(B<}?|C0TJrF$qa==q42O^z=-OOl-`|Y~ts*&WZnDfBk8KojgVjqlQsY z@WUuiQc#_w_|ppG1>>ZlAb-LB{i2|xqNbsxJ4VmI2rj5T0i&d#qN1dxqM@Ot23Ow$ z@5881(wsVnxH2^SAbcy>%ct?E51wr^cf0_PP@&v51}8#{;4`3nf7u%wi< zjI5lz`c<@srk1vj@wMwFre-(H?d%DPJ`3D3BMLdX%didxuJ~1gdB{ePm zNk(3N0im#{xa9fESH$X?+PeDJ%`L5@w)PJlo&5uYpNEFOd>xsZo|&DSU--Vbw6VFh zy|epsZ~p+YivrmB-`8Km{y%n|1a?tUQ&UmXL3UA4`h$N|C#h-9p=eKC!O+=yp5_-1 zI|f%vcvkhEUO)o7&WiKuV>lxyIVH3KnMStk|DR#u|4)|vZ^Qn_t`Qg$6$O|)s*^Ak zSbm1yKwy5xLBzZ!HbRfUjiC6NFPUsaRb1HLOQEfcM`=-}Nha6lLf?%j=cD^EstisA zd7)>b`|t?H#~GI31x5|0yif)SG|Y%1Ljo-aiL88^NDyEGaR}S z0qgIj#D(di`4|k_@CaA|0bGRu|8zVSwec_sw5Sjaj?qnL`f;|CstRP^!6VeP}oggN8F9^5M8J z8nO)-ZM-=QA&F6?bAhgv09V|?Bfuqjcxq^~;Qr-tQaBSv-6Fg}cKn69=eErKKphvD!DFcy+1TLG14H(L?b7)Bt$>!w3bb76FK zkgzcf-rog`5IYT?tcSsy>)^l;f5|Z7lij@3P$mZ0d@3HM=dFQscA`K4Cs0OXboHp<$wVv#3fv}_dRZ4T z8jmQTz#|BSlo&AUs`PlIQ!cnGxWlPbJPiz<4kqdp9_gvdOdyFNa;f0J2H12oMgswE zN@B~UOycVAQ>6>i!6N><#TD}0k`VAvFs?#iO*97B-!2I~SQjk>y9C_>d^#_k9xcj^ zIFXF))53wLBVZ)BAEEFZ7#$pjHE!su3FXv4dg|`=QhBO^r=WqIo*FbFq3HzROeV-d z$WR<`177tBHX7TH%Q%6>Wualx$qeQ+=N{uxBvAxp3nAAR`d^Vz5D3ADe&7{1F~~~( zP%s?Wd88+r4cG+?O{WjjfIK+@rU^I(>HJ6savm`k+owvefeg%xZzYLxf6t|2FQ81u z_EMhA(1YH=qYUywPs7uTKy>P1z%7N?Ag4uRck>CCPs72G_&9ZSW05F445fhtK2L|+ zWtanN&qJfg1LM&#rvdYkm!+)=JVO2nIEYwBpaM}(yE+|1BpOlR$w;0kHLE((ZOQ(TjMnGVLh|Eh(3}rw%k@!x-vD0aAOWNsZ~F&JVvptSuSHw1ITZ1 zVsaBohXziBVZn4$CZA&CsERgOH~H6X68l13`ibiHe>_!15d6MwERZ5I~mTEsder2DiYafmdD{TRM2c;pGOPQDj%wx`jzDGgs%9|v+1f-@O z^b43^FwV086!7Mj7!S?~aNpE_LemRK^0A0IfH$JFp{EIdU4a}-2h#=a+6^XlC=KLL zUK+qUV4|fFm9w)4qK%9wp@6$|K}>=u2#bWE5nKnMAb<)4ti75Iz%6V9VJiHNvRMU9cYa}6sYUwBE&00M_3_zDAvLSVVUHZ0k= z1fT&7a0*C&`0@96B$}-w*W~niUK&9N2HYeHr?Lfe@RvYr(GW$6vQ0;0$uk8@#-*l$ zXn~-L%txNux+9@vcZ31hodc{!wjabaA(ytl56lLm_5_4F05jMyP$*5IK@OaN@(0iZ z;GaJ51BCMhMYtp~;lYqdt-Me`FLnRoia8?J7D^$IDe$YS#Fj{(r9l7vC7 z0$fcQp-K-z4;RWgkp)ooFWNz=mm~=ubP~WI;CeEdVtd82K#JH$3PA`+$S1&?kVvQL zcp7*egsydY$wrhux&8gURL-7&6(BHxvR4dn^NIO*<6>wiTCF{tZmPJv+o)CGQp<|>su zW03h|6F^{t7yP@o_P0-S@Sk&hSxcnF3-OO|ZO z$pA5lAiQY%44|9mlkEjk3i4+()(8cd0uo@(h!fa8UA#IH09}dmSVB6Xy-Xf|lMHx# 
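For orientation, here is a minimal sketch of how one exported encoder can be driven through the MIGraphX Python module (roughly what `txt2img.py` automates). The `models/sd3/text_encoder/model.onnx` path and the `input_ids` parameter name are assumptions about the optimum export layout, so adjust them to match your `models/sd3` directory.

```python
# Minimal sketch: parse, compile, and run one exported ONNX encoder with MIGraphX.
# Assumes models/sd3/text_encoder/model.onnx exists and takes an int64 "input_ids"
# input of shape (1, 77); both are assumptions about the optimum export layout.
import migraphx
import numpy as np

prog = migraphx.parse_onnx("models/sd3/text_encoder/model.onnx")
prog.compile(migraphx.get_target("gpu"))  # the slow, one-time step that the example caches

tokens = np.zeros((1, 77), dtype=np.int64)  # placeholder token ids
outputs = prog.run({"input_ids": migraphx.argument(tokens)})
hidden = np.array(outputs[0])  # encoder hidden states
print(hidden.shape)
```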
zoOl}f{~Y}nW&jR>NxSSsfhfZMr5IIuAguv!@xdFR;Dw%pf}TrF3BVb`SKw#JpCCsR zdMO~8fCJ1l4c>tek_nI+;K5|*A_(gseSI1Z;YuE0T9DBIC&4=a$N%Q(CHUXFk--H- zAUSuzJp9cWU_KOJgMdmjgZ|Ej5Y<_T0)a#$2L_lzh^e7rP(oL$3c>4u#Dr*_0&oTp zO;FI%z=2942Z1OMr$A5t{XC730z8=si87$XQ_BF>0}cQ-0yY6?0Ia2pQr=c5Ry=UMgh^WB@=OJ@bYtDR`VvM z@xG#GDUyw=(5Oc=q9FJIHvG#NI_BmuH&MVdAi2A;eECPoKb|KF66 ze}Ix7FbfTvWG?VGq*s77P!JJ7?m+oN&LJ}Gu@&J!IQWAU`HPVx2tQAQIlzUb0FOT5 zho}%tG6XBh*aXORV0y5|RazjEMgPy|rjs)P+Yh4P--#vq1LMh;Lk=+52F3#U0V=dy zTM!b!xU{nW_!x^zg(7HUQG3tmG8v4>(91x>s{`t(+&+WmQ)zHahAI@$fYL`GN=Gpc z4)%Su){|gP`55sThf_(pfGGRYf5-kxhVWE>^<{CN(MqMeN!M?fOOFaGx_w`|c0@#y zR^F4!dcy$_7g9js1tZfv01~n_p*lJsc5ecrCrfj{`_pM$*nWRVQ34ZGksb(8#=w&O z0l$d~0qe7h|9TUQ9fDlI-w+G|UyUHYkUcgHylaFO1#$wo^snQ|mH|-xSAti87zJ3J z52uOhhqxHz;Q5J1cOe0Vtx7^1oGV=ze&klyT{hTsVs zrV6fI!GOn+DKB=FVGLbNOIaM zRP#o{WZ(d5?3_sQ@lXIkQOQIeiXG4_|HEz3*j`X8l0+#%;y^J5g8Q#Lfl60%H4pjI z+qu+&h>r57==JMsd1r#>U3I-bSl-`w$1=s*Fl9qe$SC`LK-X~R$%*Jk5C6bee^@QpE3Ug9a8vNO8IJQco~)g!2(zOTkZh za=7wiXoTfal^yeHgQhrD-0ShdU#8F0PYUIKnJj(y&u90*w@4S~$;1giyq>+!ogXab z_(u4Esb&tMt^ozS8=K6~j4@c%K(IkBeF9<~uN=T66ia^#vN3G`YQ7un0#txtJOGYI z{yUG9pvp}JO%s60D}WK~Bw@*ZWV!{P{@XGD44aos4y$A%%3hGY6j(eKiMpnh3<8oY zfuM*5^aFt?pbjunX)^eN6uoxb%QJR9s^8QO@8zfNR4n=HyRU*^L(Lctr#f~yKvv3* zfauV+qRyKhvz~@vMo$fV??q4xiP|k^^Vjt1zy25_pl8_Gs1{pTeQ|B|$I47Y^}P)( z@uGu!E=n&f>rW5%^=IIpnu}Wk&0=y(2$0BsDjD(^5wHS)Z(tv`7Ybzv+Kn5qMtlYQ zkTQS@*~EXM5d=s8|CJR+RNkQPL3!(S9F0@+?nX6x+U2HEJGy+8iYdz~rdHwTswvHq z3d_EWGXuo$w}*LO8?hbKny6%X;g)1uQheptiGu|H-eC+$KI0gnf^9b*6OWLxUK8+i zR1^`==-24v4iMIA)9epRrDx@ox)~s$@pk5LNt_bvu4fL6awAOE%GFBd>&G>=Z*Ez^`~JBsRm0@}2X<=fMnpnFf5F=~)Kv~{ zS&WI474;YJ6|VgH$G?BIx-wbvp+e)q0T)~Py#j4*U2Urid>FhhdHNyj2WgHqg3*9^ z16UPp{JIyg3y3x`1Z7H-9>#zqbd~}^37Gc*$X%gFv z8PJO$hye;hRSsGd9$E&9gF-FvNFwN9^y%U;s`Rk{T@pcGirgg3#o(clfJlVNu(HnP zjkMh`b1;Kbv3Kwnk**DK{yd@fCaOb#%k=3}Q=iJ7aUUhi?C{dZre9rd?wy&^$O$Zb zb$@rnYpJnQCgdY;&ug4W4|*(v?lJbahOOO8BZZRXBbCS?S^I=DT3^IvnO?}B2<}V8 zT{jn2GP+ggsPigK0}>F85*?gszbBC{oXdpDf$}??1*lZ2!QJ1U+70KwVhpet&D6b5 z82N{f9p8U6`BrCm*H!#x#Zxyd~RK4p%QIy~9Dxs%KPLWl@ z*$=CDs<&d_G*%Ebhkl!j#l3WFV0}+wd(J_d>ubd@v>NwAXEo{f)cZd$-ZrA^cUvjh z5`o#JE6whgo^gwcvR;h>jsZP}e!vi*X9T)AI1HW!z;66N14gwCysY{F)MfzuLFErk z5=1Hg;xuSx6hN91YaAoQCwW&x^Gx*7AcDGGFP99R(c9 zR*0Mx8LV2p>0IIH7nJ;t$WtbgT-E1h$MZ_3Cq(a>-OSk-u`!x7!&+>}Tvk0!MZ#K+ z<=ATCu|<6Kol<=YSh1~6@^_1G=1;Fj+EfOY$4$TEa>ZD(?L4}kAN@K`=vp1OXS>US zyD}mGIH0gH`pS`l&z(ifpAm-+Lgmlng&MSlI*G#Y=r!9=6-CoazGdtVx12u6$x&~Y z%_vIOn)}G@dvb99fgOsea9;iDI*QcHHxE0kGMV1{QBg=Ukz%V-k-GBW4@`bDZBW<8 zu&16{bJTILYU;qbDyFmDnC^^vzJjtP;s`*b%BsBtD z1~l{+3Lut3kt<2S`;-tSGXU|O3XmDnOKynNR9?&p z=6*FGMY<7CvOQY7@V0)d?EQ&ub`|BZ#Pa-lu87!tR_1qcD;KBU#FH+j$(&^oF1AUb zXfiSgoHQM3VK>m0m~{**In(zoEL*$jg$lb~>6G@6MArwrVU4$?&$m-+U#3MQ7e2op z^xO-dY2~=LB2#?4yNnGJ+IZ!{_6yH|PM0Lit-XuPhIh`2JrrJ85IpF8Nab%G0PhOF zo*q0v#a+5n!7h^jEqPUh=jr+nqj916%=YlSY1DztcKKWy>zp-f!izVUf)8vs9PUm& zQ%GdcKv}SyuR)OXN&tJcvd&K{omng{8!f~7o#!S4RBuItx`K_|I)Ox;7Igt6L7)nX zTtqkr0Zj?18mJDggF=M?go?I70sr43*~}ezY&r+8hG3-kR2q~wilt|0xj5>`M(4G9 zfqnG@>l8jD&?EmS=nu@;dLdBJXI66rZ+9yt0VlaTz)Pb>7hKx*==(J%bKWdN=olm#?FiCiIikrnSBtk3N2-nEM~C zkJ^Hnvp1c)c-sHLqxw$?i(|X}eUWnVC52YjT@=U*0q5ne6_-N3QE6bY2Me84R zSNyRAew8c;Pp zKLJDzxjSS)uHuofP_p2Ide1=ZkV`QtPqYF&eSrY-U~Ld|$%df5g(To=T4>b(Br@oC z0RibtORj^#3JBm{b1^EY0|xrhf7|YspyVXe#WYArK-YjT{#%Lxq7JOwK)q=Sz|Sjy zh5rxD4zV*RAO1(mL%B#vfUjV2#R;HUNKyz*#${CRM zL3+HW5;;!4N^*XllyhZpSmi}Pgkd!?i>AXXeY`gQtBMcL@8h2RZw`C#r5fXy?{mHc zgU0^8L@VnSx55WmZSc+6-#>~f%PHrl^{>pQel-zs>iD=O|BGUxubiEfwn*xGxsf1-f8MbU*d-dQWRH98f8aG=+;+VqHZpyctAm{vV|L*1+S1$-ss4ub 
zOG{PxL$xThSAOhq%`z|C1w!nhs8m6$iM7W^`9xuMe$-^dCQpj!yiBF>W|W|qytlFF zi&r<03q)FTVX3wvE|84+`M%;= z%(n4pyL>%jiS1<4aDsKVh11s6O$m#xRMjp$(oL6ap~``8ES`ey^7mICsSs}T-PN`& zN18ccWq+~>G|x?Vhst*I}=Kq+*?du(gRw7!w)IlD1%U+PK4a@hD70k2+W1O&2 zAOqV&yZ6ncejL!QjBf+;SG@ab$DCz}LmBS0Ygj1ydk-vM+_t?P+ ze2Dz&8wqMZuTP%*o?K#6A>-^XF0nW_{Iu*E%nS{_Zsl*|sxbQ0)kLVfsuoui+0hje ziA&0S5p#aFeqjgXJv=IhBAv zuxydHF=Ed7;RI_fo^k-qnb|^pIWGUayBJFMBOz_4O?dmP^svpwsxS4oHLbG4(r@}^ z`z7eHM7$?8rsjz>C+J^&0VU)aP+3}XKr1QWcLM1wBLvr=hH`@%pHP(4k=Luf0@Tig5pppgsDLIoh4%y10)QT{90mJ`U4_=8 zZXt@M6QK?xXpDk7q)LMZY9T->1mMh+t|}9FG)BwLsR*M7saU8V1?f{FF7tkJa?@pK zIVzl_qvOj~sFn2{lrNw=14ICfkB}FQ@ut5C{;WYH_M}%L9YTTD$d2}#y7z%wIlZG?Y+^!p z-gUIG;}{#-HhiIW3j6gdrAFEF3nLfO=4)%>7ed_DLK?oS5UvmUvb$4gCf%(VJdUsL zfmNJ6eZ9v#`-lTy#K~XUBes^j}yZgHx8 z=ws#irlYShcH=#F-JOZ!HrWsR>&ty4kY8S5DKv9ggGPEUYjc|wYOGLcH@F~39bY+k z*fNC{A4IRszh-=FKC$IngZi)-O2O??_^@!4R4mjQ+$pOdF3-DcD)kRCbx5NYN5?rl z9Q%2GC^5x~#U$F&`D^C3x$N5kLO{W|SFA_zwy=bR|)F zh>nh*Wu=YgLvxqudcIS9+AM$UeR~&$k6QLlN#n6Qx#Ve88~NtCKMm3EqT-;DT6iFX z+|2NzrCX~ISJ;P7@MWZ6?6d@@o5wy=5W@-q({7e2LhnI;nT(8^hjw&z4@)H{-f%Z;fAxwrzYlFtuUsu5O-3CcKTw*l*_+CRP6) zfBG)7vk$giwp5#|+5?sJ#wsokKb^!nXD-v)Fp5iXIVswDk166|g>L_juD*4L z*s24|>Fpik@0w-)#$^mSjIzR03x>YWT;aHE4P^DRN1)dU+9Bj3mR$1!(F39qbV>*$ zDo$v*GFRObEOSz}0OkcXF8IDz(1xH$&S!6Dm!xFxOyv_!yK_@MBP4Q7XwY4(T#;bL z-*n~hYL4+XYpU4umZ=4+)zg+zDsJYIX-24F8d)KIZK11i4 zb9>ErF=e}giZ0#o@-yK??Hk{NSv@M7uVW$_`=&BJg|GSWo_-rsBB%B8ewW`L*wU<; zSGi%Ktnvd8PlQi|F+yWM+sD4gxbdc&K_`NEQnq(X@)oT{DlNFRKlpZ8SZOrLD7tHW z_mXu`-kCg--m{ptVeS%aW0$g0$g@&o$BPAwWhNhr6XzIT6|$Pkwq~2HKEPzl#X1NhUi91znL*j1rZUCWZ=VHir7*B`;^l>7LrtoZ4AXl|f;iUqvLtGzzf^uY zFMSk66sUjM&@x)ZM5`)5Z7Ow#qxtD9iEq$KkSR=+E|)8(t+ojN)GsI>X*=cKUhHH( zR3(_#uV9g-SKKA&WPR1fGB&T_f*p6JKNfw_=|?4{wMDNe}H%VH9=`SruEp@pz zGD|f`RxDLq!@&;I=+BBO!EPlJ9b%=sUuI(na^jkE@EM-Wg=H)6R-H4!`AJLhNn7&# zU^U|TqVc`F_qtqf9PL63`^$#)96pu@4*c`atK&!%n)NDJT+4u(>lY-KkdL5Z6Ld>K z!KVs0aw1)30IL+BCl0h~!8x!b!3HD?)+iM8kBrViO>`y%&?T4i@aE8B7gXBj6DUuG zk+(=dE4yGJ2x^dCk~Bi0(3hs5bcI@|&@K?DxeXo)#4Z}p5KukjMtC?7$az_DE+}+U z33w9dnE;)fm;Qi632If)LIVq)P?>|LJr7m8Sc-=Ng|6{KWz+8@T+;9cDMeM+4St^5 zZXZy(pYpZ1quj2o_8;fG#y6g!MGI7SI^}+Sv&`suMPz@=Zk?w|Q?@i}tzW?YCA`%~ zVBL^ zylmE)d?D{vr+4g&*rf@{1cLoXtjEmj9P{QxX*~8_OZKrJ7A9$n_vmZWr`1rOg7O31 z1O@Yt)2Uk+yI)Fqat)LC8X?&l&4EK%ttn9G@-3)j(I1t)_G*58O3r?KM_!>LyT?_e zDOb~z?YJgi&L5ZwGL6HHu!3JFdeYr)VA|Nj(E46t)IGD(;WX8xG7fxyol#v;;BJ5e zw}Z#P<23JlQJq*}Ok@VmzA3fS8pMLqTz zxKlWx*mU}hHdRC${r$Wb=Hrb`AiI>HhkAL=w zOuhm0=(e}#JHFg&D#e|dIHK#7|NF-KPoA-^W*ww)wz|R>pKkUZx;c~zi&P|B(vN(V z;}Igi_?}5zo+oANuG>Ydh{j4eOXsys<{0y`EXaZ?`jaV-u5p`X)9v*k zH5fL(y!I)=>YOmWZB$T6Rq{N|v`^EkCc$=2QDWD|C4AZV+or{uR@2;TiIC9U7dP8i zU9PS2p?E{jzcJs>A(nDndtF`fQ_hj;7l(N3^p3N1nwxxDZo{}+aaU$#wa42x@gj~> zD>}PriB(jt#XX*}BI%D-i1jeXyG$wxM=4+L%Dsv45!H~>n8Typo_Phb9i=R9&?q=m zCvGaAy-;%3-e#w2aAM@7>uov(_nQ|#2F0{UsgpP)UlVF72t_LH1KhS$C8^S5`8%aS z4{90h=WOP9jD=?swnTkoObP=YRL*>QT>Fe$J#K@JaMxW^uyPZ>c28Pd`QY|O1#B;M z2HQKtCT3)#+8}m&-l%gffC>KVCYL|!_>H~nr4F@E@ZPwqzqf4?6w2s%EDXj%>g@i&zFOB`nNi}n#^msD zBQNBPQb$#Gq#hR^UW+c#Wpqq1 zxOeAzR9e<6xHoL@?sqXEzq5)`ofvJ?eL{|^Z&{5kzZG!%`SOi)dwi&!%9U>$CD8)t z0sCy$k-g@m;R^&eoyeVFw(Dhm>_P1bwx^uWyb$zND0kp({R124X)YNVy~%DlWt!-G zlbA3YlYUyT`@Pa{eTzYyLxz(4IHSUe%Z<7`;g!ja1^fJNY+mZw8utB4txdu?yVp!& zv+_E8z2{VzXl<0>CXt2l7t3e{w$3qVA0*1|HvM)DHXuryn8n{euxs2J3}RW{8@3ip z8~l#coet&>Ji~g+tVeTQGh~zhA-b|A`dGg0dATJ2k1lepyTiLlY7H}Z)$X{)=$i{e zL~lLY9!-q(Mbvfv9e?Ft?D4iJ_`U#-Nsa5l;IgDhd~MQb#Ae;tJIo*0P}!!)!#-av z;%@`F3iulvf3F?OjT}#_ukSgNql`ZYJY(Gs;JWInuK%e^Vx_Rg#(LoE%CCg!B=|If zql9irdUkk)kV4EBV*pESOy*y}#=aRQlkaq$>m6D$EhBkStALy0>HiLGB{@1mU 
z1?AK&<|VbR(zhhF-Fvl6x-}7? zf?c1g3oeG$KIyoTQpXRwq024r7?UCXE_(_6i7G1vDV>MD>CP~#Urt3wwusp>&NLn6!X@1$HpbKXOrHZ znabUAqt45PB^H42%qbK6 z13Mq%ROVC_l`z^vo%oxUWq|8CTanXC_)Pq_N2SBVhSE>msFYV9RPL)T|F959X-I72 ztE#CBsK7mOc#w-0ls2qUFu7OVb>;lZoc8QB#-X&v^W`VqTw9d37Ou44ZLUQ1ZVRmHRT;po)phJCq)@}z& zDt_$tLax+P-ZYkn?$efQr?@xoDX>fWO`P6X5`F1B@=ERrr;BcE>5ny$UfYE8(b}gU zYOquLS1=B)(7Y`f&&TPC$0|${--ymIHnQG*@>Abw0d~5R*JT@lTV4COwZVqI6vUa` zjn})bByzpx^NYl#uPymu@hrmkmeyxIt;3y5^FJnf`q_a>QV8@=An^)yX)g+iLVIdq z@VecOZ8+5@^Z4)mOG_OD*P2C-(ybBsmAgOp!#>T5$bYs%c$+VnFg!r9rpR@p{T&!% zbS6II7rN6!x};P9OvY`q{^K90)DkDR^@&0D`T@fwpI?o_vm-X6#8Yt%+0jZ@xH!tr z(ZCdA+ujPDb25v@?`YWL`1>zBS!m69>iU>Z>$68}lD+$ipIu9f zpJh>L^s{mnjP{+_NV+k(Xiyf=#;~^FXZMH@+1%r6m6EYDfZGZjWx4W1>pJd>4Mh7fDyr{ zzFCKZQyR>6+Ifr8XTlX2sU0yb?Jlh8zlQS}HB_-fE%WM8!s_uMlUGB`U7Ujk`x;)b z(0?xx>MZ|pw^;T$w$eRNVDYxi-S1V7@r~_+e#N5OT2h85N5ATY>zZA8ofRxz%*Lyx zRbenBIj+(9dCiy8iAM^n*nM=9J18l_Qgo=XXwR|j;N>w^a7W({aadQ7 zH8ifdULyP9fXOsBplG@2!>X6$W+(B8o|C-S^OLsTZ;rQ{2KBvOME2H>z;l`*`cjmtYgiTFQlA3A4(aQBl~VE9k&Ouzkq9(n^0-!Lx-%vbtV>g33vK9nP< zY&6E{1;o{(%;8oX;#$k6#OJS6aPRFZ_m+ffcue(N879`(-t8jKbguq^^`%eCsBC^b zxW}?+7SOR!RHm`W-oxqtt4K-`bvj>4mVMrgf$iXUq8Ig#dHpbRd(gq5@_E*&Pp3C52QHSSnfkcg&G8Y(%MnQKfz5xHUdbyu z4+=<%pxy?Ze72$!*?uLXg_B;X(n*!xT_1yDeDhzeb1Fb5|S&cetm zd@1d=U=0`?N|>?*umlc+w1PE2Xde=eylbBxEK7mqsQ>vM7L%7r!v^*(Lh9q6(7&7L zK{-u6rT}dr#DT4!BCKGKM^Yn(A1Ist>8vJqpdUOnUBsY62&$mwC**}ifm$8;L40Nz z{odVMnQ!RE^%tx54Djhd`OH5spAciqH~s^XOG`I8Gnq1vSfyWlw@<%vI%;AxY)Ni* zC@xG{pZr|3;l-C4pS-cf>l#TgWJY^sPfa#nUYy{KOlRH- zJJ>^RcWa5M2j%E zS>2U7|M~otOQOGLw#yv$;!d^Z4d{q#Ep7>VoNABNRwC)~&6N20BS z*%_k9L_4*~>@%A5NxPAkvv*PpC~};nxSpxcRQp`9P^VWCGo_lRS}ze4yjw7N{B!B~ zr|x}Uv>oOZe^lS9z0Vn_g2W7#`n9>Llgu3mg^k?N=LAkzv~#z#4|Fr8T^LRps+Nnj zG(r$)6SPEZ)ZduRx&}&AAFU~Hbd;=eIt)7ge*Q!xUrRw}0quReOze7u|0ja|x35II z5X=C7wb<+}-`|rkAGZ0@4&>c@ucn0@>(9~dv5%{TN+O~+GI<8s+2T2s_N81l6+1t| z=t^Kd&k6qaGa29YW|MEaq};f6s_`j1Z&}cf zhj+D|%WnjJcrRomQ&i+G((1@>!Rcr7Cfeu^4E5V(H?H*4vn6`rQ^k7|`L8pUo;~vL z3+AeemM^Xt{JBXS>FKKu@KrDyU8`q1M7x~q4k{V9;!(_-Uiq-0fo#R%!f3&Mc4*_I zWHqj{&0@bs;DwX&cC1I~B52PR%;uH1v(+n`t=W%T9lOH5UzO=k!lA~FJl`kzU2mu; zWU;g07>amqW_8+=0%71^g!mS>H(lave!Zum?0gFC_N{qmpoollOMVIz4K>Oe=QAk& z)aOcZGTnVH?VR;3d@7B^!|_Dw-S((`db``j{Dmg2`0)Ta*e_Gj85Wwcy?f@ngTw<* z71R;po=i|&&@LmVh?`h5qk>thhuX7~nm2`SfL8li{Iwe=2TMhm!ddKWIPniex;sQH ze?`B#ToLBd|9Yt*2i5IyP9q_fGN!G>Qep8m&7q7JyHJj|#&Y{bjVrv7zS#=v*V`ZN z$hyPN^j>`$Q7n}3E|T|)pVCov%U(Z>5ltzrs<~LhUWsjzu zsp8WS1z+P=`G>NnRO3GVtQ1KdTtkzF(;FFUgk4wlx}9&AOLuYI$JidK`luwdPiaPN zKR(p2SUgZFz9Su|liJSEsCysAF?_i#SWDPx!=uiDUM?%{!5bL|mX;XT){O3wY!=j+ zk<;g*^*TK$_~O#MPlb#>_p6nmz1$Z-XP9{AdvkN3xYE;mK|fn{Eks$+y%$fy`fu}g zyE-f@AQ(CB3nm&Cx+c_ZynBp8O;}l3Ih%N$(IO=HU;Rio&T#fzup@1~$Cp5Z9I1Y( zvKtyE)jj!>zno7itwgB5>fd>EWXz?@uW;2P(DVXV?B^vz{*jSxk+qShnG5U{LhEsl z#dfk>7)!hCc=GS?k19qp5EE4RF0Wnrv1nZzd@Q41Ywtaq@{ClMd1lF=M^b~P{tQCH z(?p-g+i@emg(LPIPqLd>p0)!@bnLx}c+*T(kHkbHgE-DH+MX)Pz&q?)#zvP)M6KW3 z3kkVH ^?35SKcRH@wah>jscl~nnSU?;NKc&zC#qz|giQ%s3_ZT(;E>rZmt zNn|TGKPmWPwpf-Kd+HJ9dw`y?n_oZ5!r=utB__(}#P!{h_+Ez2vp3LJ}rM)dP zmpvYeI&$v+@Rl!@L55v@{QM(sT0^^)ZXDL z!Yjn){FYmMQ{nkA&DRTuuHM?j^-;!u5I^qxkh1t?8aadSW0GE*L@7L`j#RYw+bQR0 zUcJpRKN2WiDu9@YX$jju77cS3!RmFtG)f7H-g6|xVQol^ z@AK@C9*f($oqu4&d-qeC1FX>;1x}sX6m4|Rcz@r%FEwbnq%m)br~T@7mYF}U@q1ol z^xKy{_1UXOk2XEGElQoXZAxx_+!s3W+L0!6(qtlT%F3|yB1hY}V&uNHpMnujfp>@O z_KR*R?Ieq#)!XIl88^PUv=&iHEt%!qq_4>HDH9$r9GI+n2^voiJeS%Jeq~Jd%xM%{ zm($ohj5FnL!>vnk>zsc1wLI>KZThw{?;;O(zvG}UYWLTpDQA^{cMX=uyT*QdTcgDy z{O9iFdCsLft(niL#M&cbe!dqf+WZlb_f{oc^+n%v|a?=f$n3nZlI8*na z0;!`E!lLhfojelQ=ip(;<|!cPI`(GlN8Yt!YkSpTrGF7?{Gov46gUlzM?meDyl8M9 
zJPm$Q3tDT3j%|=P&uW8JbLe=PIXI{YmOt)+3Qz)0TtcvcBLkpC!$>~@me&_*$iL^q z!Rfg#K!tI{K<5RTz&T|tXrnLG#DUHpgI&C1V7ndIMGGCE_f`b9cM9}mB zr$B(x;^rhykxL12@-Fo;d8Ql1J2E%vK4G* zO0lnw6L_W5bokSbp42Vw@orS;?dcN)<+%z-x-Ehj&|&QXtQt)B|CUVJ+JYUiFw zmu6++^rl0s;!XR_)?HTLr!KRU_fzJ0;l8JZ22H*syaLzm;p<^; z(d1##W-aeT81K-V1KlU%*;$5u(vOX?gBh;LSv|W{PTaCZBLPuc1|Ns~v6>>jBSF9H zMhI!mKQSLGia#+&>xpxynazAT?!uVGAgj41Wg(Kny9!q+uF#+t41DEvyW;t>%l64_Cezd#OcRuXxqR zsKbs26l)n0pMPjQcatqFk?wBrVDuFSE>_G?X>**fKNl+8+Voi6&INcL{^|E1x^lpU57HB!=nNK3o%Hw~&#zh)R?dekXCiK{z2HJNKmi zPKmvtFfLzq71J?zQfq+Fgw2O1So+dQCgZ}g1V3IdD^$?(;8ap!C=wvP=z%H7j%O!3 z{eVC3?fl}fpIob^5Pfay(O4?+p;snF?82hi$j4D%Z>ho7mpb(Es1e5FpKX5lH3?KZ z=^UA1E_boKq1%)~zrR7{29voG`#Db{QAB*IZqejo1y8@*42{-%x@U#Tf=o>`eA}$_ zr6`9dQSV=(9zCfIpBOJ0HV*6^;SsM&Y1*}mTezOmWox!BAB?T|W)uk9($mzPddE8E z^poPkejXd$@1!p|=DThp*oW7ux9o27L>E2J*_wDmpVl*IE&H&`Hxp!!|WFv zXfiGL=1J}&!N;+JqnW4oDE5PgZ}Fa(S?jV(IvHhbYLWdBY|f#5QcNoOD!3TwRgcUW z4Ep>MWu4)?6euRDAbt^p4}_=Bhnew5Y0t+VGxd4N{rv6Dx32*q1`+zw#x1Pv8FlF& z#06D!77owdeznXZvG%()&@y4{T5KqMw!CuzA)%U~R2g&qBN2E~eL1 zZLQbc^R-#i9BlvL-k|hPKl6yR+H`h0rZ-VMBz>=6+7`(xiTdM-hUaeJ^mE(xP2?8a zSq1IePt(raS-!R-ZI73sTC3|QEuZ5^17sP z;HUhx?E7%a+3r(9G)kXx#13oBMkF$sPPh*LfjO>nhg3`H`x!}FTovt$yX9`d8~WR| zU5r0~!L#=M^KTO~0;!Vm zd~<HR6SG-HovIZh+_Ag)|I*k2N#uaL_9_CcnJXw6!;;J{0r4rV92 zq#vUtDyw`_{3#4{dQ23t$wRKmzpC7MKGH}DrM%zbb80%;FwW@=H z3L`3ZSj9<_4DL5NuAwO_a3Pboxvt*l&ZD4Uw_gT{6t^9oq5i_WY|X1}W|Z>aPVNpTmjvftq1H+&+7eJEFIa*oRohDn~0m zaLrORz#OSJKACT;oG@IlP`~wnwvPIAcH{HUZKZBSFEs^el_pb~Cr?`T5Io%PVCQ*v zjAmi?geUWv`CmNFB*+9RVx3tUHjx0Q$}r`Ol4uXwD!ra^qrS_}r$z zODS2nOh4Vf+<$)k=#{6VH$2Y%BL^}?i<(DMwry(BdRr*+ht**dcI!%9FeDylt@~|e(kMl~snqV~jM`vj6dc3#W`O_ga zK_SA`llNR4HxxU9&pW))k4uP{za*f%M}1#v>8-$e+5^%2@!(v|lXv#rT+A=>7EjlU z#E57POk0eLjqkRECRu9rYt`RQNNOM8*%`gqcG{q?`lIsm{_3?mFSqnb+F46%J_7uS z)p}ba0-&V?HuSMZ;nKj_oxGXKP9x-~6N$oB0sLJIY(FzCP!{H5=fLq^JfhA_N5>Db zhz2K*baho%WZ*#XfPEVzdFXFn)RE-{BvTM1?&4fqa3GaLd7=+*t_SV$K%DRa2a|6> zf5rnGS^|HB#RD8vQu72G%gINdZ-GAp@%Jxkfb+QEOcV5{P{1D$LDGPuMbPF%%1WS! zQePyXz}9G?vnSFCdCAA2Z6Km>OY+f8^-Aa^2AKVj$aD4I+54j&UF>{83Dz}_1kh-< z>ufMT)}}q&=qksoo@N)r6PDrJBj4{Z)3j*UES^GoWwo-^{u}1{p2%kYq@lOILp!|0 z<+G+0=bo^gk=@t25PfphI$?v?N|0IkQeCue))%3NdO`RNH?NMiJi?>EkQ9~+vxP{ z_P5LPbzhD$e(Xy~VW>CW{CzKY@al+GxG}ZsTdmKV&(79($eW6mn3-Ezu~VNcL^WTs zDf5xfEakh!LX1ek%2L%I3@dr`yi7whT~-m_{Z!`s@#7dKE}+Vs)Eu2niT~6bn6(1!NucG?_AJh^)SGp}8#=v01gOPkCZh-5oeaEAuNB470nl#(D>Tw%BBu8dcE;)ty7^(Lv#3Gh8OS(oE(DA6{K)trk zk^0WQe3#I^PdPeDPoaHi|E8`y{Nd3hxxv`zaWh! 
zrrY3mLljXM!3tUvOIJ?~g;07(OOup~*y!@Z$18T!=82{DxX%ZaGMsduzR=~Jwlt_Q zG9o=rGDV=*+1~^H3ypkA#z-W#yB%XoGafdcuP9_6wmht38tZP6S|#WnHbH22g&1GR zly?!Z4^?AmIF82*nB`SO-k~&Dn01jf5myQdWLKstk)yq>8EBCiLH=I zE=Lzt21Pj?-{RpU6bqZ!VlK*7fiG@cwY)K-?I3RB@aQ2|Ar884_a>y7rI33}`v+*6 z$ozM?*eApa78mzUZ0S?8qN2Mp=ti5x){YDqsMb5THm47nw>5Qq8`abdef~Hd)!QDZ z6Ij*XEPS_-pmY7+#xa$V-a~b$5HY%_cqW>Nv*Ws>g&xoH2mQImwfR2WnNxL>`P|5s zO*--wu~}nHb*C16wo4=3Ya6k)7PZsk@K4k%OJ(k;R8K_mNn;KQs{?PXg;nUl;-`i^_`Q-~JiO@90-(@N;OsZOh%B=8p z@;JbR4W_E6Uj6l|y;R8Gi}uPcsyTfg$F+MtR%jY`vRy@J zn=BpSOhBYX1sS>32~PW+uVqUb#o-oEYQ_FxSxT>u?pvQw>nfxoF5bYA-Ych@-#`0lsxd6 zPLm&&G#7&q>%*O3U~;Jo#V;Z(#DdkUVi)wFh}??Hvc@D=+c&MSvzSKJK`JRr%ry1U z75+07<6byNjmz6n&TiCGgz(?)3gn7*9=a%6j7G3R=Cg*J5W<2M=4U8x_=@FDw`$Y% zo9NF!^-DZ|Qaq&l-HNo@C+Lrs40?xaUpWkM0@rhHJr)Pf`6kW`(JaBgp~w|Gz33@- z0rzmyxyBfIV7`m1wnI6jbR&xSN_ii!krzA>QOkA1d!$5E!Dmo4`iuR|^7o-+1azKo ztD}G`fv2LV7~A6RAKruvMZ<;*8)h-KoeB5n_8Ku&%@dSweKAWp!LQ(XkjDF^cuDEe zndQMcm>)JjHSmv^GA_3<3L<$i(b37bx;wv_t#U;XFq?z@G>vBV%HZhN+jwt=i$_ch zKZ_q{%jpSu0d*JszpNbDUUn&M(y3*Kn7YechNKCL>jlQ}O>2P5+E3|KMUTS%QCBmwXr_GaynOD!hc^ zfzje$m>DncA0dHR=`-L{+5ZTn9ssNh09OFECI-Iq-{jzUULpm40GCJ9hV`@B0pB}# z>7~D*<6qAv_bSh2UX50%>;r-JvPSSlM4klg&ihhN&@^c(k1Ip_>^LvsTtMaFO3aozr|TNMr>i$Iidh4^!515jX6m zK+Vr}_a_0bq-V=P)%ay!YrF)x!AJFSc-PD8XUEMR?TqEa&`aVEF~Y;I-isdF*m0B1 zMi$!W)g_rB{kTCphIi8g9-3lA$KC*$U)46T zNqE{f3$G3iQYIk7(^48W2{l2#ETP+2w0}b5Pyd{A9om?`>0_%JwmL@Xd7bvLo$?ao zE$QGv5K4@_&cb#Prd-P%nkgOw$2N9y-MWW6}wL~aU^rkbR9P*lTQ_!g~N>*({FkxIOqhwHd`DKOm2(NQO3~diN4|5 z^m5EuXo&RV@2Fo92&$|>XIknl7uBoZEXG+TN-vUOfgM=pcRy zJ!boalZx!=edJW7r_*7Na4hVNZK@O(Gma7IX|el&*He?mB#P8SBw10Gz2W1HBrhDt z@U%_lg*}`6X>eD{O|GntZHus1t?s;ttYfC^>Oiv~>04#cKD=(K>w?hbsQGAS;qr2E zgqTcXdp<9Otk@IH;){6J7tJA3MdgLhUYkNK{SXw@mQ6eg#mFd{_Q!XHMK*ocPKm0P zlf#RU=x|*AHV}_;CesG5Q^plhbQS8MILPEQFNPrlygti(@V&yXsLxF6)II7zy}a2# zyUk$-fr8SDeU@^9vCX|es}HBAI(i5_dJydoHiztbcC)}q?>pQRinOnzhu|Pyqs8!y z5*bfVH>H*lgG+wn&cO}E262WBMz@_`B%9a88YyCiD_<2|kJGqGZAgVwy-2O*nm;(i zXc2O!m8FuS)DGLRE>xUk;~tO$j;-8hvJVF@FT2!Ef`I?N$_J|`xLk|bZhY!gh-W}CG5H`y`Rl4PHwbd2=lBV5i0@i z`&Iggf?^OO3di#c_WMR8jla{I?+P;FazbP~VZ$vgiOAeWO2&E}pZEIg@07_k=~njM zz0#4UvL=^-VptZe)L#~I=@Sjmq3}I2@!pACTWv%*)H4@;HWW5I-10GG{7<^ zVk(ivc1e0rPzoF8j3X*$yzU>+nZJZ=hJ{o9PF-8>lZvmk-)YmBVpo)=drp4qk!P@) zJfg{OG5hO8nnd8eMoDx~stkE-Edc6xAxr_Z4XrOgY7YQ^S zwLy8w^HBl0h!@D=%h$h96OiMJ2Oj5vAjN0k#C#hM9HorF0WAWM8EHDh{>6)@e*%m? z?1mgRkU=8A>U#+Z{v-JTVbK&x8Xw?hK0t_15d@I&uzkv2?4+~mgB z&KZ1?9Ydb*#||FTXx>{u>_F>zRIbVNP&tPIi=|nlfX^t|ieDBX^Wqt&UtNgdy5M|} zDW+hb`GUl+@>o$FtaAXH?%cn+|Wo5GSu!s%tQY_Cxu*mGcG z(iNri?%OF9T4FdM5=;&p+dmAj5`DfR$tzeCp-+MtLQ%K7Bd!>=5Y;qkNEE4|yLMMb z(w+wi3hcla^f3q2c6vE3%pdd9StL77SFq{xwK4pR73$FUZ>lfAGdjzF`z~$E9`LPB z!?H1T-ub->-B$P&5*mVRZ>6%V+mF;vPB<+joBPQ>8_-WRA|sO(gyG|m)=WnXQrkqK zqftR9cQAH$bng@+>$a%s9qFDb7E;j`J4}mKWym)jh7R>$*crLj1fD{*O7ss!sVo7a z(!PN(gl^NGW|PdW_^E{l(l9Be()-*psN&o>IQ*K}KW)LA_;R;`8X;GO$cA8Qk_7xL z*dUVX1<&_5q70}nuq>ZQGnzlfDIzeYYD?W~bAEK=fR__{s>F~! 
z+P9|TD8d_ZQ-*$9+(SY=%O>@)Y*n^gOgVEv4?o$X&z?l$`)TX*%(2r#Ld}MF?Kz~E zYW`WhU*(c|wv}VNS#a)c@Azg5(4V#;!#Xu^`zmr0@H z;F-F*6mly$?}~jskReYIv9h1vcD$|KUkl-KNg@q0TVUvRS_o{e;!<-CjEOShBi+$( zTh3qj9#GT}OCrh}`4_aI{K$nGOmxZ8Yp-;3ZLmUO?>(=DL8X;+AL`(f1*>Xp-I+iPRg%Z{>d3>S zA4{N&9cxFP#KlZhNR4|NhRC+Ls)aq>^YP^vMXc=d2ccecqF22Z%gu1>ixKvp(PAkr>-A$JU@!|nQ zAgf^DeBsU~x_%Vz5;kLIclMe^FEt5HR{Sk$7(f3(*JT6bG^ov!v1GJ1y6-s#+{EC} z{TL{;LH6hP5Y8zR{h3mBo$iipM21U2J5St5w08^PxcsV@8)q)0LkY4NS#fMX+1;64 zUF{jU^6+`edV^e3T*~RacW3kc{_MuubE$QSitORs-SJTuCWifR2(NEuvLlgdUh`RS zH`v{O!rjw?t~>E`#jIzQ@Yhhe;1XFd!hE4%QjNZ3^IQ{yB4aLIn;Q?Cnc3W#Iqndf zRmmF!TDvzC6r|~DDWacWGj8*}(h)lCQn6rgy6H-K2Rmdu`6ZZp74O6N!yi}wy|63% zVD)D3rYjq|1S?xs0-WvtY)>w-0D-W#^^&HLhwYX;XFJnNG#6z=yQ_2sUC0#R;a&9#C7ZkAi+u?x>9vJI5cTrvFhT!K({w3g7EkLo^O;F&w9@7Jrc zk&2kON;w;rYWY-^ZgtCk)3wE!lyv4iYcKWDfV>?xE=O$Z1PH~Kx*FGeH)?7gMFgq( zE^)3q1&+NJi%vj8(7R2l>Xx>yhD9-y&gBJ>^x|abvQOmd^uW-u|(XVH& zR>%8IfsOqcj|o;* z=>FrlcRwro&Iz%v#c>6_rMzm&<8v%8!&Xmb0@mSR5hmy6j-MFb4triGMzLGGxOS{=2 zLH7CN8{2lWJAj)qi~qX`d+u;IkukJ43)%sbHpBPFp!BzR!^b{9E-0mu1T@T^agH4` z!~AMe&t#=w1677`cQAV^`d&)`riX#~*EUeX05XH*rDOW#YBe>#ZcXIQV7oPnAT&2S(QB%Y zvXk+U?6UWiiX(?a=MR$-4R(5gy$@M z7vzl=Cn#pEgcTySswv0Hgt>yOC@8VT^Y*J#P}k!eN6I4<7DGy@8ia}yb%m0m1L6oR zSC#ylz49dqcib9?XNto|1vTVDL$n;@29Y1m&Hz#xGZpZo zDkfpwV%cC$I?>{dvR|4+jg44p6fiyg&~P^-dv28@H1Lz5D4~H z)Hbb@Cam}&8>^5X91MEqH!at?>s{%+Q8{x^wa2R=yff^!>HfWGLx|T7Qzno$H{OWr zch8Z&`8w~*Z&&S$#C_QYYId0}>C+uwhonX|=wUA_TVE1*>9wmsyOII0pgW@zCP{}~ zF#bus(%SIK$mlYRwO{avsr}}cdpeSt)1*w*a@#_ljN&kixqH#&Sc|OvPb>^^1Bz%M zD-oeORdQ6lOIcoRl91gIvwYLUyu-M+8`Lm9jnT2A58Xp#%$DB6DO&zj5 zb9wvKymt#DC;6C^1_AXXF8rB1eR2s~b3SQZCYD{WT8zxp5!vP2_3`{)koac-N7DSe z5trVdrGG(S^VyrzXp-fwKYu~*3LZs8+!&7Q*jKWxuLy&ae}9#Cws{Sv)N`ir*I^}N zrE@i30S^U4h)N&`KA9ipCkq7z#MZiYJ&AI(Bee~g4}MRG6l znk)6=jw12~c@j~hhOJ2&ABTzdq_(n)s@b*}2-D2v*(O{ui)?6Kvd_|Gw zeI2LDJ&8mqb^8M9a_}O%vKBXkHDM`pm7w{0IA5j9WAjy{CMA7OIM_+!7B@Bu33ZYW9$yb6&k0O( zA<$9V{|M*`K!z2R{E`)U=@a<{djc#G{^3LcwkN(Q!d@9t!I3Gp;CO zi{7@3;UFxu6NN$fIwBwcpjmVzXzoY-ir_Uhx%J!Z#ihhIaIPpyJg2XKiL~HUo7Q*F zMj4cGC(4{mg{(j#gRk&#@8YLG6vdAP+KL@AS9oW|f+fiT7cIg*LF3 zShQ~i1F)tuf9)yUj%iiRHjCcyIK1Zl2v%e09mZ8Ab#023k&)$_!LCEJ-8Abdh3K); zX2G7tVmpCu_H7rk5`Qk3cS=*QjnpQ&6855%N2iz*SWEh)xRfC zfNeV_2}ZdrjDjH@%&V=+xL&8zR7+9c4vX=p?chzq_} zvd%_NMQ{}2iwnt&($MUNAL8KrjQU*tnovR5nteF?BiM|RA4gRByV|Xjy0}H&dD4S_ zqTrfl`Cm|X9>@86af3caS=~Z@3&c4o@%<4B=@xZinN!c9PYa)-DNXIbW7G$cevSAw zKOK_!@+?fh`}t&X_j!E0x+%4~=(&+16f&JhC<*X>ueCXcxx^i7Vq!mCQQ{eXz&kTv zYTC8Nag~X^Z0-_9u3v{-Wp!8;1%ytCRFHYayKFyH)TQX>BH;16miB*PQVC3*D!PBO`U}D5I;dAdoaonW=SN9tGFdG=LlF;gE)1u23V>!mfk#+_;>2C&lc? 
zpKJEy%^}~%I5FJp8jb#f-e~EPOK}wFC+_xxMSY#;kbDKs%1qk>=Kr_8EYR)Kzs94U6QT~E3s1j*%(?VvsiA=KN zd~X$zNtG;8l7iP-Ji!{Rewr4zBPGTnv|8bb$Y~7-zT@uBd8P!lxf`OM8b>mzYp2ZC z$O69^D`u91m@<59EVf1K-u5-;xc=}Dibfw-{gof_$+6zyiHi3py@H8|vqxpo24Mno zKZwX6sDomvCNqS2cakw~ZPB(sceq2pE);Qx*nU)q_$i7!e>%mI?n_miPE2XDki)#1 z0UrWodJ%%$uMK{lIRbVD^iG0^$y;M8_fTy5xyk5BAR>Vg^`?c_n|5fb>l4(d4qWvSu`~ z6uqmm&hHrQbi*ZS;VR->VJVN3`yn~C#-fhTbi+cT9r`eTGG>3TDz@fs#%8IsZ?kd4 zT(O|;45k>Lbz4=GNg?Es?QDCys+rYNA2dK@rT>0uhuw3gdMHh*|VV*J;`Gn^w z2-9p0_EULB|Kt9(?*`5Br;VyBsg{kHYnVrzm$Zoa4`DnS(0>H`wi$r9u*zGaZz0bHbK z$}>PjrhD+zaEKj~@@o%2#z;%L4}T?A6{TMkdu*2kR!bOYZEZ+LKaHzF|HG`P7Ob|w z6;1AqI}r|yho*OxIkYikTApi?6O_pXCqG_yNWxP@o4o5|d<> z0`v?p8(?8HNa*FveIc#>52LRoY}x|=SF`_6eZ;Q6d9fH||A*KIsz!iREAxy0um|?P zf(~He0F+_@*^6KB2H}bkz|X^qUH`vZ0V9b>uW%T%EbwD)h64tLmtDz6z&QzQOvC`t zYS`;eq}^)nqqRCU*f1kI?X!FI!G=d$^+t~ivsR`Ijsd?-1g&eXQj|_Va!aA^c}?L2kIOkD{hBOj;TrNi>@d&%VwGq4@0 zLzaxVRr8bmuAn3jDn;z{vC3bN92OJVH@j{$+i_LxT$jd$zHT1FqpUuA1_=CbSgVov zh0pKZtU9>)fgBWtw{=an(!PLJ*y@zWmqz#|dvf(2>P2!y|MLM{B|q%HAaub@oH*l9 zv>C{_&eqOU^^mEGl@dOFbDMni2Es+j0jdr!TS|A{Zk`qb5^G!^|9&lG!F_*(zd0l( z2sU4!r48x3rIBOF;+l9sKW7COtYhC~mTQusO6>La{vF&9KIH=0z=01F4JIzC9{_|^ zPm*=eoBcEJvsF5WGGqR#G?edsDKx98erhWc?k+==!Z%ZBY%hg!aeg5o$IDn>Q)A4Q zMc5iUwuy+|w$MPf9A`nzgJZ5VdzH(t@9F1K;$u9K7yGUk?rAp(!PL!nba%4hsgFlb zGyK69Md%x*jC}egzxMX)&?yq?lSvamu+gm$aGB(OZom(TX_F5gz0S7TwfpwKZx!h{ zJm)9{@{0FPzKV1XXVEl+c&eEn=0m)COy3E$mv$SetiHw}v&5ZhmK3UFYP0;7O7Cn5 zl!}$;3KD_IO&}uQ;saMPkRKy1;=nJXbm$`6Er?bdx{i=Y*^|D<(&O8ixvty(98}-c zQ3h$2{?)ZE4eu$SMW5+~kABLvcS{m{pv$wOA#LK!(9&(poox^qT%bSOTq43K;Yu<~ zP*)S!nBAC0kYIRcyO7;6!Y_lsKI)jSPYFsZ$TLYv2#bllLL`&>R+8MX9vkO~Ph2>k zq2E4khRsU!k@6m?0WHCn49+Lq{R*<68No3qnn2;IYiO$70)j3|BMisE-1yH{&(E95 z-&xa3t}g5T9NSVKtXBD6n#t&y92Ft+|n@Sed#N6vm@)s0n zyzJsTKxg(w=;@AIFiA)qYmB|?Bt_Ke3amJw;EfyNb?C%VdNsd3xBx@pJtl@n>T{x& z^v?O6Y@@6CQ|Nj&TSAFG-eLF_Rd+5V_ee$Z?WDpDgblO1kUSyIZu8JK2y}%*jQwWc zL#ou-B;5zKL{0%dAbQ(#U(5*pBC9~Pu5!K}Mu{AAI%-3N1!HrUWJNq#qUG|$Y z(J2862+voi8ZWNuv+PmjbTp{4BTt+EYQ0eD^G^Q~FYf)lYUuK9o5sx7S(VaR zrzF$&jtGR)9LKVArMv{)k-u|dETI$}=br6*(vyyqOO7I)S}}lPHW%svF1<~3 z=7A2yqbAhK0*~p(H}@%1!AcWYlC@JkKlWdfrqcI*SP>ey>J!Y>UtJt3G=D2*y}c|; zhEYW8*L{iiI=2mLUR%Pj&Bpf0oEqJD!8j&~rIEU&O#-_QH(lR2 zBf6*MFX$w1pyck8x}w!aT{-v(PC3O%?+7!KL3Vi4K2GTeE;l#zO64|vrIfZ7z2=NF z3A8h8a_Z!`XQZX3x&q&TD@?SV)K^fW3M~|p2|H9WP*~A+0V}^i8%fB2F+k`#qBy}; z*V^bRiDhP*;n}s}<6pP#fp0DA(HeX=9SMbb}J&qAw#MTmx9o*I->ngbmm zs400-stOgDXV9E|F_`+=D8(Y6yhNdL^P!^YPF3*qpzAMafv4G;=@XGlij#hNI|;(N z_qVy~6?5E14=VSekU!zx*bzRmzpmJ`RamVKPe&BS)F&(18F&C%wJ*8;)Ly*}dW}(+ zE7EJxjpIJ5mJC@eiK64W@#@+Zm434w?P&B;SHT2(p0O_6*r_8PkBL4H(LHGUA~L5f zsl%iFtx9MqWa-byc=FqP!AZ#Z=(yBdG43()s?uK#6;19I1sTX8W5bd46p!~a#dL`Y z5lv}v?bf?yl8kdAz1kxJhDeL^$>zvqiWY#6 zB$9twDFC_$AVa)#;Q{N4m!$*H#xe09Y7z)x0^0k|T88AXpH321|c4*5>Q?n#w-p7 z0pbetHx)t&U`N6X;J#ky$}f=S^(1Vx&R62&Vhmd)hxs1r3Z7AtHu(j{s!9(%GOGof zssdC*mnl3p37+XnBWh*uJ5atfroDAnjr34#2)f^1%dr_iF~b#;?g&9`tXZwxuf#a@$oG1YX5b9P=gC=XoteqN@!j;>gBdgN4e96Yq6@Buf_BytuGK|e4 zcD+KCZ?Q3!)*BNVQ=|oUlv|a<^(x8sJ*%{s>dZQ6QRm)8YUT@NYN*nXOF41AL*9z$ zaU~6wgp4nZy^rqwy+6ssMY1_(ZIZuWdhZbUZa6t>?U>eJ2i(A2PM3Qg^THWH1f9@8_S)d(!53Vuf<~guY{H4-tgF0rr--?rCnb9s@0? 
z(7~FDaZ>`5Ywv68kWb+zlCjdI69b8X{rX8`6!|-0Lz(S2m@XM7lI7D--9e-?$~h9H zI0{gE0R@Wf$>$m9`^oG{wuhm;XvrftqaREK%Uy7!OvE3jJrLHV4nnExCQlgOqdqhx zab3Kto%v4W(1#BtOWoC1hAw{6y;bbSv);Ja~MKm%o}D3*U3k>b+5d_6T6U{1?zsq!J^xt`_o_A&y}w=`Fu5VZ!qX7Udz z3ZFk-!9_=PBxXNw`a-z1rL=!6NU3F#hL|7RuNvCY2z@I|S3T}f zu&BeC7dh&O0Dg$q)~9@dao@)g*UWGa;e3~1b&Cf=d}w$GY_=GJI~u3$RM>uSr~rX_ z0qzQ3Ljy%~&ZF9lr(*9j;WS%e^@AY1X1k#lW!8*3x&c$$Y*nHBi2LwD0=x9e$BU8* z{x%*L(c^tVg6VX)_6SAkphdmSyrYq;f#-OJ?8pND11%Xdl6cd)!ubAQCc_A)5VZ{T z*W`9?D#dTORrCaxwI+zWogNo^k-go7>Yue~+@)WQ(~9IJ#*5KmgBNjfNs^4-5++Kd zw#v+qCa`#_TpKByeI^6H2MMOjxl($!9Q))R}Fkr^=GUZ4BzK&bsn-}|9FUr$oUH@Q!3m}HlJz49G{YAyO1toi6tYF_m=Wh?k?J!Ox44{S^j3LHvPcJ@#j^q7T2FpnzGm99+Luo;vJ9RC4f>v74%} z5!k*Y_2GZ+(>gT=*O-Y$4IFe&nxS_b}menlDRwVzfWMml5Km;gIrV->rNIJq~CMS+p_fMN))wHS(1 zOZF^~fjx3qgc$EjjLIHp8`sdQ5>UF~#Li?B`gEwR726VOzMafx`jIVPV^O0(i*@GgJbk`CR<;6esDW2F^TC3rYk^^p`>!v z!J`?IO2M`y`lO6P9Xkg~%2G^nh?J+GQp2hFnY=p6I=#wLFq&Ya`&<`#{13p=qXHhm?PfTy^_wN%XBmdz|59^`7mB@`Y=?AeAC`^wT4 zS~7bG#Tk9dHd?MI|GY8h<4v&X%hjV%i%Ki;MvxVC=!*2oJRq!LO4|ZCUkm50#!w*` zUTqx66_p+I2uh2^u9aF5L!#3sdF^Ct>{A?s8Ys3$-157$NeGuUkLGR(oD$p2Qf=1K zJbP|7O-`X;9E;dZH>aQUUa+UtpZE-<6)q8&;v$-D5D$xZ9#Uk{O`Th5>j}17=muVW zU+ffxLD&E7t++I690%7I>7mNLwH{+afxnijHxIGAl3-@<@ezbB8h+Nn0@5WyY<6BeX73zk<-Kl3mB4f}yBHi3rOaPkT+vuNqi;rUM52dpN_Y}= zmajP3WF1*E4^CI-($j>Ov=m?;dFYh zP5W**aTWi9UhygQG5>JO&QFl=pudi2olY)&Cw3bSDd8Du?V1|%wG(`KOdf14i_H?bY}aMC+AoK*WnV@ z0thl7M7kvJFO%)7X7$o9MTn5Pf^xXVxk`IcGn8%t*p${SlSk?2uD@jF}<2P zPP{JJP^YxyL2XKITo7?ZQDI`nJzL}S1`KU!vkn^0zmGTT<$f885s)57vfj>VTUeW* zl<_ZBluQ(w=1`vujC)6@KKK0GR6)GhAj+7D(3qWYWX~7Tm3Js6tvev#hfW5DsY_MH zL_Y|<_a-uE5t`&iTqPh`W$}Ac#$PK{SYrWK5j>hQ)0M-_E9XBTFhAl(2U5#)yFA}| zsB>`TPJLRhVm|R;&t6&3T4?=gK`fxIE{2yhigi;qR9-W-EDEyWESvReuM$d%H#G^L@D@dD#Flm-z@ar=j3L@2He=xs|kt1N4Yz0%F;fx?@Rkn=&oZXH{+0}uK(uh>E z+`-FO!54$X1U(z%9*ELGA+}Vr!~LmU0;KiaNy1a}_fJ2?&WmDi-*rxT9-+;9*K2#n zl>Wd@F1)rm)F@=1-XdM=3s}uOtkuo!&fNI&$a$KQc?XB}d7)3|oQfl7cBUMIr-rqs_qbveWSjIK-IyO?f` z7RwsrgeOV+8!T2bjaAwa`7hd zoOE4yai2#uDI#l#(dE$nrxitpz^GZa(X`}7dRFQ>EB<3~IFK?TE5Ih7)Z2BFV=ojz#6)a#$@)V&DOhNx21Hi)5;b#^dAt^th7+p z&E3%7yn*jtcn!A~2$QSlyxn+$?INr@-aO@*vm}IV2p|8^5dSa8%#JSZhsC$le&h`E z15xIL>|+!7B`emfLYx#%v<8%X@>nyz-#>4O&Y~p8n@G*7f(~Cb9ZN*XYn|AQFA~O} z?xDAXqupUB&lrcK4T9qOU2g63m%p3~7&XN}HhIa*j zy8fshzwur@^3`Ki*d%?Tdw!sU$au9HG!o_5V_?bI{s{r(N#~T(LNkM|!Ui0vu_+5>L_o{!mAmP(Uo_-C0o}EWWI1Kh$F=KtBwbFY@9X8aFD*$@bEgmhK+ZR z=E!_PxGG~x@Qz%PC zMywAD1_)H+#gq?gLcuGz}|u-PIwPZoh2X z?&Df(PQ^_zBEA;3N8w)eCWe+pj_8(}USv&S&c3dRHMz>ulmqpxwB2;>46#G;ff?~P zJ6a!5NAYYDr-ny{Wx~|LH{io0f79DUC-^`;OKszz1Ip_XaE7P#e2NPAp5)cO*aehE z7Iv~mK)$O#Dp+twv^E%@)jxk6CyLTx?pxDNj9G^gYPMIl`lh#Sr4mE$hzQP`nz$I( z9{3k3qHeW3ccTifuS><`rj9Q)%gYkO)B0$M5J-OA&*h5|Qs?lw-M;BrCY6a6%f(r5 zML)aR3g5Hz_LvlESFYzrs3N$gBYd#@vTRZp%~C#wz*V~M25aalN zW#iSri3_|B0D!&&x;I|l^+gZBPccjWdEtTP+q|^OO#u1`kd_3_OVxkn883RsI-srq zu}Xk1^YfSvajo)%9Y(Fz^8m@|UV}z`3CT@Qi`uxEh33Bc?QI>8R$D z4sv{`gc~54@d{TOWlq_O$>knIPx+)6G2P6?#?M_=z1`Sgq(|u+DLU1w^vSlz9;T~t zH2Wc&(_QT^Xvl~zs(Zb~I%a&5h@_^6uQe@wHaJU`r4a9Tan%q)SBw00lQvm2(fLN$ z?)OK0M|<+tn!*O;35F=$J|97iLt7E9t;@J3>?)oS^|uS^8uKDs8ehp}TAY~yPX{Mk zwW^^(c1a|n#K4NIJYY-tN%>Bte5I2-gJ}?tCtBQ4eyAS4 z47+(SpF#Lts`-5v0g<{4a8C(BT{p6D)@0Dq($ue%%!|quzv`LziVvJ$ZfE=$^O*8E zm{-!8%90Bcp3F7k-uu?etDI+|ADp6(!~8l$^{nVyVhFm1Ss|W5viU@uu&kuno@`zU zWEc`MLYD6g!4c&L{&@1@<$0VMp3DW)8ntmM&b5C`i|{4ZX! 
zM%u2F*GM{Awc?sXiuw{YGevJpXI-mZcdB5uyMryLR~A=^qlhCs3ODd6rX3Y|dvIFj zKTtA%L}Wxq?R!-*cUo5T$h@8|*yE+RDMS|LoOz>o20kZ_BIsaZFJkEUhX2P z>Dsxz>}P`$WP7YwjlAKq{|^Y=)a-XnBl> zn6+ly$F<>qtD@bT;GCO$9A6RxlYW-AF&{bYQ*7$dFf8kn|MB-F(3g4j$#)@aPlt7y z?dF4FV_moFt|U=fRMzPK(exEiQ8irGq=bYZ-CZL{cPb1pbi)uzcXyY7jC2V@N_Te& zN_T@GNOza?^Iv?we=YBliF@xTaAKdc_x9CEv#CL8(MwEDeh-_U zk;C`Q?%o!)tbf6vWM}m9{|}+>?W;d6hCGUO)43Av#|y*EkLS$@bCmN@qSij|Bw)<3 zo-z#$(h}Lw0t`-jY8mbj&|B5I1i9Z~_}|`DkP0&3E!@qIZ}enwPjgRrwXR#M$Q=`! zsRwkF(BJfK*m__WGid13_GG;)ts%ID;>#yKXimPZXy>51MN)BeV#GwJ0i7+KRuPZ- zs(kj-7WG`a-^2Q$M}-|pwOoLi*@bt|z1?n5#@dbIZ$r-ULmdWzzWVaoL>J${Z}_}X zF<-L5j5qye3%_v$5Do5aJRI{x z`*kMAk_F|_(jA^{x~{0Qq3Q3>a0=EUdm3Hp8>Jsk6Ei#4+e8jXEFvq8b^O@Jr+@p- zBL`YL=YQbE-?6^UjdP;(B|9k4ek#KDVfCSU%&~23eS&}0J_q3ob?+U@nnF83KOXSP z`O`|c!AsKR;;IW+foenvk z?8tCdRN=~PI>2n21#lOJasd4y49En)jvfGe12*vg-0Yis0LDKDP0l1h<$rz?qC89u z%nF!I0wRaKfKU%GuU8ep0CI^c;6(w@`Tc`uZRT zpwN~Fc!iwSGfAkQ_pAz#p6y8izJe8m3l7LC0=|b_3bMQgZvZz8zyb;a?1_0C{#ah`x!o)7-zs(442_mC@jO{AL%~rQ(8dw)n`u)No3BtPnDjkZ?dhwg17fTrt8Ghvc0fRr$ z0v;BbpdZK}Tl~)3stL>Sui`nb$qU=`CWTC5t#A#p`jm4F1bW&oi3*?JU)u^po(4b+ zXD3?DN%16p13xDtP;|tgG3@%LrJ0OkR6DGv{XJ8=!lj`J!wIhiFH9x;(Y}MJR%5PK z{7f5T{zH&$n78|79=T3#{(jfcFT^i!B)cZg8vR@&BN!Xz;6}o z5QbOWNt=~8!erYg{R>x;Ddmvp0_O*#o66kVp1I)h6*i2^%FW{#qHS5&5DzZ5~-^{?CyObJ!y{*gyQD7qoIZSP@S76OEDmJ`f z?^|NJ(mQ&BFMBj-rU8BdeDRjW&8_DT;i;lwjm3M3iFrgAmZ zbj*L?C=XmIiSPCkV2i)xg)mbF*kL_2yr?S5d(FW7*AAJ&qa0)i0g1#DeBZ}G5E{b; zHY1DODNxx2dX^>kVxij|SmVPq?d%qm`R>TH&d@byrWPW zK$n0k&bz47YXfHdqQ2!#j8qI8@?23s8X+P{1)Vu<#9XR(2`=w%tvn3cBr<1$@X}3Q z{7V%n#39mlf8DH@uEG5p~3f`7MgK<>7d~!ywdiHW6A$z6& z{De~WTV-7ro=Wj!-OnkA_$>Oxkzov?!NbohG^*|P{}7~FiSA%!8M1E&0>3>Rl*05g z-)c3@9Gd-dclg7x_7f*er3NLouf+x=u2UycBEZ7RKDlde>RHfDc>sDZmNYDZMg#80 zf7`rD-2-4ok-W|B_OHhxooPwT$Cu-MV)riT=jtTZK(#ZM_aa|DS1yu|^oo9BiJ|dg zjoJ*J7Tf?R)2#}2VdVuTM~N>jZ_?1PZwjZg)eQJ(2M*oAMWPz}u^E-@;@#zFj#_PMS(L`>aaiU5NGJc@xqtpeVMipbt*!fqqPig)r9`Y?)?(}g(*fCMAUHhjD>rm(ouq0|~@SY=# zpKiw9!Xa-UE8T1o z!_&p9^+L~$8 zcH(>#xVlt`=tCFIqbza>QeB(+#4na1qJ8h=A#fcY-Vl`zATvYkr+b1HLcjXB{WFv~bg>)l_hqS6o8O_a0&i)(2E)8muW(4r`*2d(9BE z==^s-UFbh1pA1%+P53clE*MzjkK2oG}TcH)+&yrl7b&h~KrPkhWPAA2iyUc5YMLaqr+! 
zo6NmnmS+g+oK5?R*uk}uGtI}OBV%dzdRmlh*InT)fmaqiKpP@#D2Xg2s@V+E5z%T( zjGsw_tZr9;l_I{47*e#O^S!GtKU< z{}C(pRlM^91Pqzad*`Ej&c#``&0E~dFM!`y*+F8aagei#5BS(A0?K30EkGrbDIAFN z$oMIM8VWF$dRAxdKP8})^`A2lKk`IbpRp*_I`9B&%$N7 z&xuO+IbQ%8EPzNm;Jc$fHR&Av+>8L@tY?^rQ6wx}eFBw(rG@VK>VWYcKy&(cx&JjF z|MO=z`FB+q0-&l2nEU*n1rM-?BnaFib1~r`Ece`S0pu%S&j{h)&4FYI$hgA`%5ALk ztkwU>lO{S4(o-4^{-|2FxfZv%_(0_qBj@)XISNN*wdAt2)^zTf1ITup*}T7JO#^KD9h5oCO}$oon^B0-A2 zE@4a}vMqZo-H{dnZ=P>!2uH2$_g~goh+px7acEazT(8WV*KdYnF1N{b=b6gDo{lBR zm4YW~)%|MPX)2AlK4H@@D!8Gb)>X^7Z^qhs&M`K`6m#(w>e3?C=p}L+aBB+f$TVKC z3|F{~GM5rv$*W01v;vVAsk2~3D-#^4?khs>KP$3-Id(qeN3kIRgNrZB&EMPFWE8%n z6=(?|$zvcq@fpEb;{bxXNd_xX9Dstd0J8jT7}FW&pt=xecVm?Q5WF*tZ+3rH-OQ6m z!TwBG(v9i*mYqZur^t8O3#>+I!;wa(tr<+9Ps;-&D0eaa=N0T6 zB#Ok7CEAo|g`B{0W3zVq6ir&dIWBu=XuMwh;2Y=KyxOuTPP7V-jHq8J$LJ>LX0ji1 zUgb$ZOY-+kA+rhFG(RYg+{GYS4RDr|cRru5Kt~{@Ib-<(M7@YX-auc<&W2GVD|k$T zlr8Vfo3C+--ZZ`&@hYd4-+1RnEfN8{b4`0R)}u?lQkfB#x`vrzS^77DkA}8+@}T&d zuMTj*NXyRCp2>I0qLZL7<}KBE46Abm?-5%Dg&YF{`TN>DAicMkr zMJgmVCSz`NK+K&@C(-Vcp>>mf2)FM%HWEsB|7V7JPj}V>N~tSXqC8$F^dG`2{dD_) zC;G%jkAq!hTOl9VuBcV8pS)<$M@Sj-j&7p{W5>g51(F28O>HwV66AqmF5k?iPse^s zFQzja6<%)nLlPfx0)^KZK5%$OH!t>3cg+`kDI>p6hK~}-cP(!Rhaet$?eP8Z3SWR8 z{H-azRZI_;H66doU+PHHp4!SpgZgu<1$`NbF>p|IlA`Rj@^V#=cR!eIh)y44f8#7- zKWLCa-IYDmqS~vAH=}jndsR2L+S#;QP8lvtwQ)O_v^`Cbn^lyvWaH>*Y#LaxeA|dm zS-T`DS|4Rrso(Z7zSq@fwA_O%X4o~|8XN|xwuL6c*fFvRZBS!WgW~zwt9GbZiz|8Dcjp+%C%{Pw^J~Yu z9Zb2SQAHtAVCyp&AdIqe;sf1*DhnAF5ZhS|JTL*RNZEK41h!^KyU}9DZpL?4Y@3DzBLM4kgV<`3Xv$kjv&)4_?2x*{(BmhpZx{72%fQy>06wuIM+0_T`Z7{q3x-I-)dL}XVbGJTnQKo>+2L|A_NW~p-W&4mr2{oGO%VgKi0a(O z132f9sU2)8Mk`GgS?qvOu2-JCmXl_lcg=OGNQA72CgYN~A1Y_5_hL#&j711#(^s3Q zPWY`05qWd5!AGscxo^%{UKOwDaJbkgnPq5mRe+oX#$;^aAbPWfBs7m8}ttx6%HpjUQy(_PG()1pEtaUXX89xFE8A zy09s3-B+YBGNPE~;ud!OXxN+eO)zT5?z`0&N=lU#oaoLEQS`eC# zyJGTwD71jJx34+pWIC+H-sM ztEoyT^mZ;m;I)jMtI`+mbQ8+5)r?w2=Ro?9cnl9}_EvtuVp|BM&S1^GKmzslnEVFm z9<0n$REHRwf&T!h=@ev!d<4fJ>N{6>IdE(u!A+Hw*Zd&kgJj6QTsutqhai-m-jgcz zig5d?EZ1hd=y$n`&^@SWOiXNl4TO=`XuF_3IY)v{W9m#YUg0W`IOx=k@)2CrQWw?2RhQH|BP&)P`n6%HPj^@@;K0an*Jd~wJVudC9gt6bc zKZ5elE~szhKR+F!qiyNJrPRt6BS^t<^r4S)w<^0zkl))DbMant3g+&AlPUT(X59O? zpaH`5Do1h-#Hi+<5LTM^?(lRcq!`o;^dRUdtek->D53hoNwpttI2J8Y1}z1W`X3Vn zB~sAVbUsJ7n6)Oq(`mJ2Py@S^jYA@X`Z#&Au*KkcOw~%6uhpzB#V9olC*II2m=mWc zBP8L!45O(HV9|VZRoyc-oAgsM-m2DQk{&0ABsgE)`uN+XPFz_|J9xAwu-E64ihgg; zcXR(s3WybW=m{!}yueD zTlnR#h9Ia+DR1A)GW@TKLNU(nmgJC+lg$E&su(Ez^hF7Ke?-N@9@?+jcU%tqjZQ_o zKGgmV%R$aYQHGOS%KRQ!Y7nblHADq@zzTps4e~Fb6*yI^$vk`B0xAI=TtH0?Xbdi} z9c0mj#pKEPY0Itymisfez=3mSMg0(Ra1)oo>f3C{`H8)-0G_;TZLuRa> z=MDgRgv!Xy$ff%%Gf)j|g~@pUzSJy)pK@a^HC_TQ5kvst8F(oP*jf3}-3y2lIxj$g z2=aU-q;BARz=wcGidKfnY7~K;wx9r+K@~0arrYtV>N8nDy176?T&c{PHu>|OA}m=7 zidPb3UpAP6P4nN0_nT9&-Z#(NxM=AhXJ2H%_si?=^|J|nuM9J%qb-sQF75@x9}q)+27caPm{pr< z8z+;c@WD=U%FS0(3e!5|hp!=&W_TNhVY_1icArt2WDH0tz#vHLQ|?#ZJbmr$3W*6$=vG|5Os`Dz$yh&K=BpZUfcv4!%`a-j9Q z1jT(|pG_gkJF2;B%%;USLmf8J==nrBPp&nAT3>5%q5R>^PMM?^zg?Uc?(il;_tDo|UwcwsFO`XwjE|FU_8iFv^;wz+;b?4Ko7CS<^mbq;9EasqykEU5t_{wbOjq$$7%;aS>b*@w za>Y&V-1s^uq%F$FAoWmBuvLB$H#a`XD}QY6R%K%=;D>v^UBc-uI?+1IB>yvaSh$qI z#Vy%F?WFpm($t7WfLGrv^@agu#Ym$k*KcX3vqrG*J+;koxjV}hy69zEQXs|qn}EW` z!s*{Bk&4b`uY#zY;bnA@mF#qC!-mem^94|Dnk_KG2`p9#m#M)9Gv$0@z(1|k@`5_q z{DYnh<$~<6a(;VM)o0dhBJLvQ!OtH9+cf9qNG@->LftkPn(bV#PPF-UV8Ff%Jabr? 
zpgdP_+L|G_&%Xv{=MgkeLG>3|h?{>Rki!|`A`c5A7PC{R7-4rXX|>X=*rXppSdE~( zIGC7SvRyc?Pfv|EU|h}vfpA23ZwKz8W}rL3*jmcoX5BLKT-ObmWTUA-CQphnJj<;8 zT@Cw)nQI293=~MK>`swqN{Az0@5->9gf%noQ42LvaHKo(S|}k{m`T(#_yr0vR&tc_ ze3M1#OZ?mTH3sd@aii04>^q!*yp>wB&vqzSWD~VLYWa0rS^>M%J+?y111r2Id2cAB zVI!EKT^e(x%6nNju4=zWE(Q!^nVSlPzhhddcr;6P3vh?i4_y9Oz1+ zba*&zJs$6$(wZ*H#L+`U#tEd1xq3Wn2JKYtcO@U4wc6kRgucy}y`A!c?i>cNO;zGb zw?<`;XkP{pIO#mmi!rbH#Z(*WC4|ew$I(;0WY(geD4~uQE1R+Z()Y@56d5;;x`UMZ z=ZTT8ae;i!O{`&aBo~(FnStyHMbI(z&EGwvn6C1Q`iI)5Q(w*l!Ata4LBQW9VYP*#>44>ylG^kqrVbt7%S^b`)N3>f z{8ggT{|~_<#*8A$!K%nx<#F)?<9`Tk3TcFc96wo#yp{G8w)iLpgs6>^EV4Jh7f>nO zsf~ojN98}TePB|Y+>|KJOGBeSl(|u#$$Eb`b&Gu<%Z+PE$Hw-duBkZh@Mxc9N9u4> zMn>V<Z^=e0rkZwe9VsTTaiWv{7gSIs91m z-X-|>)KaB#<6bUnxz4WhW4iIzT4(awxa_;0_$z?b@}b95!uAYY;e;do)umc|DcU>Q zcjPMfaVBb=XA~@>D6D}#P?(mV=Yqtg99?a#>#TOkI}$)-1ZIXQUW~pcKO2?rM#}Bi zv~+_urCTos?3HXTErjarFVN*%%PP*fgtwBFSSjwR7Lsh^}Fy{}1s3sBq5 zdVHh+jCJQ38sdLZ;$gL1$g^jED;_p)RqfyAqjrTBd}EJ|myW-ORS?uJ4N)L}gsid1NV?Poj%1OXhX!T1cEushwK_xpdK{@{DJut_l7qkP6p= zQ}Xiyw%CT2Nhm_~q&0#C`>ciMCbYFVGg>lxaslXvUALh%VNCTbG4vwqM$rcI;o8-^3QEi*48Go^|4X;XZ} zL7+sqEMfXs9RK!1yIe7gQgEzb?F=| zV`qq?hqil|01!N0AJ^-S6?#Ldf6nI$brjG^mb`}%pA+i#sO1T+Rm*m`YfQ0RupRLG zv-=|rF(d^JkJk1dWyw8aTmOPM@TObqnn=RrF6l$q$VDugC+g?p`~b_h&()W-GFG~a zpL4wK(d+_%fi?6{U2Zc;AGJ^LWS~f6>J$*b*P7d@jY9F53pje7cJmf-7itSxdJWLCmk9_}j$uX&Lz7z+{0>4;PX`H=Ao6d0^W z;X;9vL)h#bA)<|!W!?nNvh~z8GAisHtbV{_1GWANstGX|+pYbcY|2WqEnN)DbyPqv zVSC|a-jE}tu9vjnh{Tsc=Nul(*iqVNwJck2dM#JADbVmX!N0UmhCBlJ^bxf(fee$I z@S0{HHQ>+xPEaYJQY#}Ra6Xc)H$GJRFvRN(26QH1VtKeGu}BGHm& zfH14EscsSDAz_Y}o06oa=BiH%qzeMWFUc4RqXcRmA~g8`SKX-=$0|1=v)7Rp_zTcc zv>KmENcj@e4Yp`DvSRiS#WN%rjYm&^s4}m%g~f z%lqiEi*HRZfhq_AM{ICEAS?NT?+34^2KXmdEqDAewrj^&m}T{ZRlZUEaRXDaH6bZ9 zX-buuU_h$71lb}4t$-()cjPLPJ7Y#!&UI9f%7ry{Sk0t>9qo8MJ!8sM7K*WL2LX>t z5KOTqGVnAL?i-=K*!i*8+hYD6A!G|<2Zd{VZK}N!Fa)j6)!bjEnFA6M_H0ODUQGG*QJ$l4%zC_|Z_hF*nuMg05= zx9h4GH>nb@uh zzKE8Zqj2y3WPpWA;UzKI*gmtI*c<6tn_d3p0H7fbR?7;~s0anSnmIxF*1 zJcZelMO_u+VSX=3bG>^uK5y|6FFVIR-^X#MfuExOE1>aQL}Kp;&`c-&SW^Jj)Vmm& z4crH(`J^_pqeBrL4ss~VVdy@DJfS}U$g?K7XmBrP@!2|FNgbG#(G@E~3~SIUD@6>= zkC@-hNiC3PW(jvHtd{Ty)D;7_;6yundWs$K!^?w!HZ*>gD?ODz+d(&DbZ>K}UpINl zQWmGA;G-=!=aeFG8%2Qoq*n@K(_B%Az#lU7U-HNNQDm1y5%Fzycep3+ITS1_9m&v8wnq0Q0DIjhdH(#ck0S0I`{o1)k2C2dzB=UBB9Tg9W<-ODN&J9lR?ws+? 
znx!NNN*uY)8@Vpt&B5@zSL@8n4PyAs>WK+V(*^}#0F-#Mw&_wgZFyK)Rndm(uH`1c`5#*P=Qh z4|0>#GG_{X771<%d{$d=*gdA-`YE*4wfR0(9y+h*Yb>Us@v$Mo2kT1dAsN0JHdAqE zP}AH;Hq9^2G=<48vSvamx9zc(0N>8F(vKbcq+T@Bijifs9&p3@Q$$s3XDPLqP%b6o zJ{to~#~%}6jAranxbFTa7=O3oO+StOS#g7ZgE=|Is>0hV;?C26!_jY+VA@4eDzA~*3qe5u_ZIFpXM*4zHw7%)=vD9jMo0Pel8k$wRbV${DX*3f9I6 z`!tLALJ6Sw*A`E*`)f>3hGLW@ZsTS0-F9VRQlj3#>5$SC#jW7j6;N!f)Y&iG4z={1 zBT7X3$dtXVUPkJZQU6MG(Ckz_QETf(C7dNkB^5k{^_{ej=iy9e&DV4t zL=YTWj_>`cN?8aYKx;&wNs2w0+Ks{)zW2aqQx-7Z6hIdsL3`_Bt?XBAt`WgINg&Mo zw@F=z?Sz}l133s|r^1bYk>FK#t&BF>30oBMmDt|7O-f4D7MN^3V4_Qadc=t}gxyn9 zM1OdCRn+ghXm+;E*}i58`yVzjtzl{&P*42c+?)m8&nz{9bKDNSQlJ~} z$CZUgTZ^UcRz)XdHhEW#66OIaQz?{yGXq@7x5#%!efSYd2ZUHPMN`;Jo?IM(Rf`MFlDCPg!5=e&C82lOaUg?Yg) zlEtF3ENZrP5@9d1OMu1(Q`KNv4mPz__zO3TKHQODZ_laJm zSAxABS#BOJ$3_%Oj}bP+MJb>Uu-?;4i@Ax#jN?xsJj=Rf0WB4+zeuYAtuYIrFapF+ zfyU^0znKN0mz@!amd{`P?_doJ>-}GQEr$eS0dPfvv^-t|)Z9c~i_9)4s{kf9>+>Ub z((4@5+;lh#;*13l-bHv@3e~3(mr8i#We_;5ezUS-BS^E1BEc~Jf4>fN`YB(cQ3-NW zic-sfxl%%A4-lQ}d;YK8r@@4x0xN2O@fv`*CaGHGt^r6X;Lpk5ecb;N}NNLx<`2jtq|6~wNM~y#1?QnVd?e1SAMI`R^_G-@^)B| z#Y_ow2=bkan;bf095Mdhn$7jdhKrTtulb$>0{LVrI7kt@WjNz3gWls@ife4JPzq2Z zU_Uk7>0A`PB}8C2?NFGAV$ETt4Wcy>?sda5vN8`wuXM31i(E0rhjyryXN*Cu6@_oK z-dZGr76&E`f9&2svJr%O>Rq4o$0I?>rstEW~obX6z` z!zpg@4fHH&wk=sv`tFQztFy}{t_aD`LPN+(>W=cAfq_9xV*|ao@>Jy3YCdvpx~5e$ zQ2}(N+==i;A5AP*dCnwNbH7jTbWVs?T|`{p#Ybu2LfL+?>Uy|X37#k&$@L>8!5(QG%A?XnfI{LC`%J@P(Q?b{L0oYWpLHN8^wrX)KMi{#mS>9 zA6PPFDlJ0x{BASH%xGB(&We0(=mjG}7SSCDMAQ#pRkSAh4}oLyd$zju3SRZSSXf^H z&22uF!TY5apm0kj{&K7dT?Cx3NNyKfC~nU%IKpJ0i7f-j4Z)XHij#I6?Mfy33wpUH zT;+8ttl|dp!Grx8M=R}P9eHbRjVYYDHtKXpE8g7Vv#7G<9ZtB&Gq4jOY;67R6sVaT zpgME68bi-{6tyAtZgXSbHo5(Dr3QN-YtEL%gR)!N7~yWy)o|tey6=}SY3fXPOke?8 zjz4M{ESH1hbJbb=ol@87Up`OPG)wcF&KEIX6vk2Tw;7X5IY>5#NH1@P;(u01{14&H zQ1-=<&I7F}c5irrBlAehXCgFGgBp4XRZwNisli=CO@wq?(|32n6{W>g%Ma>Bc>(li z<=?UPJnytts*FpZVhum?mDN?+!Ry>y-6Y3iWfU^A{F83k?fS5ll|lY)Ob+2Nl6*bO zu4Qa~)H*tZv(d%!I*~o!;OVk-VClZ0cFyD6Whw8O6utv>)n?_RT)gS3ZH6LxI}bD3{z&9cbP7+>u+VCM)SSlVZyb=Z9r(S zeiQd&A_T7ZFw#c6A9(kx(td!exSzhm5dDNckyuOu>ID&B-z2*p%z9F6{b)aPyJ zYPz;R+n_;J!nh#HX|E0-&q@Qkc*HkvnzLv0v(wY@khPo7y@6O=N)YaykC1BXnTPN+ zmA>usqJ()kEDTz)hK_|1o8#gx`o#wh!T#m{`_?BC~5vEX%2@7 zuSGi8>SfF*Y2h{u1$YENod%O|2n(Vs(52>}j*DyaAmXwX65+#Gid^h~u#6oR6087IE&R$kBMmTJ))NUSde5W!d5@D!;ZS9% zA>QcUco|otP66CE_J#X%(e4G^*fBYVlC0;wtR@66kc*g$3IAZOcG7qaJ_wXsr5us^ z^1C_8cA3gm3do$6=1Fa8?u@XBzl6%e8;O4%-L$SOE5qM8}OI#VDBkr}t~j*Wsv$Anpy zlAuBDzNC@_@Lr>+%F$F67pBTvP3kDO@WVg7{tsbX0Oe6+zQ~jr6kYUf)LWk1AlBzH z`o*r%?kPlNq-?8cCh)4ppyiVzDwp|r{qL9Epy;#cqQnx$E(#@*@%x{lcP4Fqu6jny zwf7d`$9de6o>4w-d_P0V2K7FaEPW{$m%pWQg>~3@_Mtgzel9u6#~VVVAp2`k8B3P9 z74PKU%Q9v?T2B#EKc(`wMwfCkH2GjudIKiSNG>$a$HU~@57ozY!6c?Yo?grt4xLJ6 zn`Xd;Mvuy=*aR=35lrHHkSQy%CBmgtwCQt5`M~lqeQa()z&ze=&+TQx^;Y=dkP|tZI8@sMJR1wFzko6?nY4u3m)ZBH)wsNm3%gF0f55_sNp# zLeCS!?l2D@%*)Ab8Ec}ydvgx^0)3!SbF_$Z`>bPLZB0|c`uh6@Q>lc&@d{<6^euPB z8QA$fOUN9uvn#>-oAg9Ohp*qpD{exj08h@iVNO+;&)FEROdl>2K=%58Y+qpyjLCY5 zD-(iSDpo(D*#73Z+nBy6|A4$`C{w&IFh@Vc6VX)Xf%=d-xcoiibr1rY&>-KgD}yWE zz<{5jd;ad+cbgD2wkdEhp1%vit@D6rMwX1dkrmyqe|!3 zYzF=lmJ4dXD*A+}rV`RiK3UcI%2(XjefS<>=J$V2$p$avmqp0IYLHQbp&9x)yETzT z?I^61=%(P4qpR|{Gt=CH+088X(b7*E&2@vw)M+mMg2sD>VyW$zfv)WBGCsnGG6RF6 zmozv(JEYbWUJAW+SNfHX3s$ZU4wpE6yK5E8+wsdLf)S-9{+sn1Pwhz+a)%fh zta6c1y#V&vjY&6FGnfpqdNv1bmXw&vszia4&+PJ3X0gE@-Z-1S{=guCRX;gT@?ZKF zbq#_JyCxB550`sWdxpM#qJ{3Za007~T?1guGznvC&?keERVNaa4jN2NPOp z;tgJhTW`ZrSl(=*u2z+#XzB6r-B)vEvb9FGS zSM)U(3JS)=-@W=o79dyzH|9%^htONI$`@{biTUw2A4nwzl06I#N0VSBuMd61$yc`g zfmh*(G+ny_cn?7jWs6s&D8hy#5h7YIUkS$v1JJfS5!q}(Oo6>Sd8%5jmsY0>#?Y#1 
z&4C`Iie0z=8w6)G!zg>b?2$)q#*OdmNOh;=J0Zd7yW4RJ{RdusnI`>zotA%Rx$6RF`j=`;Sy*C!SgNNWj zF*b`OUZlQNbY-(8Fa>S zWoFwa3E*QvrZH(J6_i@~sv&Vi4!@8C zBj+{-s_f}y2yfyA8y&K_N{R2p?A{;Y_aV!qh_RxR?pg;6(KpCb1ssrte#6TW(HL3V zcsWgw@G1{Q%i?~SwZK`xWN-%_9V1xs$^^lt#i5?I$+&060W9nav5s=qjxwi3D1&>X zyOu!c8-w%hyPWrJ()tbNiykEIalTqpzza$ek8q&evdbHxPyW`LU)h>-Do6CQUovH4 z#2mYMOWwA3dQcx2JobJLJ(eSn+U~&ZnNA;kd1Rrgg2g#}t3&HTn<9jRfu6rn2nx^C zli&A%u+n2QjBpg47L#`RVP$LtD}QKFGWgOQ>yW%e;HuO{4j12@wy#E7*S;@*iSLVm zs+ZUc9%9duPdLS|%(vI12W(>*-9ed5=d6bV_i_>-LhRVCzhx^l;n=QP!Z8_#jIC0P zJ@~X51kJZWhf+fHXdPyI?^NKqLRQvtI^WE7#C*F((ueRs z>#)lw1;(3x??|MuCC8-u7a2b0DtIgi+6kxzBu~9m0Nn($6p7w(K3J7$bJps6<77Ct z6ugD`$@=DGGVR~VAHk07$HN^h9P)k18gh#k%NMcfh^9|Ww+bw|%Wx#ZTHP)mYzcXX z4rZV9Me(p5n}jGGD%8M7SxK9>;QOkq-3^Axz|^2Z5tv;8cWsG4xMcQ(Z; z{{Hr3@S&2F?>eO{MB16)CNpn&1gd;I=xN6k!e4w^@y`+! zE}{y`7c<{25^6=K_0KFnzeNl`>yZP`#R$TpsJsC)f;QQ+fJk3c!ulz4H1yXNkkVBT z$Fqu-ZH3qbY23puibvb9Gu_kWc3cYQh?!XFrjKQ$SFY2i6px?@0K=CZCP^KO)sOQU z1KDfzq`b14$MAG_CHt`?^wWW@g}#NPO8P7!)EHPG{0O(t=C@{D;NOz-R6mX(%p6%ipLqP{*7zVxkT6dyA1)BR z9`d?9a*@}9TS8LEZBLcONwUM6i-pZ|JCia*UUjxC@@XTMwq65M`%L{6XdxCIRdOz(mv$fce(*TM7~70n#6ku$~VZ07yv%mLBAg;-;P%eX=&7cuG^( zQ5Mndgk%{6#R$4gC4=b_uAIU|h%)P^Gj$O}J4_jdDLlB#DQhUDoV5~fqDNrV&udYUoyrMR3YwT!kbJV3(YC!UudDlvQbP*uY ze<_4Q^`H~7KUZb#UA%NilRxt1kjKEQpw!ZcxRm%ufwq2e8s41M7+9z;5W3zFEmS{h z;%ncg;jszyoR^hZLh!y@vnZ#0ET4@Er44ez&gr$8Z}(>`1C)TiNN{eLCF!rjp(=U7 zWjDcsC@R+dW;;#HL%~r|uB*!Jv`g)1}2W0+BH9kE=dp3)AlD zfCL^$IM6A>bk9JroVwm`;`iZClNMrzQ56!lw4<2mHf!=WysYGr=*i4sL#hHlcwyA| z?7EXT2}oDprFr#HWG>ZhLkVn%2AxWGezR0+(EF*&{fTN*Q-?g4W&<89ItKi^WjVmE3o^oNgs{A)WJTQiTCf(a znNLHgj2kIsAS)kU1uIVEV$Ih`UDa@rAgec}TS+V$EH=tzP~I)aN|%)~8Y-~A?1_?U zVt8c;Nv6sH1h@Vm?r9HW*}qX_bj+w3L%Y%b^BEwq4<+)Tk{HCu`dm1>r%zHQu@;xr zVP5NeRCQx6s`z-&rDt8UbjsK)lD>fXt1fsuV`P8pb%HEp!@WXsUpOO?^>N|$HT5zf ziLQ{sMC;fIa7<&OV>x#9io6|_v_6&HY$E5t_bR~A3BgK92UU(Tf*6cM1;1NNKYf4#ysn~FN*Hfh> z%6`3uWS;osk$uC<7{TJxh*16BS%iSb`)amXf&44eM9X9K{q0`*R(#Y*Km7vXQOV4u z^c&wgTt5hif9iZ_f!yQ$qAO%TZ45p0&EZKl9!Y&o{+Hmw{mye{G34~m^WxPsT}-md zBuj>D;dsYy^b9Ud-(&G{C7(}}1@|#=B<;c%ve6{V=bgdW5$oVMZ7#;RRfgcbjc$p? 
z-C<)%=6C6rmEBzHBPASbCHt?F%r(XjXPC&&XXLzKpSCjAz_23Nv^=-DHUEV_I}!z@bsuHh8og}UFTIqtZ4>ctU)`wT*Q8VGkm{c@j*IGIp zi<`sM%2DXU(WNz`ayr&**Vz+6erAE((dE^$gL}Bmp-HhKq z!hE0Cd5@SDogl=7=!eEo;3=-f@8r!*p=ec1uPfCdP25h_WUkXS|6rj}vA=9|74&4aC=2czVyU~L#uX{d`6=cX;_7TC$k?xHkI6CKvQ>ugVb7XG|s>Bk+hh(d*kG@;_OxhK2#DmO&?6&pvk zIH)hg!v}ADf2pak%%R7qA_pk4lX{Rgb-dK3|&pYSnp=hG3SH>WpP`z6`{-hvxkt^$d(= zZzN8cvtuwmb&@Eo2ja@B4G1Uq2o^0p#`y38&&mN zf!AmJVqNVsKgM`_u&Z-5^1SjzR;SX$HR9MQ^^g7F1h)k}qYeFKBz<&QbKePNj~@4``{U=co1r&t4A$hSJCnJbeezez4T z{wY{{BRUv5W#tJyn#(gQ8@#z8L;9hE+^RpMN}KrYncJhOtWE{;qgpc$>?PfIO}|>Z z*{|0=e0oUui>X^YB2psY=FSb6u#V^>^nZ{k`|oIlwTedplrv!gcj^Os7=xs@lxxks zp7U5dYI^>NUGdkMWk*q6D6zmDYrG;uCw3U`C;S1seCwI>>q zn0==CTSYB%e}uggXD@rsn+I#y8bMMT7^myAnvC82fOt@hrQ+2FVoFWVTf6z+PR}rH z5Ay=ej4vgYDNj6RF6p&6ItNNPgm6Wd4%o6ix@V^}*jKH@-)#tRJ*O8*x_TKYyoM(% zXBL({;W^?4Y)IO9ugXjR;+F5@)X$*@h>8J;Yq-zJ0^DA7pJ?el=_%jkwoUNpYA)1O<}dsWo3oYKN~7j>HqL+Td-d17CKM2 z^H{uFs6SEXPsVAxIdX38!l4QrOV-~$%_Y82CezPF$xT6fyd2crNhX*Q| z{RbKGXqsk61iAc3@;{txV+w;CKU>>3w^1TyoRL;s7Vb!L3YTw*Hr;KUj}oN}^7zP9 zBG2&b1o+{6>}stJzSa6R_c|@{%8LRuLBF$ri|s*AI8t7G8OGP}jB8*`*HtL8^Y4ar z6OHmkMKiXg%f4sYrQOF>X3Dbbn;fC^@W|1HyBAUa`#y#XXMW!Zh)2!duOwvkXN-qr z&=pEMsqf#pHMgRYnPy8j4T4+B*E)FgJWH|M&L479NilfekEy1K5K`zG*O47y0*T7!n4o zIQTJy*#B{$A#5A}n*;_S;673B1a}3nB1SBwWnmWZcLXb}(?ngdd8nQoxKcIPkS=9` zZ^x`If=a~(6*(yv=WD>8A&DklJ{7QWFd;7$jdULCLYrB6 zcYsMj2`=UG@_e%^ zd{110DQ{}_o=%w-LeX1U@bBW8MXuOld-ZpDTQB;_*p)Q-8?ixgT&ZLa&jaf?<#hf` z);xW(EnTm(L$|Xm;W+>Mi}VYrx@{wr&d#2g<7GXljG9#$3GrDu4tuBHc1p{Iah1j24XcsY#VvcJbiH6- zIr)Imrr!_rv6t1r-re2lQ$q{%0@1Z;Epml@5wbOPUKGOZEa2lF{oIB0yR$~h{c`?< zYaFT}N=3Eg>++M8r@nIy!xYN0+CI63dzwBbjz<>t9vH3=~M@ zdZqS)Bf8h%1fCDrtq5jMB)oKE?$(6(B=X-DpP6s#?>V1EuhsEVL`RFxDFMGCxP$C#Cs!}wp{xpC^k zLBD~pY+~3{3V8&(Bs)B3k9$v0Hbl$jVP2c;|1-Qi$OGhLH=WSsls`wwimYmF2~^Vm z>|h8NcxMI7b=6%N51hJeBNg`#X^+@w7dP!>HR+RTUQIIj2_}`5mXzI!Esst#fqgIi zqTV25b(9i~MzzP4W5lWVbof**zfy7FXouR8C700~ZC7d{?w?|@<`31pp9~9!K6)a4 z%F1knRjsNc+OSGqSekd*1f4d5orW4QzQ%CQTOzZ`){t}tt4)tQbKc)cF}c`OmoS8G z_2wH*`QkRb-3d(Y5CR`4-8akN*C$jjr6`!TTWgM;Oa86lmGR_db)*IbX&A-ve~>sU zHRMOe8@{#Q>s}7YY+dyRSz;IN$iDsN>YF9d}cSbcj7F(;8UzM93+#Eicd~vp~ z#QYt4B<a#8#dq-1P;K9o&U;%>Q2vdmV2hKKou@AXlyBd7raij0~YbK2R+&cnbgQQKnsLbA5di%NTOWy%oQqxnG+aFbL-r z0*DG8wFiPYbs-+PX8@RY`CoAbY7xc;1hDZN#V=4nr~Mjy)&I#!lrfw@ct>_$R2|sy z=>P-9bYTegW7lQCWUD6`iVc#(nfgXuJ+0d|-GCccbb`%W+D&p#dK%ioS zWn?z&0{;+(o34K8B;kLGdJjcOkVg>B@as7=~9V@UD^3L`cS&WH>J?Nt*N;72_$2eZW6tN2(}KiN z`5no> zH|y506=k{d-8ZHYp$RJljd6(D(=;)5iZtrbC+Jn|B2$b{)Ik;-S&_v0`HCP-^_?u* zY`{3E5%C@GxtEC>E-hObMg5Wq;z{(et?O>T@yDs>cyUtlxMTC$IV(rT6F!a?wTk2? 
zj_p4dvtp(Vp$j|GcotSr?zOBbZTHFx7y>xX@>q{-MvhaOjhUD8}Q^!P7bqS#GJrzFd(W~PNB zf|q>`?Mg4rwy#yBna1%DkkV^9kiEyYL=Yj1Su6vXZmeZ6F7NCommkz7>z?|5l{wNa zP^Hr7m{*7m&e=+?F?3~*ua9|JZdhQe11JpY$@%P=#DyPoRkGOt^`g$sGj!>-y}f?L zyl#;9OljuZ$MI$6WW24rhxE+udRQRzfBKlf8-{;klf9&5wbH^fiSfE=f*gOH6oL3tHT}f5ccb}HC zF1<~cO77iZ(SPh@`ED%L&xc<_F=0Gq6@9M5k7Z2bpM2={%9N&)iejH5=G=Z-qPj-E z|0|LsEBEpbK%W5|?ZD@#uFtAuRjn z!DWcX#p#8IjWa8=?lZ2IsKwL=_Z-F?EbbXQ%jIief6VuXl4=2BabWQhC^5weC{Tu= zzktFQ^mARGKw~%R4G>;@PL}Au?V>t3=UEBQfcXx9Qt?6t;M92sh!K{7u9&T&`5v(9 zbZdUi19B*udw#2v)d)eL%yLGs$Usn_fVDM%2tcZr_CMiV;w00AY${q@%ZCJhWFFWBBN`y1Lff4IOh=gS5N5e3|>NY~ByH z`mpUyw-jExyS*YqMtqg!+^NxKOt{QKpqx07HJ~C~2N67B)){`$8F(QmOPTem#v(Xx zK;l*+I>|ja&Z_nM;PtxQ8*wu|vWGsh=|~3dX3rT-_4D-pb$gXZW=1H8kqBf%5WPzi z3Fn}+F2T%HzgQpKy0W1}kTUVXJtyc^Nh=E$@7-#d88-?<6^{8`-J&2r4J4=H?N)_& zZU9fd-5-g!PYR^ozDvv!L^8)7-u@QR)jnb|TzK{9ap5)3+8uInYiR5hCML5_fAwjO zN33jZwjSM1o9_~IdwGQd;w`&e%ch5UUYlg&R#m$@Am!zV*cP&dd?yNC2 zlhY0W(jO=Hdx`G2Zr{&CVfjbfn2cN6dDt3p83{-70^~LR)VAbL#>!iU5DOCd4vTdmdQ+j;|91dWE{b78maY?dce&CL4yjQ=3a5zjU}V1jb_ z(A5AJzsDQAdFpVKMoGq=xe3k3af>o-;+L$hc;iOY2j^-$EEmDEp~` z;P|DW`|@eJ{N#!&CTfj*XPX#d12Jv?U_(_gHR}1_NFEOG3fq5!m zbHjk2-Wbxsj0U)?h*7=UP+-yc&*ln%cX6N#5yiJ7c$xvBG2VU)Fb3H8A!X1EV0_h1 zny;k^r_wV5E|SD0z_E*<0da^yuec(p#EX=2m$f{ zA$UmuQv_uuW>m|KfWH*(7|B{re-MwlDLH?8V!=4+eRB7PK}5}CR>5^I7A>-rqoNQK zC8d*rDxIw^gKw`ex~m`8zVdHXJ#o+Oxn*CkaZ1Ri)IBCqc;EX4;8BL;@R5tioa4}k zwZGqg-Jg_qDBw5U5lkBYP@kAN9(+UNa(d4!shW2q`Ydhlg7WjlDMym;Xd?Rfcl0!W zv|ihUvpqU+{}$h$gs&Vuph1xEqda1bd`JoUxkn}0G^%XTV@j~i_^EM4(#@LrNHHI?^xLb71nsbz7# zNiQtumdku+$ZM^ImPN7yrC4wEkDfBeb;XzLr{1SQU2poumiD(#4$hxyQl8RI!e3kG zH)xsxpEY%30u&gH2|)Sp^b}PFrFucg@wB8{@*`|?=g zX((mTu!OixD(^B3kQXWc zl+A|E;as9<%P>TPa&XAj|F!ght_dI0)pEN!3r@oz5p!3G1Pngc@LEj3dr;|t@DTqS z8)alyQX9Bn0Xs4hFD4>@ga4fXxF!^lV9)?KA&?%FeW{Cv_$HLMlM7MoO=Z)F%1)_IGtL2wg_VR&6gRGta1M|-D*t3B#<{YXYQ9t!(aEXksZja zEudIxB#CGb8uxQU;x zawHak)`8d{fFZ~|MTtG;VC$|E*=L_xq)~H*A+&0zz7Z7eJ86fS2^aVFZcbd79*cZ@ zc(~-K7XdBoi%?XeSlCF@=E*D#y#1>;r(op>ei)mp%|$fv0U^VJbE9{@_QDls=Ct1= z;Q0MHFG6GcvB(pKM3Ta<-(((2uTTk!ytSVjCaQP-&BUNHeljD65{1&j9i#Z57*yS)8sC= zg)N#*8eAclW_kqKlv=}fF;N#=L zXifRn62hwScXD&qg2Gp`-6c6;jyhdt05fLId7z6f5d}UZ>3ZvBinl#4U<~mEoPIm@ zTEQB#R>5+bF@X(#|BrvxfbSL~sr>Li@&Er;1m0n*bR!T|#((nP z(U?Z*;IAcze+Q>HEdxOS)=q8&)I7il7(}hU=)GA)DvEJ=ifpMBe`AG&H_VO@uA%g> zQRu4_FkB=eN{CNKWfWJ%to@!(f#H6wWzNZW+D=CBadY4L< zo9J^gyA_S1NI|EYjHJ>-jMXbMZR?RPjMb}1%Nw`0YmrK0Qeoc*;Uh#^y*JA^(K-%h zxN_^qx6WOp@9nHOK0$e`G|4#G>xHX!$Oc6M2I>V?gOg)d40@ScmpgElELfa2vikby zP5s>i8B^jHLjr@nJ%ujGKQc}E5|1kGf5`fN@2*d4jmEDb-?^{}3~7I?S6|^$zVOa$ zm3jb+C=oZ+LGNR$ZsN)QH`jO&2XHy@!mxov6Lylsuk!#X8I^gI_GY@3BDR z{3{>t&Vkir$U63e0Rq;QH5<=~eqXe?Za(=FzE^no94mgvmwx8*?%ni<-?2-Qr_akm za{dfiY}ePD2%)T}&Q-YXE#F(Vttb+Rnxe@lHO+y>n9KJagETqAUWiFEK9o-Bkuc1U zHp9V+V(i>VpY_w1yQU~rYF#aL#rwW55GeAs4A7c1m)^=(Df5}?-V=_eede#5r1+3} zS+wJX)n947F?_S~-e9@yAkvR1dFh70R`BNh$KudMMb1;1fImcGNJi#F%BhH2;|spo zzzQ!Dt&L!*c_&?y939eO;sCN5<=?aS){SgPmgPx%IO?%~Vh1|7W$t;^R zGbYl)OlqtF9ZAy+^tX}}-EEeJmVf57*FMaGxiEN9P{c2f7#bT?7=xwn0t^d%~t z6=VewpcIf(AkiH#KckMyK+OkV{>(~k5b6?n9S+z##3gW$0U@r7QikCmPcw`Sdhs`v z!GaKctQEoD5cx<5TI8eL(|boTqXCd*;<8dv5G0_5PAe0iBd)m>r}hjA@xs(0KKAsI z9nt#Ltnh;(`>K`6p?!_df?QN03zH)e^lC|@U)29p2gF^NF10<;jLmr0%R(}wdx!S} zGMtai{@Lolr?N_6cB#zVt5xe!#wc7*!I;+!C+&$|LhI%W^!Xu#PW2E%a9EdOksKB} zi7TKU?`R6=z|3Exio3~L`1{V6h^z?azA9P%m-!omJoL{;%FDazkvjQ1vlC(o`zeA+6ABXzPXd>#VmKnTzYteMo`B(*ZkB_J}4Z< zmyF}C2x8)*noF|!&}3?EHb6O2aFjnXsq#e{wS9SGroF#MU(l7^L_X<|e7t`$WH)?y ziT-`t^@t66u_>R~b52(%VlB}^Vd(1>AbTiY0B{)(lq4D+mz9=!PY>{=5Lc6czupLJ 
z)|^7kpdZ>-$;><_K(@STB3gz0NTU0IJGiiC@D`g~51~f@dA6JL12!TWYHz-1yYxMm zReRI1R~*52S1$Qb2SI3yu?7=%0?Y=DWp z;`Ha>U;5}2N7~wgn&-K_-<)KDOOLWQ>?$x7?f=_Lc}8V?O(31Zds`n`Trm(J#OKa` zwdjA3W@#A)k4%E+EiEgD1hnFiEgf-P&{m|ONbwEP(?lcwB7j6f$w7h7-@soD5U>!! zvja!~4hfi64aUkvj$nY$Q_-DH193W_-!T#eVU?is0dJrW_EU}=0}&&ADnKtsKM+6z zIKR0V5%7RcL|xI59Yxt4l5_#q8NN|!uY}ARk;q1UE&>rHa*U;ThhA`=pe@C~NInv+ zHv(?kpQ9702=F4h!1!?!ypkgPFiDCA0lSgFRvwE0j`0BCvsrHoz(lwQ%Nadm)xH+8 zSE!l?Bu$fpV&4|TocH}u=R<1*K}dy`&D(l8=T8M!W=>0=u4y?lIy=TI7ER!C2d!K5 z(hT7QI!FyaY@^8CSBg5vN>8nt0rz&a)T==91|4BU%>Ib9+ zQkvd^^=OcI)H?5#ZC%%M`Hp9y5lSHs>@VWRdxL5_7E}H9Xtihlo(zwDeTdEKRrA|4 zLU!e}ne_VF?7Z3HAB5UCnjrOW0%0S9*+UXWWbEzs1hfWOz(SZIN*fL2Jc!_moU!TZ z0q-lNQ$NMDMMB3ZzXIC)iy4X%KKQ09OG~Y8k5#(flR>7NWb=xRFCUbK3@6uuy@oAn zn^?od-F1X{)tS@c!BP$ZR{mS~s20uR6#5=<=gGU0V(CrMQHSNV`Wm7LqS%GNi>+z$ zccYi|ZlC=sX1B(QoEv+5JlVMA(@$yF?YbHVg2nY7v2+JE{c6tbe|hK6;E)nQl&y_^ z=PRbqb6Mv3`X;@?$TV3HN(3?1)gKfFqbTTxMcu%oMh@tQ87)O*##+=&7@`U&Xmr^A zXH6%>12`tdiT{vv@IaU0MNOmXLWKY4_oS=b1s*JAr20)De*#?(-rxg&5&$MIL4a?R4RR6~LOYS2V=DLEdWGF~3|6LeM0HWrH^@P^m)Q6*dI8jKr4KZ)JV#ZbRL3yV}(Ilo4_`bNlpmH30LmoQ) z0Sgk!2|`(mV?I6C0odF#aQ|q4mkfw6m!!2n6ZphLr4(i=G?RQxu)V&_Hm zBkt;pZ5f{CeCM>!Ik@dqLhIA16jggbzps&BX}ifBb7R5;F5dI$MfT*_=8nB;Fv}88 zJZdMlaZJ;r>JIf4uPj6V;r-XHWFI8UjTM-t9aAS5+L!EqZj`1NGyp>oj9FkL*KZrQr&wL<5Y^p%1uMj^9%8CBX6f!c2{U6DK=A8eh6Y; zOkO-^K984Jj$nba>OZJcEfh?7H>7zI&mVC+rPI8j03(sz>ervdBj}@Co1O}((>j$X zXDMX(8mjr()w74=xNwyNvvtr&zw?8{e~~7kVv9I@^7=CXQ@xE=8tl+{-g6;(71Qw90aqB5Ffw(_EozYZ30?_VxnX3uY@@YpYnuJR*X+q2=SfW=ftsInBMx;ESrM;$gj{_pIYnDLUUG53EFeP%&a#1+b-J zvNCxI5e{8F(tOagTRq~(Fx-ISnP~)nm4vYX3 z|4mR!ZOi=;R5gvI*(RgKS^;<3xw&ErH#dH~`n_$JqK!qT-<8IMKUZjL8j}~-7#)bI zE}^35s%LMs*|S#*{tvQvXvg!skxd4nU`EDO)Jz1TuslXCN!uuvt_We$n4H{J5;FsQ zo|Xm>hfeHe4uz-`H*=!=#Tu@2KhqLWSt_{|wQ26^HyJKkH*K)_KvGpl%G#iR&18=8 zWF6z-WtzOACCnz(aM!b6!-g*X_h88T=tqmV;*a(DP0fkglu=*aD5!_B$1ic`l9Vl{ zG^y|3P~h$du50{_i@&L+HB08yf=D;&N*(DtVnb$zoqn6P-Knz)c0bn_yF@(vh`sOP zbXoaF1n?3*6o%s3aJ_FIJuhvPKJ#u(J*gG68-F#<_c8yb02QZx^r!oyu@89sv|8EM z(c(7Q-Gd>GKd#KQ^#}CU?{jmnD!kTVE*=uTUm68nyjlO|3LyiIw*17a|oZR z9j{-Xv8FLWo%?m7?T-zg`nLw!zJ;q=lC|dlz*!7kGLL0j>}lkdcgz;7(*2#3zsx*x zFf*fwBq4&(bk)B;tRGWxmn!33)ViQZctY(v-*1*cgtkE?1osm3A!p6m2aRvYl#31w zRMqxc^QJp#6*emGJVj#c)r`eQz2AJiPl~Y^G?_av89l?~z^#Qe&STnnF9n{xIrbEI zUH1YZ?oyPor$_at<*v`WP>O@cc1PDTH|F4)9e0j;stt7=t|#P{fIp!W%Nk6Y=$3N}?~2h+RMoxsJXMjf+E*++0eB z)AU56Y@&^Sm64$w-|WN6q!468HFDn=mr{R9eO8%7dn|YtXEcSH1U>POj7dj(FBm^H z)Cr^}qDL;n$LVj4sXF0Idf_Hj&u_pN^~zDi(*F=eJdjT0R#v)+2N6vOkmV>bBsDJ~ zc#Xzdx1}ipI6L zO|I#knBbUqm9Cvq8too89^WMmL;6k}EE8mep3}`YCfz-eS=AX<{v4kC3Wd});fO^k(o+- z@^Co?ctkQMNQSFCzJB^xr`~*;ASpYJ{qpi??Y+1{1NExb%mZl|HidWF)>iYF$=8ed zgkG~01&@3EEDUNtTBT_qH7p)TTeAKBv=bmCv3Xc&aP5!?L_T8@3Y6#y`{{ovTYP1f zw~%~4GS{^xL)WcwnJ7d0F?;v&4f;6VqoQgDNy=^bt9dbg-PnAx#s6hvZYhaY2}+%* z-2e5UA;D=!dBxuO2Z-XB2g`fhgNW2xilxySSBE({4y(huM1hD!_}lxRC3Pb9j%z6q zoW*I1jwFF31#e+&yh>pvDB3?JQ{joPEt@$e)iO`M!{F1@{3G4{UGA5BwX}X z0gP~Atg7cGy{=J9@)xZ3s^QIOvc_i2sdB)?t(N(Dh4X-uX3Uu!lRWc);?;UBJ^rI}C$rw2a;DoCxiZkX0nFR2TP@ z`hwf=kiFtdVy&&P^<1dhv4EDIuu+Q|qT34f#JD7)AgL0Kg_eozx?Z{oiOGS*BpCXaJvw zrVeZkmfz%n&5p76&0A=7?TPe8U$(o!n9Opt({;p`yAP$8&=y255RryDsePh@L;_e= z+zv;X>KAp0#&=BItpczPYh}-f<4i}JbF$(JT|C!Hf~UNZ2`XwR-v>z&w}d9k3|Duc zL{2_Kur29QBIus}HU2WdyCD=eSiJ9NXqOS`+9*#&K@1stx2+|rs|R`~3M&W#8BVc% zbRjmGbX{viFE%ixwcv_kG})PjF)G>3vEa(hTn4RDMD1%aas_olc?XXs7iz3!i^BI1 zjdDX*J~drTmK9h+wtIvtcqW)d1hb zY&5$hh&WIpzf^%D2>CE6COmplLgkzLcjM-BbiWt@k?~iJ)Eu4K$3$X)vP;CyMgO(oqpw$&eFA3iVPeO2?i2ueN^nl~he2JDZ#kgfGs| zG_}^7U9RPiA&MuF^%pk?xREp$cxm*$wfndP%Ik-vW?5mU=q 
zwd5Zy)TH!HYKV5AsO65KdVT^_ZB9Xaeq>*eINHCl;Zo$A_3-asG#0`8% z2hZCR1v`#uab;R)3n+H~GiWJ+M+T}3iDkoW$c(^9kDPVHf>rAukm6ekzcA6NRjWv< zY!sfwp7~Qb!nP4RuQa5~9iynHVR0g1rp;wVecSAYjGl2G^||Bj%iBWJ8Yz zlqMstw3m09R1>9>S_7}SnJ8FrDBy5D_9oPLpqG%fwel2Rjm>WYgG-b_FBGg*8X7vU zK&S0s{9s!njYTx$V}ub=%H$0BDBonHk8+%2DsXIxF%oSs3Ps>C6v90It6z}iZ!pZH zJiIWZCcM)VCW!+jlia~-rOj}Gh3$!J`}Ytl);e?PYDJ2N@-RL1wTh^g`|+T(F*k9cfNH~)JsfJ zGgUB8o24^K4_oP02R;}erZaXi8;Uk;1sqzz^0#>(qJ|U%hyAxZU>7aYwcFj-pZ^YY zk|stqD?GeAkuBj`Am_$xX+W5qznvXHHz@Q;Gs&E5gka#pqj*nVx{*eUSjy1YhP9ja z#Ws4w%bHQhZ0JRRv&zBy5%kybTI$R2wlj|tfM!oPwK$Nv-*bMj)Oc5-GW+&}dvRpM z-X6?rOpXXf)`CKvZA_A-c(O0`gZZK$n@Hv3DubTStQk$+U*?!@e?K?VN|5+ug0nDc z+Uu9x@3v6(sK7HEpfyCqf}TEVOAWKS_yko1}?nH>J{?;rJ~6_lr&eA1V`sdc)zJRA z`y{SiUKh2`!V=fe{uFpCNKvDY3`H%~N(jO2^-F;Fk@kkw!5ep+$>G{tT{K$ec$v58 zoe$5T4z;s-Xyk!~W3xpcbLvlIZ<_3Q#)27RDVsZXpuy(0-dhm`1A!jITIDacBpGY3 zP{=5)0U5fCwY=Thq#%QUM<#SkQ-`4F?!wroiWgCp1oW!hP?uC@D^mV)3J3VE+tvR8 z7BtX?r%^y=mw|{vTi~Nrd*z^x3CZ1Ih>?HXI)3281Xy=&z>b5&0nlw4Xg;Y!8%Unw znLPNE*5ai%09VWcj1n~11Oa#!NHXpt#Z^kdlFSpzDjcd)uji3KCQhiAL!l*$&mM9ha zwIkPRa`?fFbhv|0)Q)XD!_wx-XWE2@_bcRHC47o6V;_rHq}%gv-mh)bc7U>e4&or2 z^sakNYhrVB&IXswoy^+$tKmWT>#K(otA*7T`KINVGHa^3&u5#R1euijiYFHExA*$G zGdMCm6^fJHGnR?ysc-mY;u?A(Z2~~5_ggbAAgBq4VBq$Y9{A}|jOv+_q{uUSX-|b5 zwJGgz{k!&a1wHuby}yM*X3mZa(v`yA=cJ$hvu!$S%a-6sz?)lR_LMVGHZt{Di;OEo z8y@;5;#M_OI7O#D3xqhT>V-n%&}9Fj6#-6IYpo&ix!dEMWNrBt&9B==TasB7J$HNF z+Z2h*g(<)hmF3+VYD&zS{ z3=Q6xC_O|32_d^EOLX~@OJ3lJ(`*!D<4}5;hrbB{u7FkG@ z=?{G+k+*x7`-NmRFh`!~gpsN>MlCgX2@Sn^u%hv@lu&O5JnCMA5QcS~qIIHfD;2g>fxZgX}J)41b;+Ky@wf_cK?=7^ukM?%zWR{GYb zR^yjhipo+c_pDHJ$!H>h+#`c88mkWsYKd#zmd%QU`JmJ()uD*ydusYF)3Cmr^>#i% z>bov`+XiF`TQH{sh~OvaRB>i9`2`Ye%NzIyHKa6V><=mhGM>9xiK|FMf4pqGK}id3 zWRCldAn1j6eFCCcl1kt{MM>4mBR}TWd`}tXww4Hjd!Ws~HSoD>Af6&2Hm0n!II|Dh zkXuR$=%u~TZ&+7QkQffM4SMOLC79KDVSO*r7R;ayc`j;f2@|FpXO+5EBp@CluQOm) zaAAKp4%7vcyNBWVuvbyP4Yq+b1G8^sndwh=GXhPWSE4WVZ!Q(|LPjPSg%O;J745p^YKs~Z!;w0@)TG{8zggVg+=NMa~a=G+--ZNE-)I{xYB@7Q8YD0n~Iol_>3A80G|&K{+h; zx(4{M!OAeh#vEAOl@8vNkYc=UN3g7*_v3W!w3YD1`%K3ExtY!jL!j&l=fik>me*eN z30X>LSN)uM%_&PYo}}a1Q(0Hke0-0St(&jWPMqpHvA&mAs4S$V7cBTHZ9gB!QHAim zSF_6vD+*y)(9U*cdQqSEguZW!og;E!cn$6-!G~OzKA*H4a1IV!lLOwVe#23$LB{W{ zPkFLVj+6QnH14bSs7kUhriL)pg`8T{*aDScaP8-ipUzjGUs)YVtbN?iwM!ox5a1Us z_pbg&NDHg!UFUsw<12?{Zo#!0$z6%xhv~0bp4CzU=RLREkhK~Wh@JaGxhWKjx_Va; zJ(o4a&%A4?I9r;V+1WNXG|$Ca)3#+1w7ABEHIfDSs_&)S<;+ z+wj_-aazgjVNgM^mS0Y)>1^q6c@t6vu&!k?)JvLmZPFs6zgw;BuWwXaa83rZGRM|g z*Dj-F)W3yZ^ALQ)Hg0G#`5(kDHgrj;k?jTqClu@=pRO7y1MZGl)D`N98ns$QUu*OV zaSu%++=3V&_@((>9>R0_I`4aD(tCaO=AGItb@%1aHYF`nLR+;W9;#y&Gy7S!1V_G^ z8p{^Geyp+9Q)8<%PSZklX)0d{pEMX&Sy^i_n6DhyT}$Z+dh`DLc(_7>>$EA*=Hrcu!4x;^^81sH8kU%hZ@l5B5pa`_SYosfnjuh3VeDlejBn5! 
znIcS`j7vd%f`AQr`RHYPp#8J*B_zxXy8ZWT2Q7;OS&zk~!e!yN%YQla>J#Qpq>jpdmRdAC|M`*+4@X)50Z= zgca_5Pj#R-hTH$mO1aN_$MeOZm?k2#Yk|mKQhQkm`t{^Og z8!OVcj{zdZTo(VhcW2UR(~nL;b5S`dUgxaq&W+$Kf@eoFy9#SU7a)*w7kyf;v@c>G z@niV1k^NJ`Ol|*Y7w;i&o{3{t_^tE$-tezJOZHc7BkCq4$k+?r?q5{JoJd)4U7wrP zQ`#(j{o~u1DPw#6;g|;+?i=!``d&Oom*;h*p_;P6hAMWsXU-;bJP4)y@UnX4zFwtR z2+4-%p@d!cH@m6xJ}FBLrw86jA5<>Q_%#$#R_*yd`S?8iv{TC$)$W*Dw=d!T*xIax zz-8R4lw-$6=@ff>`w8wm&eAa|NdLCao!80GCyg(HmVQ%PM)dc!l)pN>9;bxySj)DT z%4I4&3FJE)sYn<#eF7c7X6v+Z<&r;GmTKAbAmy)U?992j*>ImBZ4d4DpN%ZhGbiqm zmjEu&0q!=}6a+H44I5u<4ewPZa0qsHyFNMzzf3Ta>I?gs|AC}}{Y2=Jeoba6aXgGa zVOr~m(ksa%Uddzk;(>xgi_Bkjq{?22j!VzE#D&_pCC52x?+Yx9KV@3$Bu7#3RL4R7 zYjq9_*4kxxf1qX?#9v1sh<>66Ghz*?J8nf(7hzB#SrP@41k z5L@Lu-Z5Bgz8@JA(C%GX@^*D%aMZ>E+zT;;uyxME@S^DDdGZY0HmcuViqX=-4v9l3NSwZ?=dJcWlqbcrY=gkgZguSLGb(v7OK|X` z1ZvPF)$RLa3xyK%DmSyvz<($Jk;1$J=|FFo6m4gD&<)+*#T{sAEIgdgt z1vh~$$@4lBkZbCn4U$H%z!d?~E*qgO0%D9LQrJhX@F;`1es+xnG=KH3ydIejPRT+aBe zVcl{=M!L$F>Sq{H)MDgCFJyPzbU*DLIol$Z7x_G|N-5|q3*u_(#A-23t z?VPIoq7z}ZXOv91cqtu`aw^tM8UrZ2E-Q=&SLO`})E#j=dWwUzx4U|^+2gxEKd}pc z!<~G{J-o~K9J1nDZ7b<b455Se#4^B?!SsFapj`j;vWA@8vq4in76E;{ zpWR*tS>MVfWi;RvI?n%Ibe5b=^?|3n!YGecjayK)Rq`?f{mw+E4QDr1eNLGz#~YTt zy;l0+7kwjY(aoMMiSW=u1sHYe^abf{4vsUH?6ekQULO~JTM$*&cHul&DWLR?xJ`n> zrnKxZ*?B{9e-Ls@Ci7{SsDbSYa$a!FDtX5K*avb{$Z@fys4*Pxa9*ujdgF)Ubw{M5 zlAO!!TnE5c#@3KdJth^9hB z;0IMQ*a1W#_8b#I!H0aN9@K*wT<`v8$iPTk^JHLdWLO&STIY>JtCjC6M0I2}KmSSf zj(@0&nGhJ6Exq?px<}cxLZII4amTbV8su8c)`@)*UZ^5uoF+M;Tf0_`)Y6 zbrzwyS@aOSnz$MFU5+8LUU;tEmNKFv3;xhVhBmHy%2%JHWM`-;WjN$3VEWlYjS zw1NE}g&Wz7$Pd?}&a3t)-bZ22ww$lrCIJ>CsH&4%I{IO0ic6f~{ws^eOW!{H=EJbR zsugre=ZLG>fZ>mY1R0@os*UEW$Lj(=m$-WC3GywRFn`~?{{wK$etgQlz3r~qpGI-5 zYq#zUA59vkowxkyHTu=NYtr#yyIEC{;zK9l@4Y>zUR29RYFwfloYHXkahh~-dbqxm z`!8st%jwSy_x6f2pboqO{9n-To*9EkUX<{aTJDnGGxNOzzLj>fp)G~Gl*q>G1ur@g z6ryc7i=TDkZeDf?^N<05>wG3l9V^4a*lB@#t21KLP9}gIPTiTyc^89y0r#MV#QW&G zbS=$+T$7xLW=o0{Q8nNr0#LtstHPB#b|<)8>)0n>l+!Kc-wS1(@Fk~(8BxcICzSU(FD_X>DV(mrJ?>055=I?6@y+10e0s6ZjmJmP z{0{8qwfcIuW4Ep4U;|Cn*W&~V!qZJm8<)?eO3G&XJX4U` zLK`J-#8K7ROW`<1-!*@PoAnJ)EjFV^wGxJtmPvJyz&P38P-&%LA>^aA}jySWN;q7NF1NwZ*2V`xqcF4!ISxNpDS>ZQN^X5Dv04yBog4gVm z2ZO`6#qXkyq9TifRgnVbW-Xs&s~5#WNBHQ@QDd&tdsiD?bNq99te}t1$L55^H`aOo zv>p9i_x3fR;vH~ildHa}fUfutLbv!r@R|nt?oZFJe%xSS zJKN7_sAo$3BsB647bsu7_Yy!p=3Vr?93d~>Ijys-wBMBqrTR-onkGn&To6xv>K_{M z(gf0y3Goi1QAiSUzv}Gwk6se>nhU9sQM7Z*{Y@h7q@e=Tk?4FWr zQLXYpl|Wcw{ww6yb~lGA@hVe!g7Np%@QYeml~?mWo_uKxeH@!*a7eV{2Hs(J60-E= z7$<3T=F;p?1?{|?=MEZo=PgehT+!ib!u>AJPftqG+O6aMW{M@S#MytY6s7I)+ZiN< zm8fy)ha`#;PAC{9=EVmlx|XY-&2yF?zt8{fm?8wZSkN+9rzw?v+GZpzv^GY)Xm!rJ zIpa0`iB`$GHKFj|b&#eW>FwE9wy$O8&xmSGW4Lp^Iv)bu1}4cTthBso>x2kXOO=uF z3bAGRtDk0e?5IY47Z>oG_eD26v-ZVB?v)rIEkw_LFa4X(*sfO{v0V>0PJAzT_IlC8 zQSa7&L4sM9qpaEL=1(lk-QcQE6Z<4t4J>ziHzXrriSd_VwM2_PlA92uPaawVzHqAd z^p0LNqdNV1Jp>%UQm1MF+MYUS%6j`R$o5|Gt-N&FZ(e)S3B}xxdY6=EGT`hqv`i4|?=W9hN8_IaKd{g5YOw}ubGEpzPp_vf=Dxues#|8g z>Tw(qI(3>yRF%V2Jio3uv^S7v`nmD%RKhpcNfmHT$J*0sk%w2_L_cS0|D6_`+vrr= zK6UN_c8Wb7R(br`9UTbQ-+8N4Iqu>wGw%&wRg<<|N>sbo?E|}?xT5%=wWU}OY2#pA zUeeYtL@HwB|=9OEMiHo+j^0tbezf9hwyq0Nh+Q>1vd!^q2 z)0=FbC@nrXKQ8?dQFyBcsW&8GWabeNT64;-V(q}Ca|MLiXRhP}^r_e6#?`d6kmBA` zX}%uow#lO3owaY@(kiY?@+*z{|LZK#Y$p#(658CP)&8Dvyk%1Rtmh+B%zSYKcq3n@ zGdWMLSxDq#EFgR9>F0h6K1bU?wyL8_dgy5@0~G&3W?|PQ8}7NWFK>3-;N?+@?@Qn+ z2`i`+rN&}rA)QKO5N>lg6Utt9b zRWw@k^rpA!Y&>^NorNd{8xjyR#{os+WzVjb^Tk!Y3KuBe!uW-F+2PH!6LIvhGO}ss znI8$_J#`N{{soCB4KKMTEOVflK+R7nU)$2m7?1^nUV{+pg(e-lN^i3y(D4@K>mgNS#} znJ0T%M_auEAk53ll+WKjS2nG0?%sS9c=~?i<#1jy+uF(qF*-t4VN%Y-svNz9&Z%90 
zo~n=>00fwwI&2sIeN86+gj}@WCU36WR>9}en!33Uk;d-jc*UY{zi?=D*W4M%oNH%{ zVy0qwB4Ub!ED&4GHR#27pLxf zJx9`NfbJ;ih zBZC^LRpc-FbR>80xIdGbwc^>fPqgu7N}SZUc4ESK zh@vqk0oa0YK&C+SF1WdjHxf@Qp3Ns2QF$Ja!hz1Z zF5?KrrG!gVn9T;!+i6o-UH5NKtpbdf9fxVc-+hB#kz@rVObxWEL7L)iI+5JVv2L034M1v*8#iR%VV*9hdiBC|aIz)A#vTkgDFZZVh?ZvGH4LvaUN z+qj@#e5MA<<86iu5-q$eWSa&i(W6@x6o*_?hnntX1+A z`9SRX70gb|?|ZUG#Bpm%`)kL#%h)np`>hBjlfBu0d*R=vAbg?>9-Rkz^=*FCy;XCm8-(Omk7khni2Bhn4mYFN+G z8@1<#C}|KIvfB47%rnEzQQVtqc5Y9M?!>x~zW%n!5%SC0zN1(hYi&1EypJIKbVQ`6 zjZ7Bo3$7v7?<_G6y~~noVRz>wn1Y>{JNRko!IRtD5)%o~mrA)iT0mIGvi&H9={~A@ z8uiyn>SBQ9V`ufgocB+E5&eNLGoXC<0gHe4%aUN2Sw1!G6XM&>$9Zi z?x6%s*r!b+Xgm@M71DWEW69W^QM zV$4BgzZru_E(BCH$vz)*tygeJ0_7x_q-)3BLV)x;0sSywFr(b9aT3`L4U1lgBiJ>Q z3+WI9$hUyXlE<6nfo?2YX{rN!^VEYKXA6OY3;!e8x)FJ;^9MS4gnZV}oyQdW6YmnO z;h^rb_O?2r-Z&|0_;7}A#vmTVC<6G;!vTQ!^*?Vf6Lvs$37oi~Zz~LlosKn6|y=W68pdQr*&6}{2rk)K)cLGcg$Qd}3@4hyuQ(h~lz-1(Bm@!)7 z@Fv*AZeTv@@N0oKX`R~8WGQHC6%f|Mw`dz?)aCT7GLVF32;o$vN3))K25Qt-i+Y-1 zcn^f=-~}bjqqxZveiXoQ89{VxlygoR+jmXr4=0J57AGz{fL}96QB!$U3+3jXRvLy~ zFz^VXaC?T6sqPfSA0AHT`VWfM%I^S<2DTUE@#5`UfZL)UFLJ)zEUyW{;O}Tvhncc7 ze(guRoe~l$j)$f^2>WsGHp__kk1OI{^ zXKVxSgn(Rz%ek;+r*?Umk*XAxHxgn9?dV3dlrRG2!%2LyFRZQPjLJOgd zX25`gCSskN<;%OE8Yn=H!TOWS*QtKZ?8u6r1OJ$3!Zb^W7~WUTj=e}c_W(Mr6cq|l+92Ib z5Jbbk*8G|X86y@*D-IsR#s~voXwec*+LQxsb0%4I7(y374!+qG?~2s^*gHT_Tze>UD`os;$Xs-bBh)S8#zfL zn{Q*mx%Np*Fz|1I+hKsfPA!54&WX>%psjd;>0LBO2L&=s1V}!B4TCA5N0}cc;RjG> z8&^djwLFrm^j?-yj~r7P`2!4*o;IR)Dx0N}foG{D5{H|&n5PKyiZG6p%w~|BEe;46 z59)n3GDOzfv*ZSO#F3paPp{lgIEzQd^=Jt`%-A<~mflYFBjl2JO1(Jsi}s^mh_Zw1 zxkcM};C;h^?U2={We$m;(Qxr3K|Yunqn1sH{xe2$AsWsDu(xx!ryhtlvE2zWd?sJr zyfzSYk;<`M98M&8$T+u6WrrbgysB)Xyahr5-fUA%mCl4s5I2OY@Fq~^RXG8@9qVbG5=mD=>`(iY zxP@v23qh$P**L^_URth&AZ{9}ke8+*iw8Bf(=^EN&$AhkRDs~BV#A;IB%FIcp;Jfx z+KZ{L@3+3cYTTt9Gtnz2q`mv-T%naV)woH`!Ts-?cp;#MRe^lif0R;AmYa)lmL+BDI;PGq#N6lbl^hH zmI}9lEgPS7Hv&YUT&k6x%{~xq#FOFwUo=G`3Vsk+e=8{=I-&$?lMRY@>>y7dIh7n+ ztEr9h0UU>@vjuV(9*o_xHB24`PA-{VugF(5l_`|bLBheH>oIlaGEvvou!l`Z4q*0O zk*VNLK@k8X3?F2R`3Br@a;%JK6e3JPxPizPFJc#?Vt*acGaozheVy_i8Td4>`&fi@CXQe#ge-XWZu6*iZIT#Oaq zpp~!u$25X4u^681a#L&|)PJf_P*jc`sEP; zPvv1nUUMU<{6FyUSZuu9Adr3qLxB9ofoKvQA!oZBxMhXTPdiI91_oq)Tz_YfVAlf$ zwE%@Tsw__d2kPA_SBqqU1f}F|RYJxBh(}(^1c=0NN1%lQiJ8l`hLr%n4(w$B3JDqq zER>*dJ913vyp42aAP-Tx3vYwMf&t+@sIh#N#?rzQvDn$1Y<_t-z7pDY1DXefiO_n0 qRW=XA0N?{o1rF@g3Y5x+fF}vRraCaNogjF*6pdw~lBoFa$o~PUsH4sR literal 78161 zcmbSzc|25a81Jz!QK?aOvNe+>6S5^zwz1BrXhIs1CCM5>w5TRKA=$!M62jOjt%g$8 zBq7PZM95N7-RDfdd++CS|GM|;LzXk=yzjGopYOB0^SAf!5VDzIWNL)4upkHv{6qfs zBL@*~PEIaP4sI?kE*>6kUOo&z--Zo*!dtd(!ibAVN{EYyiAl*S@05~Olo1odYTy)A z@M>ymk~_6@HTUdOR#n@B-h_pRhlg(ipAbL4(4HM)JNEoPfBb!h2yn9_>1funTZ( z*?~X6xs`a5OWFssCob3sz)ysRq4_Yx29w(UZ~B69LOvAA7o>U%XbwX_c& zGBh$aF(ud>K62F7?iktml#AHlkI|7&CadtZGB9~%oOk4*s4L++-V zclnvN(=JS0(oq|7PtKzuI%@R=T^(#yhI>Gs4}Hfdj%NIorJ4V9B2qU>(CHzfianZEZ| z*KkV0!9c1#FByw4gmP0cNE(gC5HCh}@qNj>x|1x}JPfZBcFK&3^I@>^ktmt^tnp35 zshEZ$)<|x=8C8=u9I+KcNl8rOz)+|NSr?1(F`=9<6d{n=cZgZz+zMEXuxh*r-U!#5 zpF?pXMw(Ia`aFipnt9^I+*|}K1M>`vN^QlEx+oD_VOvJHh-A`mG>L^v&6zWUgWa0-e14$- z{I(BPJRHyS?Eq$4#XX`=r>j#R_84sq8=_$%%!#q?-N^{i2{1b>$fUn^I13Ie2mrTAlB3d3?DvSU#nHqKR1%wP#y>S9Uu*nx=@wh z37)Y6$9sFBYdH0(8vWT>khl^e2N(Gd$MZ9kLDu^Gu(!JWCe*ZOv>Z+_@pr9qpqpm0 zy)_52^MIi?6*-IjX2}|D%?5ht&4q>IE%OV|sfcrDF6C+T|1q(1Ibs8~O!%)Vu z)G>q#N7T|_UD3P&LOGmW!!RPNK{hw{v?TRq?5V8%_K?+ zcnKvDT+BSAlNx~ThoOT>er8St1j4e=DVaua?>rE#K97kZ{Dp-;4aBlh4{qkQCyh{b zI;5I0X@x}nMtD3}(+-5jz*wu$7zlt7cw1c#JABC(6JZC9r$k0^VDo7?Y?$PyWOHlQ z$aH%@{o#~IbcYN%M2s{d4N7`uxfGR6HGpT6hEp*ScvGfwjd1QhAUOC;*gC 
z#)Eh;({L{zkiL@~JS3hU1NLu?fb$3eiZC$r0C-?pp$G@Em+FH_gCC;MJ>hUBd7OZw zks!jn2EZw#E+5r1+)zpRpR0oExC-1D%37_MXKolu>b_1B6h~PTGpX=IqUDS!fT^<@ zEXmxO%?e{vk#0#xt`MnYZ7^kO00zk{VrY^m5nI51!N|lwq#zY@(l8(!E0s(@02w)9 zX(XmI?Ia`EX01ZFo)_I+*Kc=#a(Qq@24{pY)!wE8RYWcr3}6Fh8pH!y0e=7))#ZTU z3`g4(u=Gi^SptVp4h02*Nc}4e)=YgcF#keljB`3d(C%WIJSBz7DUZ|7XK3=W z?|kzGy#v{v2w!stHInH+Jc( zwHt!ToPv7;)Xl?Qz#Y;50F6P%L@@a&eV%AyoJXLlX5df{KK94a0RVV=Q0q~*t zcSWGJF0ehg9z`X%C>?AS1!Y({z%&AJjiIcW!`Z>5RtPqbKV=Dz7$BO}Bw<+0d2!tX z)+zviaQWus++<#$3xq(hL@B00tDsH-cnkqe9*gAIJhFD%oJ?BD69$~1BL7Li4dkQ@ zp%T15nnXpsltGdWOoRbbWzlEy0xFpwe}f8w$0wPvH&+YMrVLpj<|XqH^*M2Xs$g?& z@E{5ipo?|m5+xd4KJJ3GyCH%Athu|PRtQeW4G<}Zfa5e2q1vfKAu-y|G9P~Uhwe5- z<0y2)lOW8Yp{&`3!vO&w?HHg4s=qOYI zU?vm0QUHZ*BH*TdL`#702=HpW{>uiCLo--60ErPU+8lxlgko$VCBB2rL@hT}%~_56 z;D6?g`VvMn$=Z{Bh?549Ahah;1X0>&=KZF})rti_6IU_cA*;IfoT`hJ{Aw$eC`XtO;cZ9VR05c_Wz?@`L!4xx! zA8GjzdMJ@V_>wRPYM=o0%6i(MD%c;YClat~0QyAu_MgvVzLKmtc98=$b0e?gd<-$% zczsq>KWTZuE>ZA_)PITLJqHMB0B~pNp2LeLs8aC<5r&+SP(B_jje_SvJb?)~HtyX3 zq~UZYppg-qsn)2v2btSDh*F&{RU=$C9qHp{Xj3`WHfrTHXhICe8)9>)>^np73?=a* zKy^pAaa1`e(IQ|wy?IO#Y9~f=fJ;Q)0McTthEfp_`(|Li_F#(4ZBlW)5d7WX&WIO8 zdp{=Gv1Tzb1e8SMAjpGszK{|_N0Ldyz`nsMwQ?vByZd5cMOuZxs^H}q)P~H7z~0b^ znpe$qe3Ota7QDU~fD;-E!S$F=2b9C4sUgY#q6&yte3Auk91_ScCDy@8iL_+wn9;vP zMbKDHNv)PccH|d|pfL|{mvb+~KhOypCQ+aPTRRK%D4N*Hx+Q-3pFzs$!45&|pjAE^ zSPEc+Q&%LQ9XI$h+1>|Ujo2(0B5FV)I`tb_Q&Qr=B2a*v1^Yl<6@V9b4lfy^q`7M< z22uz&%+~-=D{3YC6Pa!R29DkUWxn8cOb&IK0FG>^og@rm244a?%7-ov*eLu+>R|Z? z#-KFNJ7*_YK0mV415aQojTHG0`v9TY0e(^CpwxpKtolE0-iPb9W@SOyJ_dOQiZeve zBm;G!|Kb1|qVq7*c>SiXSo&9H=trH4$4no>0*fK$zfU1}y@)yK55}nj0n1>*I+||) z!?k8HGX*31N4Zez0e|K}%`cgmDWUi*2ZV|NzyV6&#Ih#yGNG$GIRbpCh`}lfDk7k) zf@sb;1dL7|%fc_D#w3+=Fppx0#4?yG@>m*e)*OP#EG!wX54gfd1fc+^GElPz@SVc9 zxv^*ifc;Z1@YGacCTVA`1C41y$sly0oC8$AO}2mF#A|L-%ngQypx}nmc}g-bN_c>3 zs6k#??8X9WSOb;XT*cUIPEMxNBM5jiQ(zcO9-0P*{*_48*Fb5Wy$wqmjWxpv%a8)h zvjTi!$$>k8cfWw_?LUqRo)4yAfWnI~*jck0m>pG{8X!bFCu+DKs0X$XHsA#~VIxHO zpHbPg72!<6(9vyDvZ>rToFic5phf_gVYq!CnS=vQh=G&{p+x(cQTbIw3|X1H6SkTU z5DSXk%(NsSa7T#uB&T-^MyN{&G2<{WVXBh`m}Gwb4(=S@068{32u{EoQCP;O1IgBC zK(9nGH0PH!Bny3HT*Ls;3e_?|7fr@}lW}piCM{I~Fy@czK8fV04}y6Yg-2N|Mt6O;Uv1a$8J>gjNSGEN`-MGA#j5daCeKl7J9U`36&$$rKJ zc6l7GTLi@)H2F@AhV?<7?2cR^**t{%iRu@!3=pYKJIQ2iVmg?r(ryn>6sa@117HC> z2b+UtlxS#4#rV@&fwqW{H<1x{W_&=)67nE8YdjDkQ-BuOIfW8UT0zx^XW)cUW(wF4 zh{JBCfM_Av2!RasMH0&T8aRO{JHf4)v6KZ(aly+9X;Pw8Tz6WI0TV+Tfr0?Oc!?K3 zgJ1_fk{iK`Ce=t@DR>Wxx!~~-!NA^uQ$u1}$7E6v_O(D=Xr$A@1mtiIqk;uLzgGJ1 z9=jq@El?@{%QozZH9$68Gi&}aRUZPN1&UlKV?onVH0*eRe-Ds`Q3{79Y$zy4zk_Xq zyE2&*kZ$dics`V>6`+{{P#>W05vZ$yznf41clIZ96YvBMfd4Bbkzmjo(&q^4zylaS zjEv_8@s@j2WHhrZVCXmPEJjPSt^j?DD+4 z`hrf79!jM>zO|e@vQ+B!c%#p+{@c(HwxBbzT8v3rbWfuSa>TvzA7Knc}?h~dH?akFk)2k$7pFLqu>Wi zWrl7vse<${7sLQ02}A+{1-ng&KA($*%FI~cxbnb!>c&%0W3bf9myQsPMkNNis>{tV z1kVS!Vr*xD_y*b;AiaA8H3S|bRWn02Fo8xOYV2U`7f2{#2Ld^PoN$&-iRS^xsW4R8 zim@rmXC;aFP3_*&VPtjo>edUI)1z0LAu4=Q=*cu2U<96=76W!nN#mhvlM+l#IP!C$WCosy7AkskBSZ|r^C@W@JJ4td&VdFC zl+1$>{rd_;6qrOZsmq!LvcLfK^JWjiEPpEH$+EqvT|JuQvFoe*-mf>*O64bxWHfWX z*z|PAWcBB%hw26Ckre$ClBx;cMJk+6bHr3WKO0Fj9uBWQWqXVK=uTzta#4phc@E*# z%t?0-_+}^0TE#K#^W*4#+OVkC{!_6A16jV;Br_eZUVSV|w%PGo=f+H{^JmBBg0E63 zsw~Z~_UMV^mzlX<{asy<$CWCrFel6<7HPk+km5hnURB=~d{npjM^9@LeN3p}fX&1D zSwG@0_rYeqykfEOhuH39$%J-tV)}QPe8z#30V`=O4}N}lx_Ylop))%9R_Az&vhrI< z{M_+}O^BDxESU*t1p!T?fljgl26c5Yqb(`HoD&@PE6{*vu2{xC>OqD=L|Q&;rr~y= zMg(Tbv>=jTD3rtgET5sr1XZEJfc$T0+60sw4MR&5lzJ5ssRvFz?t<82ZoQ3{01TiN zB~X-jUUoS&zB!8m2O?48FGKx}sQ-*g0Q65r!}&kz69wf0YnGm*1~lsb3rwZ#U}R_# zS?r_F$6(TE!DkSB(OiScZ|rUA!01tPiwy=g2Y`pk0d&xZYK|s=Hd;ntg<^^qLSe=+ 
z6x59P5jF5OH1GNj^{G3u2`}!9xg1$mb9{E|Q+&e(NOljq1ROnBFB<nRb$FSml#I6_y^y{ z!z}%}twE!{Kexx5esCV`iD`W^V4>977-f2Ar=q7^zh_jlQEO1_<9lsi*G#^Q7-a7n zSlhEj?h?+}*wFheuaDryJ5s4;^msn5t3sQe9l7i{>UjOGhM zlR5b>_1g^l1{8MZ|Br+EpiZFT{v(IUtzo;Y|FJdqf3nnsk{-+BfSqg&{ysD0-Itbv z=G1S}qo%){|4Gm=F?PcE*%77o?6%w(TrU2;jMlBrQ+C#2@U&ljX62`#*Z1)PzNYpw zFJ$kWSvLC?CvZEoRKeJF;w=8r$m1*9sP5w{e(&7sjaEAcG1oI76|jla}!f-I;DTK zj|lY&&v_|h0=68F%xkQ#X(K- zzH*l|c1R!UtrOR9PV_8{@pGkej+-m_ca(qg&FH({I!l zj)TZ*YQ@+=w%rUAC{P!NpD}Eb6Do89wB<3iWXQ2@NlfJgA_!>G`7c5P9B?zycEFkg ztzn~qj*0_3kwgjfIzviwoR)Z)5aAJ);WLdwluw0kl`^Vbr69)%NlOs7XlF7E> zXF~6(bGAJc67fH=?`UDX71oz9v~ik?P`0pjtcZP1M)F2JlBi!=D$!bP;rgOaR_S|L z_~k@THwCqXjOCkWR{6E{cAC?#EXQI_bQMZxZ1*TyLaaR}WpekE&n})iMWp)V-;=@{ zaR8?iLX8BQa}Y{Ey?K~2Ho$JQ9!SN#h_+#&J3*b+1n7ck*UgADU_E)5jnAoIlpL25 zQz0QlZ3u{9sPK0iiQLDezsRa>Dv_nd^1#lIC4KR;PGS7PT+yX5*Qn~)XFna*$JR8| zTd!yDGM|0koNlo>DBz>l*$6$KpX0Z~n?`59s~lcP$atrAdCWsY{k{rzogC&-vlNDc;Db<9ws%TE|)~Uz%7S(R7NVJwr)%78tIhN{&*P1<+%C>st zo#nW?b8o=?Ec=$mQ>W9b)JrUazG%;W>MCwB8Qti4^s~H1^6qa*{Bhrua5}ua(@n>( z85=w+KeP6M9*TMR{Y*fn&dxEnIl=IHo8Jnr%8%ry$l9~fYs)-4!?|8#7hI-yCB~d$ z#JRj= z@_qU{$q-WV67-`Y+@2S98a_MAFH~ra6+$!EX?{o@z)b;Zn8h=Bl%ccKsd=eQVh{?TG)rUX1gn2L;N8bV46H;PS0 zm`QXo1n_7`sG)3+KswsBf;Q7)lTrBlSKGP-oZ=r88luT4p1?&mKyWSfn&0#btbA62 zV%wLaHCH66RnxBo-==Aw&95gK+cj)EcZ*NMf0q?`@#*GieCrPTIp1L6Hih$!@5v{n z`}JN4eap!BKB;Tr>&{2A4?b6Oz(zJv;v82)kl?+CRrO0tZueA(hH?fn?qajTP5AC^ z>$`hY??uPf?OQ6bRo+?C(qzEd&wF7aMO`_;wcX!n|B&p&Q*z6A&7^e|aYd_K@`vkT z;dMWw(lcJ8rqVmVeh+P784@|H@2`-tg>~RmWXBR~`R6{^sK#c{h3_(2xIdT~u&&_t zA^nn`a`p6=eq;A5KUxakekdsPe05r(G1YJK%jvteC+5d#Ick&z*85Z;#Mj6u_`a}| ztHa3LT9V_fGKN>Nef6~HZ=>T8Y!5`GiLqQ??4$0o@BMYl2?;wNUQN6mfOYp1;r_6T zN4ON{xMy2{)7{O7UjBMfEUmD=1E0m|o~o#Ey)*LgXl?Pm>W^)GS4>iDe-Q|U<0KWI zsS_saxcTyRnJi)Vx#9jIkAtBR%ZL65{`L*89)HRc%G)=X^KdRt;7@x^Xk)bA2NS)V za@U28PrbhU)-B)p=9N_{yXk6yU+3YFwX7GVE5>`~PJ5=e((W8DIexpGw%>m5t37Xj zi*h|!Rv-V12s|d8>fU&>aG>&@UzqHfbL}7ZU+!;^Zx|Ndr_1qn(IheG4w3y)@hf`t zUxa(9c{+I3(KQL5yw`Hra(rK+m5qZxmUcgS+Okh(QkT<;9^_}4ymz)|D$y#GQZ^#^ z=7(`(x^&P5t9N%u*BuOnj7(X~D|SX$IaWW~UqL(WaOc>v8Mcs%QzMO)KV+9jf0o+R zo7K<(N|EbTeu)f@QpFzck*+%G5V^82kIQ-^Q< z^}Q?^h$~Q+D>fA-!8x2uc?~+y4uqmS$~e$6Cz{+EB0GCfEYpY1PVzIzH+a#k9c`Nd z&V5A(O`s~#ZNrieY5GOzyIfx|WHR%AxrZ|Z$!@4;pd}Q@teV^y`_Pv>a>uta+vzAH zgKTVnVp`O2BAQU)>2)+c0^VFvoNj!1F@|6BtnBjsKPj z0T$*zsMb)*>ch9{2tyvA54A8>fXb*??p4h)&&E_Os+)|j-o*XOKa6u$XB{{O{vs~d z-a=rrWgFH>a&{T}*829g-=EjZ>Z7iL{T>-s=N0e1k#v`d|4<(K(Yd15A;CyXc=khj zSal^K0=ENS6??^b`M%B#vjldFho(Z_&fOmzmBQb6d*k9fj5iK**)*v-hW_r3BS`!@ zGqKdH@i0S$WieLs+wnaz2Ym3?PYT{P{mLS^^D6zWQ0%uia%0YP=Lejn(={};k{dEI zo@TG+299pcetOEAo2+qqr}OsAFEhQ}>wlc2LV9NBemO|@Nna&vHm7Z}Hkq>j6L+qK z|FN6ds}b1{Tkm1f5|_9Zr>h?kt#7tRElj1|4K?=$`$bfWg)AH$sZTs)S1ab9-%x{p zMYvqMPvl;H39I%8GCkqteL2cN0*}tpj5S@JKPJmcUF~GwpsT&hIg@tHl_KFcP7^r{ z-l5qGS%pJD{6xZ{u5HXL-hPaD-Ih(peYtGzrDNB!%Kp~it@zpWpY?GEt)j*~Pj5LW zBf}Pdc=(Y!#YNEhQrR}uv&QeNkIQoJ7 z&iQfmxz1YRC#$< zca6g?aCXhMgLSaku3FnMSLncwpR!gBq9Jhys0)7)$#w1bbkT|K0?9s+fX?QAY4)o- z@4AR@dV0`NiM(-7mF~3WRqQ9RZVBc0UQ4vD#;-Ng;ge#KU&FNSdEI+p6r-P-aYJ^e zbd@}d=9yDoJzxppNw zWUV^5l`%4C|2izRKeO$~CiVO+*BX>w_mHJ|T+^q6mFGs!C-nO$`r38LwoA<3u{o!; z|JDYzDtm17_JmkL%&A|SN<;Ln{1F_S9<{h2knk|=jn|_>Rt#6a^@Hm5Ic2|*J zTX{=xZoK29NIuR-__JiHvet?W%`eWsssWA>K;pECE< zql}5V(uVwE+Iyk*FL1g?ZqIyu5_cF1=Sg()S-nU}?y^Co??4vJP2hlX5XmM6&=vY8NidIx7LLvV z_oYL-7)4!HNIV1p%Td0HK<}p0oFZWQZ#2O&7p8L{w>0YlYJ31K7O^v-ojJJ#;ZMBAr7ElQI0~!3bo*RbExMx!+$G~6mf(?FDKdNzdjec z1|}C$a*Z- zS%E}#XwhPY-oq=tdXGOPhgk$?UsraCxA4JzlI`+pKg*@L!ag!z_VmOalS8~byUg?d zBJ`m9o~0Wv6z^}ldFuwJ&vdZ*VpnM^@pbkIk$v=TVhhAwjlTP*qOa`FJYF5xx70z_ 
zY5Xm+Esm|9=AR`pKdNmM?~=tL7Ok;O*I518=hyPp!)9?+xS$cPrEfobe6#79Ytmmh zEbPAeGt3+?=B(Hod#~!YX{lS0qBi|FIR5awPM-d>?yD6$2zFxI=;5cXB`O`DR=|DfjWXccd-Uk!Y0=O67_kG-sZ0;!NXx9iv! zx6iUpzus-0Hw-AUR8n(kv<8i~p(?n{G*UN2uDq6JLU* zqZT#GmE?bJvv+ov%vNui5mBGJ5!z$h$#EOm>B;#@SKFrk4tdgb;c`~D_OSYDE8@}= z=Fxv#z+2(LlEPB%ieMR4oLe@q^c#Or`d{SW;{#qhRH`m5EVK7`jY=Hfm6fZ~80L|_ zv0ROR@M$!^k6>;Tc>G&hvIVXO*xh!2yp0H?+> zdzp3cRTR1eQxGCx6b3Rtm_9%Y`FQXJoSW=fESr2vQjQv5s7I^Rkzb?hbu;OV_xlbE zU*vMI-_TuMT@(1$FmJcFvawFfchRLul`w@)e_i1-xxc*H_!>{Kb#ndqa62gONY;qe zqwd>JJC7glog64W66eMq;#xQT-sX|np^zZ*M$v}1x5H)cC*5}KS{Ij%7&TekrnkoF zkvYNeI`W}~-t9kw+2?Fg;i|X#Zs@3GrCQPY?RQqS0#&mCQuATP>W(8MdX&_eyCz5z zYi@CSH{S=Y9;s!M*US=^(=$~%|*n0#u(R?VL@ zpW8wgzCT-O|6 z^&j`IJ*ZE#=qsxiF&)&g3mx8{KNMb2(%;I_5L0{v%MDNY@7_#rbnTkG)Gk^0>)@p4r@AwOTI{DJJ$^QmE6vOjWEHMwj2Yiu zD)*g_dbIueJ{|9KcaOSt7)s;Xx-@?Wm3#9Xv2gERpHY2DxH8~cZP_#k_N`4CezZyH z23pr>hE4+%B%pLaf|&tys0Es2P`PB*FO;$9bO1UE0dog1dI6iAisd@G|!9WBt3@+b2fE$ z7W6b=Uc_@~`;URKN4ud#xkFGT9ZrRx1WY*eyyWqgLy*+|$Y)4BNP}hyj5LM7AX8%6 zrD&oV%soQe8rs`vXk+$cbD<7Ofr&75<|Ut%CB40S{IEG5*%G+b@-vS zkFSR!`yMJ0dc?OHT{f$hQXL6ySDNkA8n%1F_PE$* zYJkzN&6Txb?b5TdS$O;Ms5ARYYJYBI+f}D)@>;T7Pgefr(OYBfm3@t=Z2DB*fr9)tLiPI$o83P==015} zdaamX5+py8mn+! z^sh_)DvT`OyEYcRMluR_Y?yM9Ex^AnG0VxHEAwA-P4#gp_=|XvPi!}=b(b}>l{n?q zd46|6Ncx1xisy~N<2OW1Wo4@>UdtDDG_{h{Wg<2XZ*ttFYnm5E2#h=1I^ZS5S-gV3 z*)zIZ$oOdUo2>VxviA}=()SpBPL;S7_xNhF$0w5~{rUHAuErlFO`fgoXiT^6YI$qA z`<-6Py$NcH3#ECV$TixTSa+p^1qyx^^?NzI58hH1*%z6e=Km+)ddT4!jWwz3E9YL^ zTj{-Hzmm-pG(5Q0rlYXhADST+_gL4n#z5#Si8pKcd+Va8_)%P*qF{vAn*jlb@=KvX zuUY8by_FLicFguXavR>-Wy~L{(RX7yIj!KOv=wiwz{Y464y&TVSkDTz6;0Xn0o2T4 z<_R6lL2LeC&CI3*Ob}2V=xP6wXcOkxBNT>7od={@`m)`Fy}ioa1G^SKm@~rmXotI| zv+QJx{*d}3l~Mi|;VErsy|Zbz#p3AA&Us&}j*M{3!0v4hk|DXq6+iwWjm>6Napu)W zQx-2~7-dut-+k9C*I2M$bGec9qOZzhWxnaq)QYi1qW906nlg&#w|*Am?V5Kj3pE`n zl1(`oG_}>i0v9LBJ(-hrS-0Wpc?p^Mv$u!uTz7r)Zq)rGiTbNWETCCDxO`Xih|q=U zRoww|tzCG-3Z zn?H^8N-tpac$c^Z_D(4>PP~;kGshXU^ryN?^3we-_0f4*=^ftd~S}GNwe-hF223~ zFCwypQhWNN81wo8+Ydh*?KYuf z7ONFyqMK$Hd}tT!vzwn_J%5#yj5$>l|I)oDX*H}j(erq*lhA7#GO3^NoPD`((}~9J zaJlza8!Mh|5-1x8oJjhSUN1j3Us^B4XZ)@=eQ6J2UqbT4g1XP?x~$-)7`;o%ZEjM$=9?aZkQASuc@3DSFH;WF(Oin%kRrq8>{T03+oVLg^ibK=`OeMqyTrNTAjKJ*KsZjD?-=uK1Qdja2}qM$SpCb zGWFfEhfVV3_j|j8SHkW01DUg zk*pO`G`cbJs%wxfLnB=1{n6oVpV{JfURm<_qwZ(j@V(3S_6>KP)t&1KDYpz>_4`FH zq|d#;z1Y6rM)#I>U5xs}?zKxVR{D=TpsqC?P~1{5CbhJ(Kk7d3&o?ihD83I>_m!)t zKPhn_aod<|b5Z%-Gu|@YYBR?tt>)$^$1ovVyD#D$8QLvwKX-MDYm8c<)gccj=Aae9 z(jDBCj{svq6d0nA-(>(ZduUusfFbWFNSS`qDgQ?BVbJ8ipvElDqeWJ}6ah~H(bBxvgE!%#{x z)aB4hhBJm`2Ko}%@$5JcWF;SeALe_)_IT{(b9V)tux?enD0}GP<=(*cweDHr1U~0W zFNK#%W7MBKcku}plB|nN_IV%2T8B@5XX$-+a%j4){IvMTwjb?wcFP|z6)l%G2g-{1 z*>jBNf9UTDw%T=g`Pu&7wIgu?R{rX&$8L{0$K82a=6S2yrM_ECby&am@ct(gT~ES{ zZ`Vs6%9cHB(Yn7!v)h-phx_$Zma*WQY3;^8{xN6u-DnAQuZ5n8_0Yz1)ZC8B{>*GT z=rpmvZsop%!L$ZZJmzWJWN4R`qE)FkLHknzDUdAm;4|l?@3UJ3)NVFrjAp#wJ=kV& z{Y`DP|2A^Wt#s~kwZ`3@cQO*pySaKj^vl=kZLx6Ho3X#7jI#mvX8H4;<~d!@o8P-kk@B}md2Cia>kQz0;Zoi3vPuw z_ua0ksc#!`K4Ev8R`YJcRx>Ctev?6=pwTDlvv03 z43%$vaxIvBPomNs{`pkWhvl++>V2IVwj%VZ^5YpT-P^7hlq%VFt)($y4pEXyXXkDAa zLrnLB)}OJ-86G7PBlEd^Iu7#F-Vd7D7fq;vcS)~fMR!((*yb1a3;1PesgGUcRWIKC z7_a8%^?lZ5^A;lXrq|*2Kej8<9NOAk9c+2$OUdl+5&NoLI#a3J7VS60JjR~oo%?F$ zyjXttM7laxqNdjU)vf!ohb5`%;v4L!8_XVEzniG?cE>K-KvHx)7aLoSu=i!~EYUHb9$kPe|brDbYj%_%7xmmR8hi!JV zdzsn!fMRKT^=|61F-Zl2=r>%mH~HI(p63ReX0|=e9y&=p^0e;4*|gTub)BHMi*Gre zY<;Vfrm&kOZeg>L>a3$^$!7y)H47u2DnB1l@~47}4+Aes^ItH2IhJJEHFy6KhB`Q` ze%Lv~XeL9_(zIL}YMIwsQ>g*Mvre%+T_11VI?%LerDNpuZAL$Y_o2dXVsojddY2(I>Nt)v*B&=;PHlIBO`LHCsHI&z1&v2Nxt{4d)v9tSMz$>yZ7k@Rwy;5Bs6Be 
z<>LAo@A+tcIWDR^=UBiu!P>N!@*hvM2>jZndy)5Pu;fduj0?RdKrnvM<#K3Fm@nR5 zeWokvT(am-iCY}xy&0zx#@CEu+@|~#bWEk9ukt^4)K!RmdgY~W%S&Nl!4Cr-0%e6t zjHt22@0ACeZ?_Nb3ON;6=1F#zt!N3U97-Q%yrbsLm58gI);LnHGw|lbV}(h*s7lk_ zkxE<4$8o0&SCEOd5AE0Yyqq~<5UTbUN!apsBqhb>FA`^)8Rht0jq%h++yXi@)Vccc z7^r%{=`jdoFwLG41343#slkyB=70h;nc*^+J8@1A$1+%jV_`fFKG~SZ(qLo-j$8^D zVK?yf!N~xX3zW!Sn7k3$+pz-*E)XBfL~g*0j8Fj_-Oz+ud?WLe2b5hL>2LxBoq=Y^ zK~ocs!8v!Kx#hn>8c0SBS+~H{G@M#M2kFtw5Kg8sR|p3wAZKO3siGWGq^aU+T$t=gN<6=Fvisp;v zy@yAaU!I**h)@*>mp{hc`_WpnG)`dJZGrtpRZ~~i&k6BgM|4#!l?zNak43&dd(+WH z?u}^G$KsMo)#?qyYsp()nJVTB#74Z&-lNHqEHG_%KEFrp=%p2{w8+f|o=scj+#B8+ zQNIxx+2^!Id1!OAW1`Apprkmp&Ij*nsfQ>i*>aWomz=pIIT*nz@Lc$lU1F2S*a{!3 zFET-`BaB*Z>y9x!ka47J-0*MLRT77x|?xW##A ztJ@DQ4&r_ukx?$7#QfYTi7(o?r&ma;cwWG07kz!dap|D|*RD?cpeZx&9Z_>QuK5RQ zmkYw&@2mJ(y^{1RUJ&6*7rwA(LjBSK*Y=a$9}J7{y{3I^F_nB|KVBhxc%sEVdT0~* z#l}7nI?3(T(Qr#E!ELIxqc0*P%YBYynw%LaJ#+icjU(68ipE5xvqrRvEXa6DqA^=! z#`_z}pYGPMJfO63Pix(~E!!1aeMlm6=&7W|X}#WRnTK19+ozIx8bSu$=A5=3`e?Hz z^7B$Jz1B=bEI6l7uW+&4X5NZQ^JT9*c~nL2;HRi??+)7|A}d|Sav@?Z3%bT&*~X9@ zzK!O6z?mx87uwv?-K2vy*H=61NKM-c>81ENn%;lf=f9k=aY*HW#I0#Z zjh>xOYIij7Lp<_6A9Rm+zAdOu`E>oK*P8&z#Wt(?a#HfHUE4g3c3E!pbRhPJiuG(B z^r(5hayCKPV7X<xA{5)Dn~E{d#q zyrDE+l}_@FpxtO1)|@(_6MZwr*01lf@q6{2m|fHsbNV!JmXt)cAKE9Av;VS7k@0?- zPuTeY>2}3-3C=IQz85ot|00a1+F?Yw>rQ6g^>xjbPg@!Wr{{DG&Epy}b2cixS@)yW z)7xNap7UBazywdv`io?%U@ENkmFB$_!=1ylkZsKD(FQ&&0owY9jv$dSM#k1Kr^#YlKF?{%-|BNvkXAQSWD&MYL} zj6CIujmoONmYl!ecavV+JABY1V^>ihvQp-|+L-QD^)s;NHP&%7!}Y<3Up9@lqQuwc zUt-VQTrM}5Pko_KYA`M=cQvbb=uWEW;foKieG6AFJ=2~qm~d6BQh^-%b)@uLb>@M$ zD}mO{LVuBzXeuULiQ#6tx76KZb$dum0>@x8di@h?)iuG*nU-8X!^_CRyGQRkB5 zt-HZrl(a7{``POwO)?+po9>do1m~ypWtQ-WlABv9PaL?QERb{Kw?Spp`r6U61;_GO znz3P?ANCPp$j|c0d-;@)bLaM-phrBet+VD-cvHQ>eRjvAcXO$$9V$+PdFu*Vluto* zuijf9?sI#tZ~CnKZn)_8AtjAJeHSNPpM;1>hO8ZYhQGefmCxsl-?l!`BNX;_;UcRx ze?hd(WLedvmv|T%NXt>&`=3kX@PccwJqSc237ofpc`1(Vf>E958N@m}0%S-~N__CH z2fAPOfufKWK@hT6gbtY%)E1o~C@m$UHJM_(K7_X%QimFh4@16Xpb2N8FPoyLqG6sp z(vI0WPG@~-0z=^xD(lO@bZcm(@xE}fv8vurRc6EaSmx&rLGvpcCdARwX#Bf6{(d9q z!-3Z_Bk64XwKq=ttXV)X5RZHEYpkcVUmgb zcejT`pYeO3wY@%wlaHG#x`?0EojPaYLx@Y_s6eI^*>US9{Sag=QmE32Y zdWd~;x;fOpI3P9gFJc$Ybl<}*HP>|9r(O4Et~e;2o#Wv=`HRNIB~eiJ zLuBy%xbdQ!-)osohwHUtAAD_lA^sz}R?~cWb-ARkblujszppRa&aN<( z`?=^1Ek&b{q~E*p&-+=i$(RVmOLM>CIm}j}i=(oRBKRpwQib)^1FvJ)>){ zl=}J_vwc^;ZY1+;+A~YsVr_HTxNmbtNT(DOG{)6d~>vD4X1PW<=)>Mfy9!F0U1G5-hoaXI7^q=eM;=|AazC)J>y>ir`XXm z+`tRr_%0M%SWe>gF^}ARU~&gL$*&|Eop5OJO@avPTVp5fjw``&*H zeF_q?L@rPGCJ54AKj=PL7i0IVlCM~O#VF2yH(f`8_EOl%c4AWX#C9(ylh2Q`GCsV| z;z~5j=5bpY9%@YJYdjV3Eg>`S-OLF*l{~PzqvB)m^y9Po7gh`CEkbelSX~Z7@#JlD z1|=Hj+Qy#prp4!ov9zDGzsmnTZvOtYdCY6s-}A9XyjD{9Vwco2r;)^vFi(|z8rv6t zPWaG&+ASI+=8XRFXHm|RD}5||q2>rx*)GZ6Vx@FjalnpuUh*or>vNb9>ZAKs61Br- zUdpB-;#$sIb+VIHq_U^tn#%qn9zXmDw`79D*$VfMFY_wg}b`zV=+&k%R9KbpG2@+|c4oLqE>6yGpi7!3OWw zgoi_uaHNcYbBBZS%xWLA^vBZ`4F?_n9q~oq+$bMI=S8$2r=detL?As7cp?jgtPm)j z>_FcD0FoaScX%%bbm#*8Y6EZY0_8xn1N}oph+ccxnwCn;%NqS?ulYF<+&4-TVK8r?U=pt!LAYp)Y0y(G6@zm0SrsCB4l-SL(iW?yk zbPz$|$TSb+vVbQ#&k>QC=Z|kVpCbnaqR)1{9ko0%kGbx9c6bD@bYw2HSGOtX zTS2>*-pAZBO*UexSB1o_;RlW@ZDf_DliZGAQ0R3uieO*(({Q?L3v{8zG~_n*raiw2 zFy0{QW70mrLxq!|85R_)~Wfj}b*YUbch$fRHi;-%w-0v(O7S@s!5tkA8h&-G+F{i^u|OrIB{|G1|x2 zdzWgSRy4~ix;4-)hcbm;F=83ZJLun-k}ESCg0p(KUukyL)g^ko0vxk?$!{vvBiyxf zw{7Tpr55o<1`Ii*-)Sej>>0*pd*My$nMFx3uCE?(*Xc`_5ngojB0@n&kEEr-rA^zDxw1A$)K4sKodoUSnJphk<=F3IZ`arSLugE$1p`4F7haKr+ zxWFFMo;LSqyTWR8=S(B!nYf4QCz+i_aknx_2Qo_&!M0icg^>NEO$!tt_!h@UZC;?M z8c+!(Je6r# zX+R|aGCP3B1IV10V_EF$Oa6EV>KU3m+{l62gRip;4^_7@D=>z(8kW}MTNzVm00EUpSI6toW^b--3`28B({t!GcudiZ3bDwW&4Xn14c_#gEHx-i1qD 
zP{fsLVO_i1_KWoTBJ5o37j@BUXh7WNuk&bgxX2~Fjc@$x#ul@KLFI>o)LS`^$-EOHKv6LVC-hd`z} z{4{ii&~8r)ma;4~kRoGap#Fmr)=AV3fzf{VdO^I9+a91_Za+QXFG%E1z#X;%vNddJ z#uf2OO%@CtIasYU(WM~03o=5vXy&PEvx1PerTrPC(4;ZEB!h`zkJ-)I+`;VM+gi3F z@f8s2mpB-^Ch-lzY6bnV6hlg``yL?J?U+`x;4n_TT$vwtq}+r0Ufl-}Kk*J5?BMi4 z#bv+s8Y>j91e9KGi6Yj!5*M|XRxK9t@OiL!{Qh(*&fVqpjM=X+2Xlr}2JnA>BJ5CM zh}+y9gR^!#d|Y(2GP44s7gnJ56~wvF;0oQ)gBk)1a85q3X{!4@_j+a^xpJJ)$GYci zMZ`n@hA}G9H=vClG4CsV)=kI3#LBtD^ODgQqa{>t^T8aIg+Sb?4)H(!p>dEd#2Wlc z%WWOgqDN2yN3xjpti>Q8Y)CKyQ|i!7Y?Qb$3J=ByCz4e0YbHGFc4p~oOss3jhZNKf zj<#$<33d}#@}Vo2j*XOMGe!#rY*hCA)T4wB)K7` z8G1hM^+MoYsxEAQ5|s@I!nWx7_&ZzY#sKkXcHI4z%QbE-m`-oE-1%5vsT!L}4WgM( zjSMa$1Z+@kRqor$GQgo!0GOeGbLs;&z;TJ0wg)Iwd7G3No#t;T0u|t318zw`rXIN7 z07sn4w*Nm-@1G#o@D@S=bi)K?SrLSV_w7Ior!7rn{2vtELERnaWN78dVJjhyrO-@XXykUMB|Sbj ze+|mnjvORwertna9x2XUSy=F5Evs?CZAUZHZqf2}pXCdh&)vg*$AAUVvGQh;e)rC! z=7g%=rK8zw^Y8~4OLHCTcy>^dK?>EH_sP>@&Qf<0pF?*Jn$mDoyJ9tFe4&na+%HlJ z@-eTWgDe!o=t7-rjp)fNO3~N=nJlR5(sT#Y7#W)J`Kom+NeTMP_-28tD0V&lx&E^P zHX=3*yyb;6FdJNA*xn@uap##;Qku$)vD1W=2MZuTjLVjvPoGQL^NgcgqOMrH-&li* z11AgR)tI<2#*0ISArJ!EQC>v)Tvc8ZwL^{{d|wlVYPxX?#<0rVcwebr6NBt9W;Y`J zv*eN~oOj}^Twa+4Unecd7>ly;2DH-y4yv~mtgv|dkXJ!3+khBF=&7!k?#eih5DAW( z-*qc`P`J2)`#bwEhm9W*jw*aKz@GK)wQYDenMUQ`br8=t^7jB&tiRk{R(RG3XDbn<#qOMOI8WD?B35@#9uHri_A#kbPO2 z$vTt^PD~1Y9&Pmd0P(1qf#J_IO(C%4_tC1)$LU_^Ho>Wz%(LQ>j;X!hdlMtCH-nP>yrT0<3EZ3NQeqKqzPruFTL zcLt~(cx7?un=Em(WuRSr=QHP z*m|nftho}|cXe0_-jfb*-`~^n4U(m~KrVduo`>V+$gkA`ZVpj4eEBr?!}aZ7GW5pL zl>eaEER}Qjbwae5Dd-B{fm0^Wxr=8?WlON-D_~#!JLwd0zUux5^-G@3;toN8QhhnL zu58z7-e`hqybaWvYS(K2!XyziKi=*9Gce+E`Q-+-*-g)H^L|7B6SjB~bXm6~cLFNFzRAKpX(?UN(mD6y( zJ2t7;PKHn8wBFGpW`*ST_5(b@@w9S~X(&S*#RuGOb8<9_5M>9R)$fxgdvJh{shp!^Y81YeptSZ(6@1r`)8Z+BJ4r9spgau}` zVhY~o!=|Xc+h3DLNrtjZ?HoR#$K_A&*G7}cTfj>6Tl?um1PjFCo_$ToyD~Z2&%o^L zYto&(gS+&VDVBU0T`swFH3Mkym#9Kr4fC@rHe9z0uAv%Ev@P*L7>o$j{#Cikf>>jpP{fp+_c60URJ_3X_K$M%Z+!*}xH9yVr42#sc15rniiA@NNM1gOB=(QoV@ z3?y5?4iALQDG3kF0p~s7djFmmp)w1Qga9hX8zD-h5MX1z0|BN<;B-6z*gycj2sIE0 zcPBKUiU3==Jbwhx8w?^K889p>LBGkFDZughM&kkwTOd0{Q#fGWLxG|rM%mCg$jyE?ngJVQK63Vqxdt1EHwyZPi(r4SFHhHX_Y-`*k%&W5g7$fhNnnd<@Ie%^B z{Pyc~meh`0e+O#BZYwpkz$@p7xdyS7tHYNXLq8AVfi?pio|NGqUI@<#8V5YYTqtvM zE1xoymUER5R#>c};_gr+3tPjn+1x)TITVkGwjf-7Uvg-T;{Q8f#sKGnti*vY`^q-G4@njV};`@MaLX7*5PJdbA&*=bSq^$^HpU& zzB!oow`zj>N4&N3&I(s-S2Ssgk2Y2edpl2~4UorJt*V%c@APOn=a7ZA-|%0xR}Y0q zo84XX^_Cy(_Y}C!K0FX2f3TPur*@abd(j5Wo)q5co ztTMIFYiPS>ID&5bZIfu`%Ef^V>qaZ^Q{)ed6v8gjLnCp*9mt}OnyiqL^$jr$)rlDi z#M4Lw3Yv=a+G(6C)b$O$BeeAJQmHN)nDjgZ|rsWXG9C~m?l$v1S>8O&kk`07ok1q@MA2Fd%p6W;Wi{d@$E;z@^8 zE;8KjBA~q4H|lI;f}Kqda4^xrb=ajrApOt!-nZ8F?qxEf@o7^n`+Somp>(Au;&+sW zf7s9FhVfe6rK?=Z3p)r@yanFLmdLHBk8 zD|VrILb4~RA|t}e`b3+svQUDc>ePmNTc=X;HjoZ)$Vkx5Ya>{5@g`G(Ot!sMjz)4G z+(7g@T4sf(zcGRc_UtfQ<{KmoL>WbzYAn52q#qFRBJU0t_)0Np=ai&bnS%VjJPQMi z3wKkMJ@^<|RGa&e=Agm9BXREI`eOs8>#7@2hnzG zq?C3?cHjA*HvNVx1{G*`3P^m=k)(%Fs~~>wD}8bgS6cOtn`ZQ(iUS308NR$Yw)SZ1 zpSF$1MDM15)?B;Ay&hIMm+xV7I}AM)ctyb~o|o_xs+2dzI-MUeovs}wEx?Yn)7RpV zPkFpsskas2wkCzZ+_88)Esol?t+NTWmUFw@GMDspG`I@8yTq{{rwv^shl0R z)|9fhL_9pqS}W6E`2G%%ee@jKub){vGn#pD5Y9mee7vo4HJwOs&^+A=U+sQe46$=Y zAcwPQ3}X}jal|W}Y45{0L)O|RTa7&Iy+=CK)DlsiJGi0w_da8>p1YSHCzl@Vwwmy% zWD{kq>XW@r0)3tVzHH&LfZ2XTMgTv-T_xFRJvvV%b3ft)Vld8d0?-L3qVmVLyn&SmjlQigLs(ohLR2?Hb)Hl_@9|nuPUvs=L^INu~;URM-H!;_)GomzW6FNJ<@Q zG{LA&#qm;HSAoU4e%7AvXC?D_0n&8vI@ovwHQ0jSZ^C8AYS~tl(SsV_J>!S_rd_a@&Ez2xYI=& z=kZ#oarMhulfama*Bw}%k}s~Ax32)$2KE6mE@16U_=nrn0YI2Ae*w1IKiM_I0#F8l zHD91?phzD;90V@77J%CX5P*S@#Q!so%>lb+@H9aF01)W^v|@<#zuYs<2Ouv2fLZ~~ z_Fpg+_~;*?5b)Rh_Mq;9mUZdHF&+7o74oA8h;KN6n=aZ^s~h1D2qGJry1f 
zxGFmp0^37?Ve!8o4VX`xfZPNy3kdOy#nI6W3n0p=hfrX}c248cu#_Lko9I;62PT96 zh2~sI=PwQ@%h31wSRlU_S~Jbmb=?h{qwz<-K~F(?%2kglg7zUTwf&H`^=BLV*(9h4 z0`xn`LYIn)v)P!pHCd@BdCJB2?8Cf?-}_-m&zlF1=yBfTcK48^9RWE=luCRL(ea=zM#@gmD zXM3c|Ub7U`GFt5Kg-Xlj8Vemm{_Z%@%PRK)dn+t?<%V$t87PWG*UySPlQJ80gB+O? zc7u2c$=E2cC*qaes#)IsO4Hj5obphA+PE?W`{EUhGjmlOc%XA<%l}YOHP3 zz>ZIh{Ot6A&*1GgbnttnV>4Gd0#$cqyy0SX{I=B;`}Tyw*f9od&*uU|;B=g}-xsCp zpEjzR_eskU8N~rkCywN=-v>p^?DWRe+N}9%aIxOHbj*@Ah&;X_QDs>;$c%Prov9Fj ztB?~W^Mij1ot|-_I@FZ}mww>iDw-J_V zoMD8;Bqz3vm2~AjzrD=z*><^xHC1k-CcA&lgqquE~IJ zz_03BIv={(!3vV6Okv4dtKj>q{DP{KJo>>j#Xv6Y=5RK`ML?!;Pq{rqxbFqyw|VVa zAKO9&d$i2O7ep2k$0)0#kL{RU^-1&H^b{s2i;Z53Wh=6VzJeX$jrlR9&&T1|$l3<2 ze>UkHdlRoohrHLE4%IU4cjx_F{6zOdm0%D5rtRqvUTV?>4_KTyB>ZZzjK7Dd4UwJ7 zmBMe%sY}aQ9LsZ(3|Le~%}a-pu`i@q^r>1#Xep=3=op7eB0SEIdJDOqe>o+p)InVi zXBZk{f&1;)FP0`zW|O+`Xfr;!9+TLKd#d2mz;@CW|g}X1b+@{X2n5|Pxt8%l>-r`yuh=2a>p&4sf2mO8)RJ0Ye zT5_&NTXnNLu_@%N$R%Srowz}b0K*XF?1Wtq!Eg0kr=aF z;}XnkM&z+dp>a8kbs}6>8=!pGO0$%aO8@$^CbOVf@bi7#vpnN^-znu>98wJo(&;8x z5Q;N?8^nCGI!v)pMa12twJSmLsr%+XD8EY-%zjX*Wu3)kkBL2e_ejf!!M6AZ0(JA; zi^9|LFTehSI`Q{0%algU&nnJ$? zZrgoH@&^o2$}l0{E9ZeLp7&`2ZQYJ{JwF);KQlr~Ra>H;t)O7~#_100_79#_in52T zI~2?-Ri!TG%UDlhoW?s`t-rtGZ&Y0zY}sna&A8!mNYUoQ)OwAc&3O)-jn; z&`2MukV<~lPE-fFmfoSXaTfc$WGmxFJV7!;PZCdpEfaf#jq>nA$na^Pl9zPwqdbaS z$6O4di-?G18xZ?7Xh+Zr)Y)Y~H)oJC@X?OW4_HKRR~17`BoeuRBmsQjHdR^p-Af{2lx{LHPCi#P|A zIXqq|hi4V>d$xh6pQ|Fig;Ml#@n?Jz4K-%(a+9rI$-^dJ7p$xbYUjzZW=CJElzd&8 zr*1h*`PV9Q#mTAtHWC~X9XdCDn%MRj*R=&YoD9rtUL4U^oll0?IFDj!LH}q%jKGDb z07G3}SBd7(+D1&Q`mJZYCN8=ujFZkt%#@(mFH^t7&NSp?vrdLWlv-QN`b zyv2cpW`ydS^Oxg|TL-k0P|^AURTcn2_>G8}k5TkyT>`=>5(78@RC^o1m;p=xl@<^fO{~^Q*kkqQ%Dl!Cj#LeGVw}*Z?LUiEf*>Ip}|pPr#l9usHzs z?>p8sV88GlZ~?zj#c6tqRc1xlfbR;VNC7AwfU0?838=~f<|_cM1z`2U{nk`$fB_0H zPZ?!>V4{ULrDavI*^am2_Zw4I?1xdd#|``X#vORyDMa30VM%t!dN@x{^yE{cr>C}q1`@6Fa5H8p~w>QcX=EWv7YO2*yk*T5}8(JVlH zjDNRFnmbDHX=yX@B8M>AXbSH_Ni*tPb-+M$4ySMAZlMK+V1gXjtC zgHfvmLR-|1YFlc@cSi!*S?ZPIMHR_;+|TiyBhfy-iJBK^VrCzzQjyBGg;Tpv267s{ z@uH6v`aevL`wvyi)Luko5}u`9y7};j4>3k|m~t@`349h(Nd22#>3-N`xEI7LD2#W? zv2G(ET*mV{5gECr3%2Po3E%61p3TUzqOCU{{9-kE;zs%OHM?+CjN)Louaz~+fh&&1 zCo$G!8;9ejWj2d#O1fPv&q3Ce;%dOMsGKR0s$dayB>yZa!giQCiT4I$sCdTCdDYZ> z-SA4-ZQKv%-I|(&GuS(3sWH9vM%T(}O%F!ZEaP&zd4%_N5^g2iZmUy)f0@y^$=Q58 zHr)Ol9Bjq%MkE?-k2~Of9CT6i(;4uDbz#-`UWW$=P1j_WWZL^8ThT!dEFYWb67heEN2!WW z9bBM9R57P!h|~*gI2Ew7#kssF5?amY2J@RI;UrsItv+UZR2N%ltVpEktTH1)u@QGH z-3UkORy(3dElUnYhE|`N>;_+CTYgh2h@G2DNT~`;u_M{l40~3Y4hrsInpTW=;|^Tn z_PDL%snQ*ML|7EyCXKLdX&NhGopZ$uCL@^QvrlxVV`e|_Te9siEne^RoO2-mf%~X- z5uE^AO`CQ2Riwg=kPe$9USk*0ZBVA@9_B*cQWj4^EiAf`CpOH2oIpJfA5lzNUOK6M zG(u9+Gc8Y->~tk{ZWC#tuq&3l3n%cJ)4s>-8!LIyUt68RIawP~(Rw<3gaW9VrQ+6O z&7JzFW!F|U==6zAkw1@5wI*G#@N(IJV?k77y z*yRHiBsNQrDK2u%g4E)~o0bw95{MG|Nb?)1J-*HjeV$9IZ$L9f4`6{P6D6!L$hw%h z?wmR>Itao_Q)$cLITQrC$OtKj-fdT}Y4sPa=#D;D0W=2tXF z0TwX`UMM({JW8BU1PAJAG2gGOw4KX-hmx?_va(|!8^|w* z6NC&N%;aV()uvgMd&4QU2TMj2e4>9}`8p}AI_|izSce_=847)1n#Xau{S^lKUfs+L z8?lmrg#5Fy0z4F8)K$Y+-Ji#iEDxJ?%>LQ&GtGj$GfA6LH5iSRf+#VM^AFlDdf{4z zss0fJt(UWd_Z9a61M)IQ`0Z+*p{9rtB^^lvaD-QS&r|X+>BbS|Spyw9bDarm+J9zC zCy9*4CyE)SQxs8~zU%*@sOLmzkyqC$5u#qj1P&jE*Y?{u z{q>Y~cFl}=&uoK{R^bo4zIe-3 zCrKzRj}eYvC-IYwmIlmWfgtkMx}D;Os*32u|DXiFQ7#^4{Z9Ng$Y72<9qsYRI5?ti z{|@J{-(HCP-rLT;(y_26csFiF+-LX4>QnJ`xyl)6Ka$q=U1>fsGev+m0><#^etqWY zffz=KkBP}42q}Hl@KGH}-eH7LRXmG!MWnV|rZOu6J5cQzyG&o-oaTz@E;8ba*n}F& z_RZt$vxMg@%BOWZN|Qt+>EbTm$9d0g51nk-)c|-4cA7hi>_a7`Q^}2OFw&nb5(e&a zBJYu?qV}JkXoLtFA9}x`W`DIxN|#}mj<2tcW0OQUjg~p|qJ$RBDWN9Kh<&lrAx0e! 
zzqhtVm8A8Y)|cC?d0STi_GvBv0MYUt*yeu&v>fZ+0`!KA!3>bwVPN@xlTu-*VJ7oaz)HtWBZ z2w+YC%3Xj@=aw&3o;K4biN#)55~%pMlh0{5=TE|>HX>ZgvyVrF63*4M;HsbTYo@x5 z=OnAO#i1ecm%#mmPP_h#u_|ll**r?8!&ca4+Cu*mWtfelqP5q4>G&%q0u8_PUAa^y zEqVNG?$tr?%tum)BfVd4XwUPzsx%cF0oCHeLtb-+2`H!!nga@O3srw^le!2ytRA|w z#jP>2arbMs}pFXBBDjdkT6b)vQy@U9O#R_z)jkBkhB73edg7;cGE#2mFmIXJ5C zI-&7FwyJhB;ljqgqXRH29=ng4l$g<(ba|#AN?{tJhAQ>QPk3wV8{)B_#_&nN0#TOu zr_5SOt{tuQNenT@X(H;9-jg!df8V>AE{r#w+ZO8K>c(WttX^ma+c7rF7imRGB9J05 zy%d>{S3x_&t;B^}rIH+gE}PsM(+;s*^Z4i8!OlItXXtC-N{e1QOPF%2B6{hcf}PvE ztNy>M4Qv}7pQxs7_9Yvi_=9;~g8Khz0d(~%m#@^gi zWXy6mHuGmAT2S?#@ZH=u;W^!}Q>g5L!Pz%{w*sC~^lS&eqPe4(IV!pJqIfO{$6Vs2 z9rt&we7wO5|!y|h3tw#UWc~0Ta zu^gfF?kw`$nuL1MLx}rKT?B4jOEz)4N38Er5^sy>mJTm#F@J)0gi+m02YxHgJu^rY zwk!qXEW;aAle~B?w0NNA(>ECEIFu_UF%o4@xWk5R+PpHmdGD^enw02rS7BbLc& z$8OR7z{^#%uvzzwsg_U=JaXUoRiIwstc{VO$s^#p1^y3;wcTzQ_?v+3`{xd-)b&b< z|DfJQPF|PY#-1^o2o1_e_jA-8z{R+cwLC?KeMR&0YEDMamOfp5+^(m-?=6TKH7PLA zVWJ5pT#7g;r2_=cQ4 zgE7m5zB44P#ns!HI-s0iLt~DAB5-yFgsQI+nwZq!_56v=>egh2<2lr{DzDWas|3dJ zONh%XI#P+I5#HET(M`I2C<##qwLWl%D#goK+sg@U&OP6%3K0(5!y3|Lpnou({^23c z)QVS4^+O<#7J2G%kXKH!D4^(w`W-1`!RkPv|9N>8si|KmbzF)n_q}~AN47zN21Gep zN2JqZKP?*Kfj;gFp;;j|_FWfkP&ZKB$na40#B1E8M~J!(&ai0co0?$-0Yqkwp_(-n z?)RB1DeD$_S^(V%Kt;mD;v9J_D<%^L? zE0H+@S4&-jLmNMQw;6j~{WZvf&qX%_)1t~MscJJ?WSV5o8u@U$O+#Dyso&o1_V$i* zP*1OdY#YDb{#>s%iIx`z_JCfWsML|5NldZXISz+1&W^(=q4q%pZt9z+{-!*>?8T2D zrt9`U>Id~PAsHEb^p`B2hW=GvztVTral6nghPy9hOX6t!q}Fgl3BK(#6P9I*e$6iP zqMH9L&R3L7XHa1rnj_Iy(!CksIzT^pQt^G!N4!;I_uIwL5&c-W%F=O$9==>TQZTP5 zVlly1u!Lu0(9a(^_o4+%6}=;K#aB!MNr;y`U_ZrgJmYQyrQh1TY7%S-(4*zlwV8^O z)@@cKS!o$QXT1}=knc;Fh}ZH|zo1}d$bVureULMAK~-Q8ywFHxw5{%Iy4i$cnqLY+KmrQ&t6_=OaG}0+Ye(PHk ztNOokE?X?pfo~kic@2PN@{xX`hp!po}?2s7# z;13m=J5~%|;XL}3}Vru)4j)jE1o)J7B^#U4flpEVMa9x4R|t8z?C0p(}e!~EInArkQ2L#jvA@@kU zIqm@xEYMj5P&@$}{5wNnZF58$6uj+6JE!vTEp&w^^ZwVrEeR7AAS`733snLmMxI|y z3Sc(Wdy^mlkQAUPKqPs9k%Q^YQn8dX4M+z-p+`!wJq9As=^`7Z`R;$chyQvufUlCL z1TeOVKvl>tL6{Ga1>33BxExtk^3Q7vHP-vaC1iNN2)|;mbVeCEcM=&^YFltS?!kgn zrs-i)c!Zh826`p2_vPE{H}tNmozA=$(5$;NJ&Pu|zzU}-5e+bbFL=(skM<}na5LH~ zU}dl-Anc2eg44NiId{&>y(A2WPDFDM=1cqCTX#xkVI(3Yv3qc=7n`rc$Kfi|iR-z@ z^GQ`E?C}@BiZ<6y^k1V|qw|v_?nu6Cl@hYSN>4c<{93Py6T*?03OC%6o%1i1V~%=j z^q2#nqoqUfQne-JN<~Nh-WByRD!9%N@d3dj)*kZcJ$-CF2TQ9`m!L38q|sjojVuva zRx`QC(pr}nYzNt_G6#wc9RXJEr-p7tDs@a#>{-u<`33iOiZczqC;OtVI73&|B6_95 zPO*zbjm-_pH5yf@c523jlyeVfx(HsxJLykLxv`auOo;H=?@T21PO1Mm2qhl9e)>7@ zY}z$<#$*yN+qPnZ=H(tDDfS*t^!e!YcQ9$!1)nAMJBmiE4P6Pcz%~ `Yi3pcgDi zrY~L6ZBX>&0~pbky|q(kL`k_R4|PI12aV6{oI))HHUjaF8Jz^^&i2HH-Qyu* z^A%<$caQ)-WGoFiOR=oC`9WiUTE3r2>qdeW#<2USZOu*a3uHb(f?!ed%td{@dR z>fWQCD=yWdDlXF3Zp*m>)3s~j2)0&dF;^NlwZ-ASL;hY0bBB4IHJxQO*hPLwY{q9Q``Xx#Ywi(g`K3GR z^?6p2T=2*g=70m3M1_M?j2piEN^b%@1+;unmS^%I%TOgF{{3>GV>xn;S#)vvh4iqP zDn~i8N;W)vX%E#WRmGAr(g;C1f!*1)Sq$TOoKa5{<(2FRrOrrUlCY$46;i`~P9^73 zZ3kMvM4s|MeL2hiNS9#&Rrr2^t9TJyoMLZKQ&8b30~25H$8nbp{BYT<1&NNf&tIXN zt;-Nu;EFsBFne{I&-D4DeCZB3Mycl;qiY!aiHzioqbf8>d;pQ^n&niem{K2kWcOa@ zw^Um&ja)K5?67k9XIJ7jWPY$93TFD|C+7~{op1<+UysXwP-ZEo1M1NuOOKli;w%$3 z`wea>dc7>jxga}*c|glIO54x?k;g=wnUM1cpk_5TlorEQJCO3r^Ak&1xROg=jE@I; zH9dx0fpIove?Q=j+=rgmjp~KjfF=EUDyb*L4d^DMl%!1Wp0CuZ?QA)H-F(i&95k?iB^`y1^}y zQD@5$j^s=zUte#&|MW8J^v1nN?q)mUinnT0Z?P(weWBtY7|Fl)JCnMs&g4o+Mna68 zBbsmLRhf#;SM6=Eu4qN#g;BG1=$oA!^P6*qR$!DG_ih%H*%YF~ZZiotsv+i7fpgeI z$~KUho81m6u^>nG_;y=~Zk^Q>jgxo|P}fOw(>p|PYx?1D+(;mTTL)FGzNao|gaFT*S6pF3=q zbCE@rwh*w&$&2Lla3S?sex5IY)lk1+*8Es5T9gnay%p^g@-T2qwBB9K7bgx9PkPGd zo#1wQ#A~j!oH@qpTtE3i$1 zu#TH_6vdVC&Ddlr?B}x 
From fd81085c298c62bb53f2922060ca367574b61097 Mon Sep 17 00:00:00 2001
From: kahmed10 <15948690+kahmed10@users.noreply.github.com>
Date: Wed, 13 Nov 2024 14:14:12 -0600
Subject: [PATCH 15/17] remove export function

---
 .../python_stable_diffusion_3/export_onnx.py | 85 -------------------
 1 file changed, 85 deletions(-)
 delete mode 100644 examples/diffusion/python_stable_diffusion_3/export_onnx.py

diff --git a/examples/diffusion/python_stable_diffusion_3/export_onnx.py b/examples/diffusion/python_stable_diffusion_3/export_onnx.py
deleted file mode 100644
index 65f6f2477d4..00000000000
--- a/examples/diffusion/python_stable_diffusion_3/export_onnx.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# The MIT License (MIT)
-#
-# Copyright (c) 2015-2024 Advanced Micro Devices, Inc. All rights reserved.
-# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the 'Software'), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -from argparse import ArgumentParser -import torch -from diffusers import StableDiffusion3Pipeline -import os - - -def argparser(): - parser = ArgumentParser() - parser.add_argument( - "-o", - "--output_path", - type=str, - default="models/sd3", - help= - "Path to save the onnx model. Use it to override the default models/sd3 path." - ) - return parser.parse_args() - - -def export_encoders(output_path): - pipe = StableDiffusion3Pipeline.from_pretrained( - "stabilityai/stable-diffusion-3-medium-diffusers", - torch_dtype=torch.float16) - x = torch.randint(1, (1, 77)) - encoder_path = output_path + '/text_encoder/model.onnx' - encoder_2_path = output_path + '/text_encoder_2/model.onnx' - encoder_3_path = output_path + '/text_encoder_3/model.onnx' - os.makedirs(os.path.dirname(encoder_path), exist_ok=True) - os.makedirs(os.path.dirname(encoder_2_path), exist_ok=True) - os.makedirs(os.path.dirname(encoder_3_path), exist_ok=True) - - torch.onnx.export(pipe.text_encoder, - x, - encoder_path, - export_params=True, - do_constant_folding=True, - input_names=['input_ids'], - dynamic_axes={'input_ids': { - 0: 'batch_size' - }}) - torch.onnx.export(pipe.text_encoder_2, - x, - encoder_2_path, - export_params=True, - do_constant_folding=True, - input_names=['input_ids'], - dynamic_axes={'input_ids': { - 0: 'batch_size' - }}) - torch.onnx.export(pipe.text_encoder_3, - x, - encoder_3_path, - export_params=True, - do_constant_folding=True, - input_names=['input_ids'], - dynamic_axes={'input_ids': { - 0: 'batch_size' - }}) - - -if __name__ == "__main__": - args = argparser() - export_encoders(**vars(args)) From e64ddebb9e675947d18446c6386a551fad1ecc86 Mon Sep 17 00:00:00 2001 From: kahmed10 <15948690+kahmed10@users.noreply.github.com> Date: Tue, 19 Nov 2024 10:59:18 -0600 Subject: [PATCH 16/17] update README --- examples/diffusion/python_stable_diffusion_3/README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/diffusion/python_stable_diffusion_3/README.md b/examples/diffusion/python_stable_diffusion_3/README.md index be0b9fa3a12..189e03c023e 100644 --- a/examples/diffusion/python_stable_diffusion_3/README.md +++ b/examples/diffusion/python_stable_diffusion_3/README.md @@ -35,9 +35,9 @@ huggingface-cli login ``` Export the models to onnx. -Currently, optimum does not have the changes required in their latest version. For this reason, please follow the steps to build optimum from scratch. 
+Currently, optimum's latest release does not include the changes required to export these models. Follow the steps below to build optimum from source.
 ```bash
-git clone --single-branch --branch diffusers-transformer-export https://github.com/huggingface/optimum.git
+git clone --single-branch --branch main https://github.com/huggingface/optimum.git
 cd optimum
 make build_dist_install_tools
 make build_dist
 cd dist
 pip install *.whl
 cd ../..
 ```
+
 Once optimum is built, use the following command to export the models:
 ```bash
 optimum-cli export onnx --model stabilityai/stable-diffusion-3-medium-diffusers models/sd3

From d6d68ff555f822f635fabe1c93062b83f2bf721b Mon Sep 17 00:00:00 2001
From: kahmed10 <15948690+kahmed10@users.noreply.github.com>
Date: Tue, 19 Nov 2024 11:46:07 -0600
Subject: [PATCH 17/17] remove import

---
 examples/diffusion/python_stable_diffusion_3/txt2img.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/examples/diffusion/python_stable_diffusion_3/txt2img.py b/examples/diffusion/python_stable_diffusion_3/txt2img.py
index aec6e9d92ed..ef0c89dbc26 100644
--- a/examples/diffusion/python_stable_diffusion_3/txt2img.py
+++ b/examples/diffusion/python_stable_diffusion_3/txt2img.py
@@ -29,7 +29,6 @@
 from PIL import Image
 
 import migraphx as mgx
-import math
 import os
 import sys
 import torch
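With patch 15 removing `export_onnx.py` and patch 16 pointing the README at `optimum-cli`, the exported encoders land under `models/sd3`. The sketch below, which is not part of the patch series, is one way to smoke-test a single exported encoder; it assumes the `models/sd3/text_encoder/model.onnx` layout produced by the `optimum-cli` command above and the MIGraphX Python API already used by `txt2img.py`. The `input_ids` name and 77-token shape mirror the export, while the vocabulary bound is illustrative.

```python
# A sketch only: smoke-test one exported text encoder with MIGraphX.
# Assumes the models/sd3 layout from `optimum-cli export onnx` above; the
# `input_ids` name and (1, 77) shape mirror the export, and the vocab
# bound 49408 (CLIP's tokenizer size) is illustrative.
import numpy as np
import migraphx as mgx

# Pin the dynamic batch/sequence axes from the optimum export to a
# static (1, 77) input before compiling.
model = mgx.parse_onnx("models/sd3/text_encoder/model.onnx",
                       map_input_dims={"input_ids": [1, 77]})
model.compile(mgx.get_target("gpu"))

# CLIP prompts are padded to 77 token ids; random ids suffice here.
input_ids = np.random.randint(0, 49408, size=(1, 77)).astype(np.int64)
outputs = model.run({"input_ids": mgx.argument(input_ids)})
print([out.get_shape().lens() for out in outputs])
```

Printing the output shapes is enough to confirm the model parses, compiles, and runs end to end before wiring it into `txt2img.py`.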