From 7402b0eefe7eee5863c7b2c164a7b13cdc6425e6 Mon Sep 17 00:00:00 2001 From: donglixp Date: Wed, 8 May 2024 18:20:47 -0700 Subject: [PATCH] yoco init --- YOCO/README.md | 170 ++++++++- YOCO/imgs/1m_retrieval.png | Bin 0 -> 43087 bytes YOCO/imgs/arch.png | Bin 0 -> 54800 bytes YOCO/imgs/inference.png | Bin 0 -> 100685 bytes YOCO/requirements.txt | 12 + YOCO/scripts/eval_needle.sh | 11 + YOCO/scripts/eval_task.sh | 17 + YOCO/scripts/train.sh | 27 ++ YOCO/yoco/__init__.py | 2 + YOCO/yoco/criterions/__init__.py | 8 + YOCO/yoco/criterions/harness_eval.py | 86 +++++ YOCO/yoco/criterions/multi_needle.py | 181 ++++++++++ YOCO/yoco/criterions/needle_haystack.py | 169 +++++++++ YOCO/yoco/models/__init__.py | 41 +++ YOCO/yoco/models/decoder/__init__.py | 0 YOCO/yoco/models/decoder/cross_attention.py | 46 +++ .../models/decoder/feedforward_network.py | 33 ++ YOCO/yoco/models/decoder/gate_retention.py | 87 +++++ .../models/decoder/kernel/gate_recurrent.py | 302 ++++++++++++++++ YOCO/yoco/models/decoder/kernel/rotary.py | 332 ++++++++++++++++++ YOCO/yoco/models/decoder/kernel/swiglu.py | 32 ++ .../models/decoder/model_parallel_init.py | 16 + YOCO/yoco/models/decoder/rms_norm.py | 26 ++ .../decoder/sliding_window_attention.py | 68 ++++ YOCO/yoco/models/decoder/transformer.py | 251 +++++++++++++ YOCO/yoco/models/decoder/yoco.py | 294 ++++++++++++++++ YOCO/yoco/models/transformer.py | 141 ++++++++ YOCO/yoco/models/yoco.py | 158 +++++++++ YOCO/yoco/tasks/__init__.py | 32 ++ YOCO/yoco/tasks/data/__init__.py | 0 YOCO/yoco/tasks/data/basic_loader.py | 75 ++++ YOCO/yoco/tasks/data/llama_tokenizer.py | 38 ++ YOCO/yoco/tasks/data/lm_loader.py | 303 ++++++++++++++++ YOCO/yoco/tasks/data/tiktoken_tokenizer.py | 81 +++++ YOCO/yoco/tasks/data/utils.py | 267 ++++++++++++++ YOCO/yoco/tasks/gpt.py | 176 ++++++++++ YOCO/yoco/tasks/harness_eval.py | 151 ++++++++ YOCO/yoco/tasks/harness_task.py | 289 +++++++++++++++ YOCO/yoco/tasks/mmlu_task.py | 92 +++++ YOCO/yoco/tasks/pseudo.py | 202 +++++++++++ 
YOCO/yoco/train.py | 7 + YOCO/yoco/validate.py | 294 ++++++++++++++++ 42 files changed, 4513 insertions(+), 4 deletions(-) create mode 100644 YOCO/imgs/1m_retrieval.png create mode 100644 YOCO/imgs/arch.png create mode 100644 YOCO/imgs/inference.png create mode 100644 YOCO/requirements.txt create mode 100644 YOCO/scripts/eval_needle.sh create mode 100644 YOCO/scripts/eval_task.sh create mode 100644 YOCO/scripts/train.sh create mode 100644 YOCO/yoco/__init__.py create mode 100644 YOCO/yoco/criterions/__init__.py create mode 100644 YOCO/yoco/criterions/harness_eval.py create mode 100644 YOCO/yoco/criterions/multi_needle.py create mode 100644 YOCO/yoco/criterions/needle_haystack.py create mode 100644 YOCO/yoco/models/__init__.py create mode 100644 YOCO/yoco/models/decoder/__init__.py create mode 100644 YOCO/yoco/models/decoder/cross_attention.py create mode 100644 YOCO/yoco/models/decoder/feedforward_network.py create mode 100644 YOCO/yoco/models/decoder/gate_retention.py create mode 100644 YOCO/yoco/models/decoder/kernel/gate_recurrent.py create mode 100644 YOCO/yoco/models/decoder/kernel/rotary.py create mode 100644 YOCO/yoco/models/decoder/kernel/swiglu.py create mode 100644 YOCO/yoco/models/decoder/model_parallel_init.py create mode 100644 YOCO/yoco/models/decoder/rms_norm.py create mode 100644 YOCO/yoco/models/decoder/sliding_window_attention.py create mode 100644 YOCO/yoco/models/decoder/transformer.py create mode 100644 YOCO/yoco/models/decoder/yoco.py create mode 100644 YOCO/yoco/models/transformer.py create mode 100644 YOCO/yoco/models/yoco.py create mode 100644 YOCO/yoco/tasks/__init__.py create mode 100644 YOCO/yoco/tasks/data/__init__.py create mode 100644 YOCO/yoco/tasks/data/basic_loader.py create mode 100644 YOCO/yoco/tasks/data/llama_tokenizer.py create mode 100644 YOCO/yoco/tasks/data/lm_loader.py create mode 100644 YOCO/yoco/tasks/data/tiktoken_tokenizer.py create mode 100644 YOCO/yoco/tasks/data/utils.py create mode 100644 YOCO/yoco/tasks/gpt.py 
create mode 100644 YOCO/yoco/tasks/harness_eval.py create mode 100644 YOCO/yoco/tasks/harness_task.py create mode 100644 YOCO/yoco/tasks/mmlu_task.py create mode 100644 YOCO/yoco/tasks/pseudo.py create mode 100644 YOCO/yoco/train.py create mode 100644 YOCO/yoco/validate.py diff --git a/YOCO/README.md b/YOCO/README.md index e61e137cc..ab3f3becd 100644 --- a/YOCO/README.md +++ b/YOCO/README.md @@ -1,6 +1,168 @@ -# YOCO +# You Only Cache Once: Decoder-Decoder Architectures for Large Language Models -- May 2024: Code release -- May 2024: release preprint [YOCO](https://arxiv.org/abs/) +## Approach +
+ +
-## Getting Started +
+ +
+ +## Performance +### Harness Eval +Training with 1T Tokens: +| **Model** | **Arc-c** | **Arc-e** | **BoolQ** | **Hellaswag**$^*$ | **OBQA** | **PIQA** | **Winogrande** | **SciQ** | **Avg** | +|----------------------------|-----------|-----------|-----------|-------------------|----------|----------|----------------|----------|---------| +| OpenLLaMA-3B-v2 | 0.339 | 0.676 | 0.657 | **0.700** | 0.260 | 0.767 | 0.629 | 0.924 | 0.619 | +| StableLM-base-alpha-3B-v2 | 0.324 | 0.673 | 0.646 | 0.686 | 0.264 | 0.760 | 0.621 | 0.921 | 0.612 | +| StableLM-3B-4E1T | --- | 0.666 | --- | --- | --- | **0.768**| 0.632 | 0.914 | --- | +| YOCO-3B | **0.379** | **0.731** | 0.645 | 0.689 | **0.298**| 0.763 | 0.639 | 0.924 | **0.634**| + +Training with 1.6T Tokens: +| **Model** | **Arc-c** | **Arc-e** | **BoolQ** | **Hellaswag**$^*$ | **OBQA** | **PIQA** | **Winogrande** | **SciQ** | **Avg** | +|----------------------------|-----------|-----------|-----------|-------------------|----------|----------|----------------|----------|---------| +| StableLM-3B-4E1T | --- | 0.688 | --- | --- | --- | 0.762 | 0.627 | 0.913 | --- | +| YOCO-3B | 0.396 | 0.733 | **0.644** | 0.698 | 0.300 | 0.764 | 0.631 | 0.921 | 0.636 | +| YOCO-3B-1M | **0.413** | **0.747** | 0.638 | **0.705** | 0.300 | **0.773**| **0.651** | **0.932**| **0.645**| +### Needle In A Haystack +
+ +
+ +### Multi-Needle Eval +| **Model** | **Size** | **N=1** | **N=2** | **N=4** | **N=8** | +|-------------------------|----------|---------|---------|---------|---------| +| GPT-4-128K | -- | 1.00 | 1.00 | 0.98 | 1.00 | +| MiniCPM-128K | 2.4B | 1.00 | 1.00 | 0.54 | 0.56 | +| ChatGLM3-128K | 6B | 0.94 | 0.72 | 0.52 | 0.44 | +| YaRN-Mistral-128K | 7B | 0.02 | 0.12 | 0.08 | 0.20 | +| LWM-1M-text | 7B | 1.00 | 0.90 | 0.76 | 0.62 | +| YOCO-3B-1M | 3B | 0.98 | 0.98 | 0.84 | 0.56 | + +## Setup + +To install the required packages, use the following command: + +```bash +pip install -r requirements.txt +``` + +Besides normal packages, [Apex](https://github.com/NVIDIA/apex) and [Flash-Attention](https://github.com/Dao-AILab/flash-attention) should be installed separately following their official guidance. + +## Harness Eval + +To evaluate models in Harness-Eval, the script is as follows in ```scripts/eval_task.sh```: +```bash +cd fairseq/ +TASK='harness_boolq' + +torchrun --master-port=29505 --nproc_per_node=1 validate.py \ + --data-dir ../harness_data/ \ + --criterion harness_eval \ + --task harness_eval \ + --batch-size 4 \ + --eval-data ${TASK} \ + --log-format simple --log-interval 10 \ + --bf16 \ + --tokenizer-pad-to-multiple 8 \ + --arch yoco_3b_new --tiktoken-model cl100k_base --load-ckpt /path_to_ckpt/YOCO-3B-1M/checkpoint.pth --yoco-model /path_to_ckpt/YOCO-3B-1M --tokens-per-sample 4096 +``` + +## Needle In A Haystack Evaluation +Our model uses city-number pairs for long sequence evaluation. 
To get the results at a certain maximal length, the script is as follows in ```scripts/eval_needle.sh```: +```bash +cd fairseq/ +torchrun --master-port=29504 --nproc_per_node=1 validate.py \ + --task pseudo \ + --criterion needle_haystack \ + --batch-size 1 \ + --max-epoch 1 \ + --no-save \ + --tiktoken-model cl100k_base \ + --bf16 \ + --arch yoco_3b_new --tiktoken-model cl100k_base --load-ckpt /path_to_ckpt/YOCO-3B-1M/checkpoint.pth --yoco-model /path_to_ckpt/YOCO-3B-1M --tokens-per-sample 1048576 --interval 1048576 +``` + +To run Multi-Needle experiments, replace ```--criterion needle_haystack``` with ```--criterion multi_needle --needle-num {num}```. + +## Pretraining From Scratch +To support distributed training, our implementation is based on infinibatch to read data iteratively. The overall data directory should be organized as follows: +``` +Data/ +├── json/ +│ ├── train.json +│ └── CC.json +│ └── StarCoder.json +│ └── ... +├── shard/ +│ ├── CC/ +│ │ ├── 00000.jsonl +│ │ ├── 00001.jsonl +│ │ └── ... +│ └── StarCoder/ +│ ├── 00000.jsonl +│ ├── 00001.jsonl +│ └── ... +``` + +We recommend that each sharded data file contains no more than 10K lines with one json dict per line, and each jsonl file, such as ```Data/shard/CC/00000.jsonl```, should be in the following format: +```json +{"text": "File 1 is here..."} +{"text": "File 2 is here..."} +... +``` + +Then, for each source, a JSON file preserves all the paths of the jsonl files. Take ```Data/json/CC.json``` for example: +```json +[ + "/path_to_data/Data/shard/CC/00000.jsonl", + "/path_to_data/Data/shard/CC/00001.jsonl", + ... +] +``` + +Finally, ```train.json``` records all sources' information and sampling ratio: +```json +[ + { + "name": "CC", + "weight": 0.5 + }, + { + "name": "StarCoder", + "weight": 0.2 + }, + ... 
+] +``` + + ```scripts/train.sh```: +```bash +cd fairseq/ +torchrun --nproc-per-node=1 train.py /path_to_data \ + --save-interval-updates 5000 \ + --no-epoch-checkpoints \ + --arch yoco_base \ + --criterion cross_entropy \ + --task gpt \ + --tokens-per-sample 2048 \ + --tokenizer-pad-to-multiple 8 \ + --pad-to-max-len \ + --optimizer adam --adam-betas "(0.9, 0.95)" \ + --adam-eps 1e-06 \ + --clip-norm 2.0 \ + --lr 0.00015 \ + --lr-scheduler polynomial_decay \ + --warmup-updates 50 \ + --weight-decay 0.05 \ + --batch-size 1 \ + --model-parallel-size 1 \ + --update-freq 1 \ + --batch-read-ahead 1000 \ + --total-num-update 300000 \ + --log-format simple --log-interval 10 --disable-validation \ + --tiktoken-model cl100k_base \ + --save-interval-updates 5000 \ + --bf16 # bf16 is encouraged in pre-training +``` diff --git a/YOCO/imgs/1m_retrieval.png b/YOCO/imgs/1m_retrieval.png new file mode 100644 index 0000000000000000000000000000000000000000..9fb8d94904b60266d83b46556c652deeebbc7784 GIT binary patch literal 43087 zcmZsD1yo(lk~QuG*WfNegS$Hf4ek=$-7f@pxM+ajZVB$LAp{5ncbDL<|KZI)GkovO zS{K%VbB}a&_pV*lwL3ytNg5T25D5YT0##Nk}+fFnrYhY&()jc5(v*0sv z>aPZWUFm!4ip0~!rhip)_}$L7~U2h-+JaG`(IKQvhbp+1(=-N9gZ;eu(%+*KeY zF(e>W_J~9D-LId{qbxG{zchO^thC?r`!e$<4V$sAq{})dCMA82$IR^`0$m=={1_w+ zcn44FTT^GhG(Wu3=5=m%@Tb!GYVH%iF^-+-{ng*Ir~Bg{knlugVI>i)~#2TZc^@=4i(K76S+ zr-P(hUmQ9TDB}cF2|2AdKf46OLSoEe!Sd1S*hE& zE)#17W(W1Db{Lcz9*w^L1zv9V`H>?U>|5=3yOnyt)9P`26{9u}-|=*3!@HB|Vm4VI zYn)2L>s;{W*N-2_$ShUFdT4En!kZiOHr2_EEB?BX8!Z73r(wR&kJsO=eiw1M0oTl$>i+qYB1WxJQpUXaCyrXR!pq(>|=NA1s|H$U92BnF*taqro=FoqeSV`ElUsALK2N}D4OZ%O-Wtgr9aVUgS5%xfUY#wJ zPmNd2iF9qgUiSG=?D2!C5u1UykRv2sSObK+|1imC)g1(TC!`@>)Rj6qHuh$^RBdpr z%G(GjQ>rcNWH&#KPhgZgrc=)Nq~#+^i`K>*)M+MIY*kPE z(lJckl+$(IsyIZa!M3y)3lmdJZn!qfz{Ge)&n2d#!CE0@mXm^7ul!@B^6w%=O372f zA}JVgPMbedDD4STbjG_Uy=XJtD}L8m9fPe$EsXCnR130pa-d}Fbyb$S;I0*BZf?G3lT$@!*L<@cby=U3m?)OT=Z2K!bJU!x_Cb)d?)mBd;3VmT z5bryAx-ZHprAh@aedo1Tn0iqkoTBn6(@6@!4vMl#`q%^RD&{TspTDqJaREQjKux4< 
z%A{XP@HC(8+b=(UyJRjK46($)e8Oza;-+>ubQ%?e`%m1%0ezQt#-Ag>Vw@>2M^l@bqpfjbg@4 z?4m-pfc8mjuWfZskV??$#-NQ0TiR3iA+%tdr~hu{6JRa3YVscXZUiF<%Y_EJTko;p ztj7=U(l!;c_>$XGp2;F2D%0P8LXx6iuebF%|H&=M*&yl06RFy=2{b)n#=u8$*=vVb zyVbqY_i|d($pNVX8(y$1>MF>?m5Rnd%z}afv>7hFv1wE&m%Q0_K9&RwX(jGAZ5EPc z(k$y)41YBPMp-3GbQ`OP+-HpwGl>X@31RS$B=TWS%QdTyd{Czk=zvx>+aKmR<$2Hh zZbQEMMWbh)9kb2g!r)?^e z#3^MjIS>h}S((mS4JF`G`=juFw74By+=oUE`>d?Sc;H>5{wCt6$qgj(yO_9J;<|>| zmQQ7#zK-+2M70s_ZiT}lJy8{G&-0F~IPJzf4{GrNyE;HaOHSFwYI523i)t>z(!(aR97p?#;=l6UZi-I*AUuaA8oAkQX zc7t~BdjBe7e)rMqDssUziPbN%A3n?ox7~Z%)1^AbUK4D1t>VKt{p5x-XmCdGy1(7| zdf6E2L2$i++qUDWd2+PY>;*g1)f#q;Gb3VfT4C&NSry|#;a5$gn0XC2jqrVyKSc;M zZL9&+P3uSm?=7Mq?pL011J*-%j#*< z#s_t5Wek&<`c6CVhb%bPT0&Ads;6K}2!4xlM0V=yjg-oBu6_4=Rmzz=U5`Pu7CS!P zfta_ntkm8cG2ELGwAtg^T%y0}Vm)t^lm~Jj#J$2jNVigT74(C>5sn)b75vP3N->;> z?fSwoyh$>ziQ43E2)eI>Vi6HMTRRDStB;)DnA3t$^;|k1udvb>rEH?o&InfMN9fY{ zTMjS;J_xg!qs96hh<3Ut@QkjH6Cfw80AAt`%(;UXS&l(u6Rrk92&+Ij;U2kd`LHhe zrAo+B?N|B)25hmBE1m)*RA1h)4}N;NGll_z?@(*=4===6X&nbOL#$Z{&`LOnvt3aXaF3T)E<9U{jIAzeM`IdB+wa!^;9)`B>rA+UJc=W731H z7k9}WJosjh03soC>7HJ2VK_~Oxa52O^RD%5Kr>Qe_Wq3;-CKSQ(U6l&WXchH&6N)v zY7}XhC0_L8ZSa7h{ALTmLB+rEg?v2`gcrlC55xLT7eK@4>v1H4^DL1LJf2;yy+0K- zPUZ#P^q$x|eOabo@UfHnG}g;&phgIG+HSh9E}LB%4mGkU02kCWc^;qW=L)dpd3$p5 zbAp845e;?%s2*?45dwa6mEp7$JKy<*CjZOtk*p}7ELh^J!+nEOdr%rCeaJi49z@#R z3vew|=8+7Q8bzNS;Js)4(mp}|W~(lVg|ZV0z5e0Xc{2u{+=(Gkjr5UmtNYw9$y*8b z*4M=810$@&em6#wm*-R#2Z)e%IJzDO5oMSHB!GF?Sy=)*k%r?M?=T^3!ertx9?y^Xdx;C{|n7| zEd+1hZ@M)9I{$H|-e#s|XJ;&_lz&1=Bz<2f)NZQ3?Dvp2Vw#MB^Ds5}V(PsBI82V- zW$g4lbx-a7Vsq`&6{+_wIjujs8U+@C8{0CDhnGq0sly>=;1%hN2{!a16(Q`6h*9kx zu5dp@GS+%IqIgSgZ0JE0OGgQ#s5;bpi%~~fth<%xr{cFsa!F|27RspPp{$xY0Ce&Z zJM@Q3y#vE>IHHr@EAuGR=R)<>2h+hR8;v`P`*LE?6^!DG4KWJ&bLdRms0vaVdtEND z!PZxHKb-wwIj}Lx4%SsaZfii4f(-P6wj^5hnAXtTyBYZsY6>6gBVmf^C3sL#%b`gp zdXp|TP6t;)HoY6sfyQB`DEXPgv7_-c$GcLz1Y0GZ5|zVOd04zz#CL12cesIFV;qN= zu$;>a;MW(tp3G9?MiL>hzBv=sGe03TY3r(!ViUfBeszNo;>YA|0cq0-FE$9xa+ z>0rymcjGHTbZ|2GsGOU+ 
zGOIo(kj%FB)4^G9N-?^m+2qp5*o@tDIK5fN7Y)`@^qyK%CLFyE`D6#P0yjc33M*0= zCor19DZe1LaAwq@JzC5PB?*B9c?69FC6P|Le~|kg)&)FTe~Ndk#nF8aI=H9*WVu^{ zSjdt#5nVOij_6y1pL zl&Iw>`J>Nq`%JTaL5yW2GcW8&_fqA$IKTh-Nrp!UXCEP802T zGcidOl&QW4^t82wz)56keMO_05vZn8*;!++ZlN9(crVyUQXaZmx*hZzGNEkBGXlj$ z^LJHSPZkQapbX{e15bfZZSQ&Di7|K8`z}w%nqkeE`Re22l9c#l8yQdhb27VqBAo}r z71>16L)|e(5Ne!|=i$=C&0(@WlsMQ0jmd37-WGyMp>X8blnoqR&117ac^KQNp`7Hd zg-C(6>}6Ib^2np176nfc^U=qbRg4(0Qfjy%P(3c5)}f_DEn!ICRjn~Oj@v{Hd+^{& zuqhXz^VxU$KPpU)8T!V{4D$01MXb)%(8KK#NKwvQNO;p8<{EWz+UC^?6G(0?}T$krH-6R?)j^cIol8w9!wdTd}s-(2gScgN!A<56d|3CtY^3Jot*! zv)PXCk0fqVVewM*HacPjC9HE zYwoX1%RS>Z>A+K;Tn>dJyqEb=t{{=&8+|<2wlYFBtT%9r3`=O$5+{lg^gYaRt65=C zGCjy|p;di$cyMGPLa8zLQo~Ny4H*WWzWfHF(ukgLDk$R-K`n5R%!DoW-99iOsDl%bn2LGy za3xW!LHoFEtwT1V(&8$g@}dW>P5Yv=o0>CB{R3i5Ahz11<4FE|Ej8@!X7WtA@q+zD z>W0wqo}0T3xk=N*bT%^zjw`mApWCs@N^KRtqaFkeAFAwdmz=yCD5f{H#dTWl(}f~% z8ruphM?NXuY}Bghp;v7JK}~v$BZWWVPZybX-A+cSCD}~!i;3iA2tlx4V6Fy>_8ay2 z_SsiU6p??<*8Gx2LS>Bu~a#0(f21;L+J-Ot3Ay}k>eb$K-0OGJ@aL04yzT} zl-(!q_;%^*5xuwK(XP+gZk4fgdX?DPqnCTQ-X+dMez@3O4MXZ*ka-tvVuY>QW@yib z^Xg|`e89mk6HVMo0HP;Wb18H6hVM*VgqE4A~vu@`bLEpqB2 zl5(`IxZxVZN+iNn7%$Uw1g$X$cjRR^J*+Kv4kt#q-GV+#8a~Ki z9RdkCQcCQhu)oBSt|W2EOrW9nmeW`<+$yByUD6HvTbP&MBkvq58^ede=o$o;Xq3w` zX<8^M;t}3Kl@Vyq?I*X#I3B)4y_*`ByO#y;9rO-Ooi&xa>!)k#J7^bp(*(jv|DYl5Oqz+r0IQ8XdNSCj`!-+` zZG9}PEa!qsn$xvhkSHBddqhuRP@`SouKHfMF9eU!Go{N+w>q%4EWE87bZzLl21~5j z+=|RE9Rp|e{5OrR{Ln8eBGPk(tZW@AiquM4gX#}rXBlK+FI(kdMo=_{Ah{~*-IeSlMFv`;30TwS6B;}HLJl$^Jt zu=@+I8#9NnkPjQn&~62Z##C%^1?lRv;^c|GP(z)i$=yEiaUJ`{B`l|co>?V^3iKOh zQzN}|@A^+l1HCva!NLHnIdY$4w>LxDY(!f|Jx<%+`Xl?P6Uqu&c59_A;}DG5If{mK z(ACLCJ98DZli|W!+T9LS^)pFMtZy2h#5+v=A^pwY5+Tu@<38G||8kxht~7`YjDjkeY0D5S!^@(*ev@%Jk=!zG>$4?n%P22w~2 z031!;(vT{fP;7}`p70F^5(v=dB;?z=xItqiBjS3A3ryE;0RDi1sf}MbO}#qLpk`oG z<#N&L*LzrFqLiljZhlY?K~Q8L$DLc?9hz5AP{b#Y6lb40dJ)7Qkjm40uPdqdaxsJ% zS0t17mJn;ajB2p@7-cFHe`DZr-klZLYQe66_ci{I1dayD%6$0QIgZ0N+rxl$HX@y=Wgt!2@9j&0?QR 
zv6UK?BN36})=}(nqg@A%Cy(fyF_v$wXRLv!ILa~FG7eo(5ok9?bJox0S*DZB+^FuL zG~1{WJx>K*@()cYGU0K~}T~HftVNy@Qp1o@ZYbW0X(e;yt;8{x&>YC-On5a8)2kCTc<=dY(H1ev-O1Z|d z_o_u)^FAcY{9rX}Y?ajRWoNzgpLK4XW_E?k9 zE>QAPX-aZ(5&DgS_7x?9MblLfyqPATckRP-*($b$EX^TM-5oXvtF9v%tYbrepCe25 zei%Quz=r_3)SB#P`&@ce8EXxtPbX`TmXKk?yE6qaVJ+b;x<0lhF zG&bEixgUL7z7{c-FlRI~zM!&&n+~}+HFCgtA^AwQwy=1GYqS7-Wm>zJIP{xH3~$?s zG?7A5iBTVG?}7H6wBzNza}t(Vza%bi(WQ}qy1G*XqfMmb_V30sD=36cBYpJqrJ%UC z{lvaOvG#DXM9i)#M;l8Qx@=31&1<1=eGg!->lJ)2E;jt$Q1HO7;E&i`mk^Fg5(FEf zv&{@Vf?dV=Wc>5`K~Xm6WBIg8ACw!CFCKGnEzPPvsRn#*vmV$`QMQIwJ>l6L^4u_>(BVQqX3qlU#OSyG_kzwQiB7Na35G#O?SNTH{~HO-F*` zgu~NrwA!e{Mb4!+E0V#ZL>+dK4V&n2KRt=Ah=3k!ZXL{xvXrmcCbL>Su}*q zK(GD|UHu(bH#)aM;dXhh8y%PbsqGFfCrUCI;c3$cx`z>|mT}pPHxP7AmX;Ejzk88u zQQCw{R_%6x+y)jC7=PB9<8^6J zUU#iH7i!?_&}?y~!fa%Qk)KFS&x#|yF;|YgnvswViThrH7EVEST953JNRdD}{vl(6 zf|K?qFVp08fGt-}*rXyghMkSn1=ES{KnDa3fN5WH)2e1*ZC38O}d2 z1(oA(V-rZ_ZpoF&ai9fAF5cyAhRUO zDJE|3D)F>~Duf-3Nn3_iN9d=<|E{^Leo+#UL1d|j8$vWH)CNTjk<2MYn1hh#v|bO% zC0@RZ!+lPpz=tmiO4P=NMCL&H!ev+HGwLY3OiD6|j%3y-fiB$V2%16bOPWgzefWL5$Dnk}O}giiDRpy9BwBmJ4UGb|e%gVt z6HTx={>zEcMIY1dT5oIA0K7L&6h%S@Tic4V<2Th> z=SZV=6z@9)2U3bIWW(7AxeapL~4-Clf^DyIbk1`ZVX=oL{NWrxEXzsR)>6sXQ6wew0Nx(e@@UC zjZl)xP$e1j#?pbW;mhZHzL2Hc(%i8c%ai7cI?KOq5FG()h)~GiN(R2U3-4dj!Bivf zQ~o*XT^&KohFf(K^|SPbB;q6@P>sRZAw0o{U?DAY!VC(-Gat)qry)nNS54ramT_4( z6~Bx=aMURM96}ioUX8R8&wf3zthfp(tC)w}Z3Ft+m|Y_=6i@L{21JcSPcopLL@nkC4w<`)^v7vOi$G6JZi6onU8EX-1jPn3~HBA?MRvgzW z4(sb(J|cP_{&5>^egkn#V=4soB_#Kfc%z=DKkEYK`ppTYk zSC5#MQ6wi@bcs}k(9B=zexhDOjR~HoTB+G>aD+?e(A^GOl( zA4fw37BVB$#t!ttKFedKGDw0RZdew4IYi`A*)cx-WBwz(u3FHsJ5v5iQCYzo#P^tB+jD8~C zL`v(iplu{09EU3^8?5RzcG)1`luRJD50Ndy-pFw0d6QpyL?7hZ2DT~+2;ZcJ&Cq0i z5+Gb_4Z@>dBpq`>o|GUwQQgGd7*L_F&48|oyXMH~;*YjI*}rDc(d*o3!ae)!hSHWt z`1#Jjkw|!hdaN&)7I%?x1j>9euM-ly<|(9Afss9^z7rTxh1c72{kg72i)e(|v9B?h zlTSUEWd+LB!dL)INhcl;@5!l_h2i}(oy;I29&&ot{ZFq5FY>X@%^Qv480OTI2dB;n zstipE7RrLKozFf}UZjc&UL-&3>6<7NJtv}Q1{f0-%$u%>lBBCODFoi?!B<6|cI4al 
z+!2+0{aJfa6=OUZIzy5f^>x7|jW@RQ#5rukks!(g;<|d&@zNvk8h-G^z8Tgnp0IzM zzY{zLyfr=0P`aXG&rc&p^xk!h;FE>%y3N(nApK$(8((gDAP1)i_n9Dkot#w;ylrEdB|edKvEo& zr;+T0Wl zn^uL|g05k=D94e{nmUrt=9#YTH<0+H+z>L8l^4}`&uIp*1psrg7t|8 z(V{MZ#1ou3ET0z&9{5)iT9f{TJK35+F3)OHL|o@;PkARa&h2@+)c7}Gl}3tV!I?wxHL1I0IHHeJm!_Y62G}<4 zW(v&jqZ}Zd?Cj&)PgU?Js@O$ONstz#{enC#CGI1JDfw6ygB&nW-8lMC-}_SD9GyOr z>vMXq3iZ!P6{pa@RqC88rFB#*{hY~<+tFF$Msk)FdpW10S zSz|$^5p<{2R=^Yy8$t3L2_Jt58UC_XPPiQVDZHmZ4Rgn67XKGXl3$cbUa$$I1=h{m zzkY+Qu{P`a(s~M0=U8betB+kiplixym?Csw4eICGBWpvi|8>O8DvAqhTd_I0=fKc= zg(-DYt`(w;$uTU253C6!7B*$Zs`IHHRXfbR!TtxVntf<1*=&$bm>0H2Q0$`_CyS>K zQ50nuCBJ(N!M6z=-FbSn*<-MbX&CwT2s*gkn^-ixto*_Ela?mgYybffpO}Z7m=wQ7 zRO1pbojMkUr3ol$lsjMQB6^di#0bk^&5M5_G-4wFriQhe$u;O|4Q203OHbZ!FURBB zmK2>9x7bBw4>C5f{57E0{rQ{)F&+XjUgVYr!i6DPD3|+-7}Pg02uiCm5N?kpQvcUL zZK?@}&Hm@n8{Qx;28GVia9d?u{5=h%OjDgu6{_gblp=|vD`pLx{$~@%16a~l3uh{m zo4F(@h#C@OSenAJ^@pK1u_zet2V2Knhfz>HrhpTpFNLwHj4Z2BoY72RNU z4RHz(8E0igGT`&}ndCZ#k*KfMKpx`~jTo6BWFxFyqL>?7$u+Z5_gL4SY=Y7+MwoxE zL8sc`R|RQfguuuI{{K&862wR8kLN;l1n3{r!klvsucFQ|6&Pa?MMMKj3EK=L)xfLz z=*Xo#eW)adWY){YS5Syazb%VO`$O28n=l!MU5$$2zUViq1|D?BVl~`iW0FDs{|}u# zlw&=jN1Jt`rxWeZ3mLL#2))U?+66jTo)RJ+%f|7M+fes!r0!yff5FK85J|RRzvL&L zB|olPx?*2rKSADVG!*&^{i~~9{bMIB*V4xw~};^ z+(cTufjWpD1NY7)D&4ou|IwQP&Yo3EiYRY!CC3tOf?hBGoPm4 zlXe7nqvaP_zcxODVL5H-|;`U<@=_*jF!TdNey?i<49hWO=VtBxZiY286 zGrLq&A8zUB!QcQ4p1hCzb4g#Qg`ZCO6JW^3D^&doT`h)3^lhN}EWj5997_;u_c(|8 zLoy16U8 z9)cVsq7Zy@q*B2fc3{$!;)cBFBfcuvQdgh{tc4;?tsCX(&B_MDq1Fi-)ko$TSiDUF z(Y;KB|M@5zAeKndNmlj|r})*MlyjwPB5#L;q;4uTJ8B|9Sj##z-|d&10~HQ=A5n1X zgK~@}-Xr~Mo&r<#Umxtx5bs8#&KQU4ZJnmA&)Dr3d3Kb6g(VndfigFCSA5(ob;kNA z9pkp4C^9}g#2OWQY-mrIrpK!V^V_qnms%Q?AAPBFa=DOA?62=E5{)UN9Q!fyEXq~a zI)QsBjXNo9TFl&=G;2X>+5g@yoyRF}E*U5Q8Qz`9zbrdujTjXA^EvSC?@6$;v7CB_ zh$qwk&KEL}C#`Z5FPvxz5l}yP1{5>yMCmjh3A->K0rhVn55-2kfX7SkcIYPN{x+zB zq>POHSk?6fbrd-wHkSXf6>y-ef}mb)pwH)ih=1z-g~Q53aJkLPrMtS~bEK$t!j7Bl ze@}h@1xCU}hGX!-6R0>p%uDgHk)>$<3xxjbe#-s`%4{`)j0|vG3@DWUJ+Xlq9&QLV 
zg*UZa1-I)8wtD)n;@R*?E#U*fyz8YTWnGo%mb6P5FH7dsVuf~nYA|`@UB^mAP{dbS2s=h zGo0Vw-T-;%ls}caKNzdqD1Hf?M-zFjg3iYHk7th~bN}(OtKtwv5|X#KRJupOKSE4u zoVVl%fU?u*x7-viFxrWP)Xr|@gNG1g-i57{+0P&Mw|@PmKYn)-EKbYNH>^XEps80!zyuk`MY@( z)&T0O?g2)Q5`;knFk$>d&hYc~JgN7`bAwTc`P6|bSTeO@rR2jyTkY>=5m$2HjEs!9 z`_|(QMs00vzBZ-&yUBVh{>$CH6b4c{d6DT&IcLkCe-g1q>WR$bQ$^aN6|53cDepf` zosz6~gor%WWh5qLGSf~A4>*HG4WOpg%H$L;53;>VuFw^=)fx>hU)xfS3=|MEW{xxXc%zc=!x{w^HsoQqF(kmkAA=wNsY6c88c z)LWlDod1%K!euG|>Kty5YyqekE7<%*QeuXl61DZ z5EBa>x7)oqw*rhnk$SLPV!W!h|Bq#yfBA`03d``C2c#?J0_q|J!j?^(Xgm{YYO4L% z_w2|6C~6wHzTaa}2BN-=kb<1^@ge?x8zd zfB#>n?aH~5gUh)R(HeXYK0Eb(6_GM2%Lz+tdm!+Nb1iWXd^h_Y{4Wzdb(h6HSRQzM zC%0++fCATwSU^Q3OUe12B&nw`t}?DrZGatu_#pRbuAWW|@W`iAs2NdkJ#%&a5XDdx5hx=joOz)xyYC zY4TTCLk8->%CRsq)vUsXUO;7y{$G*=;0=;k;E~+lx)S`^W>)a4Gi3INj(&^_`Apmf z5w_`{{yFR8MNZLI!$nUW%@|(K?5z-pVtlQy*f`LPxc@Lf9}8p#=3F)t4)j}`(ATft z#{@Y)xj@R){BKVWgkivjjKxStbOfOWXPBi9Q(+a zNhlqHUK@)J^cnpHYWu+^@kvRPz{FN9Ri&1d84?}}-T>;A6We{R<<7T%x(O^Ovi-9I z9uPJi%+{3}c*D=l%{h0&;#r0m0r~`>a-W7k z(7P_0h*!1a;Y|L3_aAP>?Nk8&}8v@{0Gdc zAQ}+|1Hf#MlPMKmIy~r)0u=t5Z;z%s169bfK(+X#08AS8PoStNgTrdwgRu9XT|x?i zsd7k(LxdU$F$s(|dFlr0NX>6f)^ym?SdD`g9eYvPKaCNU8npA}@q7pP@bJ%{)Azik zg^{V9~pyaplvJg-jNt4u^F)YIm z8Ry!M<3vswy$kqCIT<*D013+Qe7#Mf;uo&@S_^c1M$IZP4&z6edh2P|FL=xk@@jyQ zjTTc){z6d_5D-k(7>l=8fe_yZ&?*5nJLbAq%mj`5cEckJJ1_9swWX+(z&#Vda8TO- zgajC2U9hOF!>{rAV(5_0$EDW1`%mFhf@dH3c!RYXZ<95p*WYWjELQs)tQTSqifR4v zqjT&su8-0frpoG%G|l4`5{YyEOqJ&!wO6^TYYrBQV+r&z3w^hI7M6_CGrXzk+~3m^=)+kv`MR zx3VN&n;5r%2Q{DjZvNeIO)Qd1&#Skl|H-I znc3d&;84e_gH>+^^=Xb`WcLFl%C)_wS|DjOYdgF^bNt|x;-q`Kj0i%8MnxIf@1Sv&B@j)cP9UaGTF z(^8$S)@`)cO7qzPhD;(L4+^1&1m;pAAhXbB9mE}!6E`tAbFx~btA0(~MWCo<$(WcF z-|%jt->yfo^e{uSNQRO6djdCtKay?VKC>XrhcBcCLZm2;YVlW`6&(vkZc={_(0Q2w zQmivT(X{L1cmKC>tM-==*5|ighu;Q!aCiSsl_s?TN|#u%Zyv`6-E>g6e7!Ajh149> z3`~=4Z(dnTWDyIW&$Z;8L7Nt9EK)cF)?^e-^o%Cu-#)t<9)gaVOZKuf(-WX571wN z1=W11m>RWjA2kgOvawPU5_0i)f;ID}%QfXM_ooYGlV}n%^;#4!C)+bz?*DBhekWJf 
z@+gEIwgcp1S(0OIJB7u?KEW44G(;PkAA5M=%q6pDUh@bt5I#j0MlilN{^&o#-eD}G zUrkwTIMDofp|2TS9}`G7<$!FMmE$;vtPR> zB*h%6c1#>TH8w{IoM3!Ugi)y1=C#7ip-+h6+A)|d`8Q(5fd4{(YT(Bf{`bW}kxFE2 zeHhRS{L0&uaDap`5#W&q0AZ&tEiHYSM+B@!ffc%qW5dUhIE)2u2Y>#o0J_1wzJCja z;ziaFz_;Pu0OW1H=n5AcI`sk6r|o<=i_-J?TR)jEjrJ0(P|?uHJ>FejexE$j&15qT zHR9a^M${tlsNE|A0HKcznF^1I5w!Ytq$m(In2lxfDlN~~Sy9T$%1)MRvS>HhrV}|2 zk*ikfaPk7B-I+YIo@c|f^=E-wMf9%`j0%WY0ghPCu8CrsE+->1-kr>#@trsY7y~~R zn_bLD!1RAM{*A9-(&2;B)KHvVT(&yykCLkNTjc;?t?)Pp1Zpw>!fXQmwUOl0>nIlp zTQOe3)^y;y&^N>kKT=sze+k`>W^A;z38Vv@;o^A35YP}+IN{5@np+}J{xDD96;Jtf zoO-8w9KLN-BQ8hKC}q+CGJ_cxWZO9NKNZ@OwVys{x44!%WWH1v1Ae6laQuhw&1X++ zs9C_DFgmFejf&*!sh2i?^u9_mmTXx!&l<$td(aZ(wU9!QiY20F>A1I)Ygo&5NcN#RiStp4ZIW1&`aoD;t=2$|45Ns|BL z$oD0ib5b)IAt*t1{(Y&1pi233Vp9=+sQ-?#X8fO7pG8AADdQ8;7@*&Jq!xet@XI3_ z(j97sL$1@U7;*y>Prp$O|GPT{d~u zzP1+;TTLhQ|M_TfFDLI$lC4Y(aQ_~qjSxdB2{j0^|7Uhj7YgXvC(9S2CG6k!;sVbA z1eJSHF_z@V{!2EzKUIpAa_}MnCIJ*Q+D(qRFZnMZ05`wbok#>CAKbWqcKxb!pbN^O znTZ(N4WO7U&?x`-f_X2#@I^WN!g+218bnP=DZcXn2&NNFV|k@sz!n+~CH_Sj4RBK` zTH3Hk(BmBY>j=C{EHDxT8;KN&*lz?#OpKfP_X?6;wA3#dROAM7O?|gy67n=4OF~8U zaLI!q_0O0;2wi3q8=8^1+@!{7Ln@uyA=^If8F-NvyiS0IOqQrn@?OoF76R--yhik67s9y2rkbP6^Jx?{88gUHvPvD-~n+tl1{~S7~erHeI9po(vdvn zLSo&uE^#NR|FYDcq(SE@Owr#WTK4CZU`0eZ3vIvgl%_N4f%#35kMNSC$l!Khen~xN zn+7ff@=hdD%Wp<9RtX&!(nQeyM$CU->?0QbHat zZvZEm)5&YSX>Vx2{4WDv$(I*O@eebSF^e8kjj*N|ibh0acQ#UnxtS@OSk@jQe~U#K ziTXn*D%-ClP>ByS{AL@^h#Be+4$omt?}eC|=vWl)&i!#`DxfymPk`sx%|&kmw2ie~PnpYv{R*gj5tjQsB|JTr+`WDo@KT{^5dC;K+JDU9Izqt*}ApH6@r z(jM#putx)0caWKu-%Zm-Y!W~{aU&D~dbinfP=ySJYol5F_oOAJ^0Eh#kG0;{Vw_;W zr|!EU1KHM;V+*`@T4aq&?q4zi(f-)CiG1mU{(+90b!6W2(f3)7_OI7w;zcBBA-JC% z+AmXbVq)ByTRe_Q5D@uRjme?0o&XW|{+rW0_a69`la+)zdJJ}8S<$8$c@Hx3j|y74bVoDXPCmE&G_t3E5)Eh394f_#RW_`ni?xw}wVn{~*h4^JO$sOlM0vg3fLTCIti3OhRfeO5BJso zuGDpRTr_9c_oN%%8|n<$OF}Kp=8Wi-g&{HN-a}FI-r`%_@Vg(5N1gSU`S(ZScu%bY ziHvb%=mmR17jL$&4S-PdCADPpi9fv2=h|NP0u$MVh7@?h9u%7B$piFxPVE?oSN z{*#s9vNgn$Q7LT-WJNzj|%{q{?X 
z&5jnEUlxaU$=~!9{}QF6fu6p|>E2_N%Yzi;s2L9%T!=P5pm#{dYK)|NjRL z8y7BnXJmzty|Q=qPBJ2UmT?&=B0D2wkCH7BMY2kEA}TX`i_9cKNx$d$zK`F1)8~8t z@ji}@ce%#tJfGvSUdKigU%N!QSb$S#5AG5z9~FRni9<-&21 zogbx_nUGy)XJt@BU-}Ep%0%6Bo6B~`H%u(TR8yQxd&5j?^?(;~!LOo_e1%^(!R>qZ zOw0W1#O&JV?0Fyz^(*v`PRcu!B`Jb8R1uE#*r(T=h zCu(tA0g}YAzd&sp`tLPTJgXSCOiwIfiwnvtxx6FS7 zrY_3hn_36`SMvgz1o=JtWXT5)kqjH-%1*%4Eoax;PrCr9=kT>JKR^&icQ1;Z_1Or+ z1)*SZWoLPsQ1HrMr{)*T&5NNJWk3*}{q1DA`cH>%9^xrg7Q zW|$p^3-R@_Bu{*fyljdPb3-jc#zV-?_)HxoLWCuj4;%M;R*2G!XxU_Ebf`wKp;5iT_|zH%Tc zrg@rv(`SVkml=0UCP;_KDZ6<+>VEgU`RG*~<9Y1~2>}3)49r?|UHJQyPR^*e{)26n z8rhio2H{E9oBGYl*%X7@lN|>HtRbF#Yd-Cnh$;~uMrL`G2NVO4&*IOeECw` zy$5xh>*SNw!H3-|63yKj<^tW~Gcna)NCJBA%R~On$L#cL^o&;7J3?KR$;ey|aU_Ix#f4#yNQ#4Knw|iH zfDWwj3oy_5s(d!1#1UZ%jp-@L3Ah$v)=F;M_5zsZK~DL|Md9E3_s-M3 z^4nM?o0m9-{s1S*MwyD8Tx6St(k4!1JWRvl+Cy~GpY0aTWZdwU%PAldHzA(Ac8FxT-V+n>|@Dsz{q-Qo5 zRywE+QW1m+u$+b4Yga^+iV~P*-$zq&pmb#Io3MX=Z!aLaTxQ%aRwU^*n#XW@WaY^YsSJSyIuiaJ5GaZ7(j{kfNcjI>?Co4>gG@O9^GjaZxdxl+D z_38lhlr^1Ge4Cf3^+Wtbx5S`1JH|q=lDJzQ4&j5nN!|MC$sWpq%wAs6T>I!k}f1D>Uyg*1mylzmuQWE)3LNgst`QmS~qL%;H!sD}+R@|9`_Ww=VQSqqfU2Sw+zeLl&L zNc-$E;zf|rhVo6`L6+R5Px@JjZ5`z+*Ej~8HcW3Wu8 zFz(e0mv;uC^QJqRF@!N!lGvZmda8i7VtY3s4N-nQEnVyS^5Xst2R1tb<1*IjSmnle z`o+b?uEaAJ96%5epxtuW^lGvEO(!QOZ9BW8($8i+dF~Un2RZft0YR-FUZbq{!KXeW zemEyFY=F$CLFhH1TVGNy>3fACvNiu>ItJg;DH93Mt?imm9CCb*zA)RjD)7>%(rxbw zciibC6r-cV`{_xyGeW`Yn;hMQT1E!vC%by|-UiEZh+KX(cv)Qhw5t+@N-OQchuadER~dJ5Zy=KFc4;JjxwIa&v8(Ha}M!XngCj z6p=dIYR6NoL(ty?LI_9^c8@e&2VTm?l@p4qCuS(B1-DQ! 
zsb!JR;%ffMBBk^;qPw80+ykms zM^|7KYuZ`9RU^55c+G^=TwMZnlk7KVg6bq=r|Wp}F`g&^dq`dzsQ0p8i#dsBhi+@K zvCxJ;^mwJkhld&Wf${LVXAWj9HzR`^5fUH~K%#dJZ#6*T)=M=Cb&5v2%OrOWzR)3E z;|a|zSFhY-zn;8FYRy=&=c=$NBm&n=cuM>?Bz&O~ZcoDCcxmNx#lvjK8Vi^ci=!W< z!Chm*?d3yz728M}c%=Nl-4Pkim2{~>L=zu95|Et4Tu>-(d1PDu7taTwW_n;Hn~d72 zy)4q=g1y9`^gHk5FfT_La_-{AJ5`-_pnj0ift4usK@H>}u1Zf;KBNkQIKeAE z!Wwg-=t6vWfYS0WM6G7^*2Z)qz)n#~$-}`DEh_|Nm#f6nG?@lq`z0}JNB}hc3G8fj zg8fIY+&o%bT~&HnzjE2hF(k^cdSl8ALd@2i{~k@-Y@Ym1y;PMCdOO@a3Z2VAp9RPs z=Y6^D=`|JRAHu&lhczwt$5H9YJiDHlR!$y=OEvH4_7NSGy5h4w#Z;WLKhaqJC<( z1v|gGrKmr^3fmAlwVIE%C^+$`vW1$R#d?0IcJOvJD8Kc74UzD*${TAY8*RRzWeBVN zJyd$B7^W=B>SuZ}jUojVww17W2E~53lq4ze&&AiJj20?UZF0F}WE6ORj7%yPyEnYj zQWV9AUkjMnqi;;moZnwF^JoCn@E}3#q+gYsZrDz+7-RH40@;AU!D=(M7)1Dwt?ufMn8l55%(3{!QVKEHYdr1cMh z$M~(oTx^NXXOB^V}82!q5WX)XtVfzI! z%o3;1`*S5-i`^-Yf~u3!(#~?{+$!L=sMTeSGEqG_DYu~Jd#|p{zrE)Oa+mL6+Y z+VCey457#BLedsixRa*`(k zzEwuilb}8cqV>(uNQoq-Tjhb7sEcm6jNT1&N_iBB!09%3n_q_DRn(>yl1m}I({}+; zdvs-(+2SH8m1$u}djm1mQ&anPb`pg^Ic>qOh z*hCuY+SA>BGum({of`P$Ed0PsTy9DMnKlnKw|*)avrHFkM4T=ibamN&kbs*u!2Pw`bU;3ySoQGQ?T-8b-@g*c6^tC9YMS-u6{~pQ z);&!tFKYkf>|5&Mi|UWdk9=@lQEY~M#SiSbUbtJ|dEJRsVp`8*NK#f}l0GDlXOZ`~ zE%ev#yMt7hI62Pg84X)&3*&UZ-?-&Ce5ON2lwPQ#tWZX(7UZWW{@avalO)9y+$wJHtXQi+hpd%O{MUFlqiq|rmOTr++BgoRf_e#u$iA$8d1 zo<*iG|3nI%$y4p;< z_<-(hBSzEdQA)~EAU|>dk_ACPW;9@u5Jr9P=FUZi+;Z+a5M>;8Z^A^k zI)~y61}K~WyjtKdzG{7htkp!mXZ0$4cxF9VID=;o<8182xp>GK#WkOAI<@@+xbcdH z9{u@Ed}qrzqK34RO=M~7agi#kUX8xO9T1H>LS^utaIcH3&?KSLM88AX@Q0Z^x9WF) zm)C8REM_93udjb<-vrA0(-xhJ8{;)WxqP}-TZJd*j8JsP*rNT!8~bTxSj$l?_W(iA zY?rY^LTp#hL;+H)F-wZ}(C$Y^$t$|;fvk-m){X(i6v_^Nm~=mQ%r=`PGd#Yq?>^IX zwbEGZeuDX+)6z>H%6~vAg;T$eXw;ZJRn3S= z(xpKgGVsIi;rZ*2xjAm<7T!n*m89A`QPJ3}&=H)<6@ixf*5*DTgbJrb+u1ulJe2&D z!CdZ)MOSR5N`F1&5A2^W9D7b})DxsDXY#pGuSJK(L`PIUZbh=E-!b{b?QBe#vR-Mt zRb@5Wn@;H?`1iPM1{oZ?_+~EPZ_N`*1OESGZmPkjq!9)ZnZWhYsw>hhWJ_Na4i;0k zVf1(p3Xhuk*+;4CvUVYA?MR>c^C$sK0YXkr&qz%3&2}?S&fA=ZuTT;mlTU=u%ZgU5 
zFL2DRldvcn$gC7qwu2nz73uw&R=AOv{YYd@-p-bm__h*cu~LD%C?x5w$F{@6bgJ^ZTcr73s~lVuw=^ zhK`nBPyz;WX4W<0`eIo80_fW5#ksvx=8i)>z(a(v{6SK3KR>(5hm5%Z{u>M7QKa_# zh1kfI%lDqYTIF3kcVqRirr@Q>Id6?N_O}$xkMV}4+1^uFQ*dq;n3dn`%@pwl+(d>YtRUYXfM$f&N}Owt z`mHXHa7`{PFn{ba+HSY_a89AJQ-uK%cD z$)Q{zpt_XZ@{j$20q1?HrvF*q!%u(JH=BNYDBoA$+PC`+Junr~UBcI03?7)#0Nn=n z-=n=|M*vJI7uuM<4qY-B=AeSdJB7|V@M2Ezk-vPf4TP8nc+Cr5eYL2ATZl)0G32)q zKkqv2Q@fA|;nvYNY+=7u%kPqWnFH|lCc!@osa;OU_UJja1GQJJ0A5}~x;dhB?C(pO z25rs(L|W3VhzGerjk|Kr1%CfS^=3~<-W)-Y`p|W1aFJ*N7{!~3gZ1q~?EwJ!4G?0K+h}F0 z7v(h%I?^D1Tna5j8g~H0ljCAnQc{8RI}n!eTLPcqCEV zL>YtfP$OD{Sap77w)^5>zTNM~=I9(G=f6H_RCDce=)f#DS)v z;A&8iG+%!|Uw~DsJQiEOg_F=l0Ah&KJ(~{H5l$U}pcv8V7ayPe+G^~HN$H$}o2%>u zq_-vlCg6=e6Jm&e4h{@VD%(yVXgV?_&)U7@e+$`dk1;;N%x(Y3KRIU&*u=yx4Ga$A z<-|~;G2%e5L{p{LjDo6FN|kz;gb{m6 zImS0|fx~W)z_RBoTcO&veGY%?Q2;=C%Y(j{8@**sU(zicMk{U4CNqSz$G!*H{mRpA z!E*cw{(a{PlZpo|derkL7rQ(*)NIm|B2-Egh)Ep~F!QCW!jT8ypOV`j&J&O^J<8(o5vz643=|QI@k2K1ai=i<2uTh4Q8qP>adt`!=+Y7-Bb8`Ud>3YH zvmdP;$3c<#Y$ZZoe3uI#2JG>M<$UeqPYL(&uav00FguWi4|}Nn;VHzO%%nYddoBi} zArdmy_vq&IIXH759hNUbOBR2;pGJUq<{*Zy2^l1~ahfM`{M{B2m!%vysRFuP#E+m? z`IK5VynvVyUM%q9`J5*D54JYs6b}1`BrszO@e{anCrsidXXwPmLv4%f8zhxi7z5_} zeLOD_B~}4kZ7uW>i%!=JtD=7K@k5URVlkUE$@2Kbq@*)fU;ELN!sr6eNCY*!D&na_guMF~ zbLDSw1F{+KSGY)h79-4c`a8GZS;oXTAaI}J}yx8e`(l!pbVzqNr zu_qf~D4|UP_~1f?jGNeHIPkajF9AZN)FAmCL#9p<%PuV~{mVC#G(Vp_Ll5AA!6jXg zy;LHRwEEk^LT$SUY9#-i{+_apt8*q@1D>GC+#LD^_ZbTiFP!yXWd+s6Py}abmXVqf z^&?Y`Us|+M>R*b)OLHu>7@ZwVY=~Mt-z3kcr6y`xVJBVIE9G>JIk!lgXqwEa{o>d6 z+azdRwf4O(8BwX{sU6L$nt>VrnG+7hOvMA%jS?<~rQFv!(2w2qezhB=F3$MXYA^4O zsp$8{r>_p)-lws8?dKk}UtaxjKpWfd+UN4pDcK;HM{P@LuMKN&g=hOOWsNVQbu{cJ z1TrkWW2)DE_b%P;_74|Io}{3~mE>sN*eNoW7O4(DOsXCcB5J>ix>jtGW`DgdNnlL& zYX0O7hoR7$yPQvgcL%TiQnRhUr*xy3zKG9pwL^y468Y=Iaz^*dMxtFks-EbFu!Ya z&FpQ20PsqeQOO?dDCYg!1l0Mq>JF0+UoAiq|M1up=t?>(0xVu^w+-H8$8f|QH%!1x z@=kv??Cz99e?r;|fP!zM?%t*R3Lx*#C8r(`Tei0~*mEB@Ol(lnGpe&)I6saru_-82 z{)qY1N9N%Fw3gJtk9uSVSTBwy9n)HZhS3j)yHr2kBWq)WfPpau)kYJJ5V>1?dax|? 
zQ^!}$PkhO+ewG%2*f5>DK26^)?;kCC|YxpgPkMriOob&RQUoF_PV4n&Pd1#zT)T+}DDCzSNUL`r*IEA?g z>X)wv-~t=RGG(+9(mW5 z!X#D}&QQo+SJG12hsg1Yj8t{Ry%dv`JFB|*uOYOB5|=Mu_!9f|I3uD6=QHJnv)pDw z>wwUIm+i%zfPc`cA(!rNWZJ2VR65_!KMqK68uo(s)&@`ef4pa2I$*7 z`81_yu;2Foul;rsk%RB{b6vT>a8h~9X_ybjvpf}fk7@p63}Oi84DOna(*L|Q*xtx= z-2X3oaRvxR&2m97`maS#02|z(3RnIw!DzOfP4ElQfG)Qgp13Ga4B?v_=Ceq8rXTyI zD{V_1jQ%`@>G;=J$7$G#e1|Z3%=9rshVd;#UK)h=De}a) zG@0bI1V6uzb?~1WLNMP@U6K!IA;Di!wKfhW_zcI%! z$|wNKW!}}Fbb2JAF)c0n+)IsAu-oc9*3Dvv!~k#a?95EXhbt>9VOxufq9g9vf!@b; zU3p9tAx?&=*3$KF+Z;ret_-Je8>GYT(XB>zb@8?$y}&J?uy^HfKZLyGr0N ziB?`r0_8hmMnl{ct)C8BULjd`U_C=&H;&4kMK9ncEc#I7|^T-Id>Xcz9j_h>@3*3QI; zI{ECGGs-Wvf4+5Nu|H?e5K}r{v~fTCO;u_|<_Dd$B45hOr@nDLSNg;wAYu+WKSu)( zzr1}~d7}c}*GAMv4xTVz1ko8$U*hCWZI=;cs~r04`Av7$op(g>DBGv^N4ALSW5uM? z=Po9d#_+_779|$=*Vj!Ro$IFM)h>G&jBA=4q2e_xQ0#k=A+{Ih`z-0TzlT;8jukJ0{pYNGgHAWV!KdPBBAzgcr zFI2v_KSbir`uy~}v`(z?Fy6~$nlMiX`cJ{FgWRD1Ie3WhM*B9NG=A$i!%$~i>%Vod znVQCeb2EFwO5vA#)w)8}%%xAX!8lXAR)Rfb&r;3@yX}5!xk4WL*^zN2Z=;C+S5R?D z4&M=Km9~QS#y#UCa=*YYGodvsIP{A@%e>PXn_Ls%@8<|7ul|wAs{BqJq>)256C*%| z-N@m5_%nW8+N-5>LFE>XX?l~=GoA_Iz^HY%{LYq)JCkVqExubYuf?gid>W%aqc@j& z2n&i1g0vWLSr!*hpDih0#DJrMQfeXUf|VdAB^{~Iu=@9-*ywIoP9Xv{W8%K#?E_j` z-obOSyh_?~G{nKp>#xD>^apPqnj>ygg zW0C%B9YN&$c>3x)#}hRMtr7C$?UBR0x^PJK_ad#7K!O9rKLxzv+dYz4I-yOA zPueGbfGzYZA-q)|z!sGnSAmmkfWx5LYO7De-344&r)APw-3@+4H;N^ zG5^>5vxV+ZnpN3FB2}~2ZR|h!D1xcHPZv1t=L|~nG$327oBsT?kjBi043%Bh!6#+p zgd?6vUhDBQezm6{E}P^&{)rE%G)*oQ#2uYgsB+P2ev^Y z6)|-UDf>X*l!EII1O)2oLe_$C0vmD^Pt+9GDbgYOjxgRmtz%3vg(f^0k{*-)LGwOG zS6}%iL0dp<_ksu^B$NVj9c^CYC$ld}S>()+jy`DagGxvkzg6ikljS`*aat_HD5FX+xaJBR|2D`?NT! 
z+*keV3vsdWZyE_buHCOXCcHSi0}U5>yugL96wgpAnC8ah~s;J|z6mk666 zYS$61IVJi=KoAw)6!iN&tch{9Nw{lg0z_fQr1xlz+5OT%QHM=QBi~D`gI_?unF_O&^wfPOX}k2PHXL3eT3!dP z+D8eLrz-Em4tfy*UqG)M9FdbXle!Vs3jM}$d$C*8dr=e79Q88zZH!;mPUAfbSg7y= zTw~xFOtsVb&mn;sV4iVQAlytkHd=(B*eQ4tCGR&5e9NxR30VmlGPv+TUOym+#$u){N60^~$hYpMgoZA&X*f4bFgaqa(q1 zZ`G>@n!1pKV8B^7M)1v!QRq_0u^1|NuDyvukysR%2fjed6^V+&Ry=f}bnOz{x)5?- z07aMzBnkbGGiVwLqV<5+!6reSkv4sxLs;3!~i385tovRpZ;x)T>{PsXa zDv^k>f@45cxHZ172`zXuf_FLI_@>Fr}sdlsS3YkSbP=@p3G zmj9ePCP?4Xijz!h*VvZMD}PmQS7b${f5qcWR%7fG8XD|$nm7s@3ATFgPf)en zp7Ex)(UrY6!~?sYvu=T?DyR3uw{>n~1Ds;j2W;Z5u!4M81!?QXDnI$llswxEyXa2o z=as|=p7zqEUj@Klfg7tsUcYKQXRKmr&ane^>a$vDBg@(zQ>YY*lOu5(FJI3#-9bCQ zQS|Skw!)5^RB51J8KyMwg-=Yjrb3psfke2{!xwb#$q6L8o-O<)neiKs7OXvYc9ZP~y{^tmylReET#% z(c2?b(%s$FPomHB`{lgkxujUuWIpBk_D`MTSH89T{@fQP-mP_xJrhwHAfSn0ufYpn zTy&_OMl@4iKJOByZq6sJk74>q)CX#+L)=jxi10m{nP=!K#SCUva*V9Fckrsh8w+r5 z4an$OT}+Z0EY)qA_twsWR!w$A`O$~By^{WF?}*))?G`0nVqW#3=e3oMj<0XKN!^I0fN>9U*k7g&N1y+Eil^y&J@vots&?t}C_ zc$|BHll}stLx~%~zb9sN(Ip@hnxf>%{%Cp9D~>_E7`Eb?RCWvwswKv+B#bPPSg)LI zC7r?Q-T%9+_%}&Mu%Y8jgxkB@Ki5UVF61f%>6%E3=g4WL&=FA49dD0DXA`G;-<57k zMcwT3YsETDe20MhvC&jk z2({D+2@CyJTx>IYFEZ`;k2Iu*ynUj2XCyr=l$G}LaUay;6&%7L`h6=4BK#&Zv*S%D z@j4;A!<&~hB!N|x$;av#+a9v#&D6l#{BQ3)kKcs6i_j^P<5bF`3C<}z+!>vRUoCo$ ze`3V&sDyNP!;FXA$GbZle5PsC|GxDIl?@BL4vZai#C*pYA#VX>><|(N`uEd@pu*Hb z7{hI@dgcfmubFTKzhVDhUm8q|djp;EHd`MgXS#sZc{|YZ0>;S{=r1Kwvmnkz$gVE{ zJw7%7yRgxYE#C2$1yQULd^iKkeR{dy>(yb(N3b>_?1ML=!-rNWPta)7isK6BJN|bR zDM*t!=;P=`+6JTej{n9u6JnOAsP^hqP-IO43Z|_ zjc~m{1&H0iB_R9npj1fmx%Sxt7C|t?C9}u}XoA>XkNNLOD$tnF(0=wF&%}9ropk;r zcr7gu8)V})ZoXRMgcV2OaO=XQI!L!F)C0$hY_@Uc`U-B`%1Of zNwycIebA7lf%!NKLvmO%WN&1?g4rnR6HE&-LCML78C z3W0Wqbeub=+s_uj)-0CIy;v4BRuHe;@NuG$hg#_Y-!$P+u`W6I|L?wr^f?KlED-QYAF- zO6($duEKoO&(cy=4`YW>i9WMW9}rujxfo+4G|9X(*i2)l&*+t}tF+&GW~rcYQDwQ@ zORePdB)mGSsP~I}f<5Hr8Pwe+M&Ei_j5G;6p8g%>*U)F?j^{yVo7Oin8BCv@NZ5=o zkbIpwJj20t-h_2rh5jN15wZ7jpYOrB2_rFCjlO&O7tk83cpHFd%Ig6;c 
z`a_ffYl=6Xc6LQ{$=L@~2UhX*6eJi2T3+Ks7^M1{I8{>Y`eqt@oU5l{HYj8;17qsW z`qTeB>3kY!y07Ad~KUfrmp`g00Y`|9i8D6_jf-e`@Th-u z7Zg50A-Bo?g9q=Famv>f=beg~bG=L`0mU;A1!32R_K z0H|{1yEJBRS@c9<&Q6o2Mk z9N$)8Moh$dbUz3?C;rdMMDM>vn2iF8FxoV0`8aTgNr#{K^a3MrQ5e%Y%YN+BC}AO9 z?>+Xx)UJQ(uqK-s>WK*Y^X}5v|E~ck%@Ue*6NWm^5`-|~D)N?}EJPtl1F)gXE;-f! zl(`d?mhERO;NSnVlK%f$2V0(~Htk{k@6uU=a?xJ=zZTeMXOVsL|Lm1m(8My*Kfe2? zAu8dtvMa?aS3Z;cbOJHVobCO_)g{_$12*eza6R_GDIeT;#HVz<_`&*+jJOcVn55XC zZdVyUF+8J|3$D2=1vs`V4O$8^Az%969+JpZb-prQgpLGU%p4pV2{dSY9t85UgcB24 zW?f#dmOXVqR;MaEwF6krkhm1Yw_P#=-KGc#dZ#z4F;s{<96~29ErM|7#xKlC48iEE z5-YB?s>q&5$lY1izxz>e>|W0vmZAT*H+$OoDv`Z5!QIJcMIq8_W#oCT+t`b@V@rsb z3b7I@Aej>yEdHtiKiAI@khA#y`{FMiwq#5D6oEdZ(`X4)cNx$GiUTw!>O3hnR^)G0e*sfR)R4%M#Y9~Q%b)i5`L|zySrVIY z8-LlINuf;Nm9iX{ywFO(n%W*ku0bOe*qnIcxohRZidR-YV(+tns;NZp0>a8ha6aJZ zWFXQXsGzidPm$GSjlry6H(TnW@R%(q@r2L#Ri-ho z*_Gt@rmm@bJ$F#s+)HWm{={0LMS*udloOvZl=D<>FT0ZOqjIHwRfEl%okco5F@yC{ z>J7DRn#4vz|1Q3>j57~7wNm8>V-~;YylWQKnrTfV+G1WY;C6IxDv~pjGq63EnX2f> zaovc5C!<+4_Eol(*SK~%>)uAH;*ce}7?^x6OhS-V=8%H-Vl?8J^-DV_hn>GSS8?Xj zvQ;P?(I@e>8ZrWq$e!1?d(*403iDXhmPS!#tI?hOi ze9DB%a5&*ybM|!z5)-BMT=3BcZ@E7s-@vm!^tImz)~{PK{673sd{-H6zT27HS}qET z`q|mR|BOuEaYX;in~SycbNkb|znVDDp4DC!ygEOJX?&O0`yk^NtLIgT=K_GdpX#+{ zBUjwUNU+BI9U0*@>TW*s@EWDbuPP1JwC~TG9_VRM996axB$)41` z#}0#aQGc&&tzQ0egHLA`3yUjSOI6vp?7x3PwtGHM3pa^PSgSFOC2ipw^TB$h^Osj^ z&r<-X>Y#mwp?>XkBhghZbFe!-@RfZ9|3jH!H~)50$y+Ggh|e0`xifrnZK+)zV$3Q> z7c9O?{0u1CQ1MHd8-`5@>1x>taM-ZL^4+_*_*O%YWOEt%+Uu|+>3&)Id^o>Ulthia zO8exhN>8P2yUNpfkDwoqKP2ZFEXu`bdEIpTeoN`K-)1WHyM%Y#Ld;(~Eu{1EgY#d- zP5kEjn8~adpzF7$dSP-qw(hj9b=|yvn+~1cy+I^cIJ}8~_&->P ztULc%WFaEV!5MfK;RZ4{&kc|W>3Dm;@CD<Bnj$AUvhSq zcEDy?KG^P0P#o{M0m7z$-mZcv!6$zxa2cA#(!Zi-%?II1f0e1uxOfvZd10IbN>~X| z{QcDR@Lp#jYAS+#pWg8b7MmoRtLv!@y`7ke)GWbLt5;qGkBq zK1yj(uo2Bo8<9{ZA79&DW71#Z&sGS!V#7#tWh=bShicTlmMb4HD_epOi8(gc@u-W6 zI3LW7uJ51N2>+lE0hHbYagjUE0K+csb`1Hx1uL}Ps1cJb=nCWSVqyEN_zE)C>h$z6l4MviU96r?i4L z!WylRaa^GWy>Oq?*?hxLzqO@_@$fjR#}rkENOiunrc&&(k@kT5M zPYWC`Jy?4&O5 
zv$-Dv2H-DBg1<63=<^H#&j6$$&dxV0;0T#n0KBtct=9^$N%X=k4Vr$+5Utj}{0oKf z|G2UbHtJ^dsT5F%T}uDgKF0R)4uI{dpLlx$5H%`gijBiq=whPKuW!MBd&X+TE%|zN z1qb5dHEY|bm_th+blq(%u#dEg=GCz3{}EoUN2-*YGotRG@ptTig`)7oF}l+ScymQs^9BEy`$<4c(q&kbh)tmMyCFMlWl8ClYMUKyp^+UpxSL5ua?O|FND80nZO)Ftdm6T*} zf|A$<-G&&mG&-i2IH42h>^D_CY6e@{y1FC}`8cI&^{R7bhOdTX%0r5r|o5)Rh)gX??e`gD>;0+Yz)MaPuycjg%)6EhK6 z7f9%hq_*bguB0MYoRBwtDscx#(T=lLDsW%edBP|(K6C;ReQGk|%!LTWbhWfxhx;|G z_bF(k&sV2_L-&5;#Sn{pW~qRkC%zlw!)OBf#R5ZNbnvBYnH$|NY~ca{g2sw*R9!yuYvd*r+t)!1!dH)$xLcjD5z`ZI`=6 z4*$sVMdPWlEyUNadEF{s1uy0MCNh^LaGW{QYW^`pC|3>=0Ms478socRUmHNCltks< z5HCU9+p@bkHF2TU>8?TV$IInr{evt;v{D6gM`234AR+9zWFUVQ*(*llTH4+KVBzcv zNdSoy{vT;^laOgkVSg5v0g@S!?Pc@Lo|S<&qvZ2b0#^{8PwqE*{QcjfZ+{>BUPXq$ zm`YrMVVGZ+Z7@H|E|ny7|5_2p$JheujWc-P>PkeGyRzHhC^#ioBEq6XRSr+P z0qjB7FiKJ_oD1+t9XqKqRWF>*uDwUg=fttV4f))_@7z24GcEJ`{_co%F$7u}RoW>3 zh^)ngC|d%=U5uG)ODJPt7Osxi9<{%(1rzuOZ^I)CAk`6?2k||3;YZsM6LblhK~7L> z&!AI;-)&$tJSDquOK0F1E-RUP??Hgqw_#n=VT+)+`Zcg->hsX))Sy+TrLWf5-_~xJi z;>Jl5U<#p*z{%Wi$g*YVkA-Q{d74mpKGNJ1y+k1{f*k=LE{cpL%$uHaOXk|sz*-V9Lh=6r!D*?MLm&GRFC`Ayhn}ET9f4>V53~#YhITlV7eXO)P zr>OTwnvWb;W=|Cc>JU_a2L0#)6L$LUTi2^VaHo@PX8&Rna@ew_$(w)$SZwkJy+I?im$mCSQwi6wm8Xy zcp;rABc1FcKk318o?!~P4xWw<0ayCnRNvL`#*p7SAMWxqW6WfTT~~CA0WuRL=mxWj zYmU22p3iDnQyw*A`hx-n!TL|WM(xT7H^z7FT=#%-PO($4eo!Labl?v?9p^s+%(E7^ zoEq-ky&~}qmzlyshHuk@XmYzFhFXA@s{+aEXnRf3tk%c}P?zJKhx?Dl9U*HmU7u*| z=^;@5uyfA-k>#Ir;M zR%|kZ?zHEOI?y^aWQ}erBSQ)!MjP1&q(@*|l*Eya2>I6hcX~y8u0`4Q)X>gbw4uxe z*Eir{Qdg3Ktj%&LFas-^$lsQpdaG;ogdr+YSpXGo;nDELjo@kl26HOv+JM0M`&EE( z-KZ6Hh-qszXY-iDssUI^?3)gH6Zno_V+oe*p*#S+MpGF(`L??yjyG696U6w{T^r^t zJdfc$vdqI^@KIJ|^u9Kg_X?IoK7-$ZpG1Gi|5IBYX=Gby>+B&xd`j7zz%F&(QcHfa zH+D6gL;0k-%C+Oq5STWfLby@RM;fw^vwe%V(#O3mApHchZ^Kq_MC+t($*lS)fQTTy z#{M0dj$SDL`B1O=CyJ?&W1D>{bF(B#FjqZf@O()AGG|rDW_M#^1T>NN*ROFn*!s!w zJP3!4?W2tUc9GA~Cf@lEKH>y^@-+q$gm)gNVHHw2-jz2={k;#8N4>jNqlaT^c(@BX zU^m2F@XQm#JSOy;uEc{mz4+^T@svC!x3VYlji{Tatq@_nDYq`^UuIq??2T2uL$gTrW^C>8)^|>}{ 
z3VWxH*ZpJ39nX6aG-Er}RRvn4D=D^S#49@CLq6{jKC()BfLf*)REB}_zrOnFE=?T+}WyhcZ~Vrfc<2>i}B zi(YJ<4UeAp>(sBp=F?=O3Vo?fwVo@2li4~<7gii@L2RVp_U8O%Wih#u{c`!p`@*A3 z+)v|zY{_OA8EJ(>@$pt~#*-{$Q)ae$jJRts_*zLY^=e=9AgPAfAge;@KJ4sbAxod< zV}EAfQ4M9Cb-{)tMb|=%g6&M)3>+UWA=9;cok*4WBkPiDW-F;cea4XT#DI%x&SSQ} zT()KS!J^DV440l#a;Rs{{VK0_;@sI*(#2AS0WRx)~1d zZnwo9q;DjL$R_3z!MYz|S6 zx8`*oS9NMoo%x^Nc*gVEthhaY&1={rOiOe*1G2it8`*3gKK0s{m&>^|PiWt~R=OA~ zvfG07QE;ojg_zdKJfE!FkM&N@8LSktx*2j^M{&`MHs4*AoIf?R(@>G!_oya&-432n zyKxYK+neHTQj_I8uzoA;x&PEf%7Vj<-7xHK^Nn#f7v)UQ-1acwlwnKY+M+YJ_6Xnn z6`sqdjKT>g-Xoq4%1mteMyx-U6cHY?ni!|WmO1hwQ7HhOJxs9nB=oEgVT z&!s4%!RroBq$k^Bt0ljptZu*PtfcGvx+Q|Vdj|SOdYc>h64`7~Sm>0zVXn+Y+dI~1 zuj?7jhhK!YbN?P{ARwo3ikIRKujg}*D2ALBs07<~@Fvsw&FvD;5MZl#wQ**r^z;gD|5q0$=1+Iz7B1*`eptp8)B5jXyh0agJeZXzr4F&@caqJc zZQ-vN@3wgGd8#^}tdIYL-?~&EJL3g_zXVhTiLQilsj=>-E37lIc;KTe%%53GQ@ayX zbMY9S5B~-O_S99}s&eu>_5;|LP%gN>EuJeO?LM1^@(V}a56N50(M}}F;;aJ^j$A}$ z*UpgH-i_R<>!bOa_fbmaacw+7prCoHn}qJJ&1Y<9znl9G5&V~JhonBYnbWNC@J2ik zjv1A3Q42EAWsA|Y4WZ1rLQNv$VnY^l6PJAmuVR)t<~H#sVH8#$cAWoFyDz>YtZ+_# zOitp8hg;#L;jZVVla$Gs*Gr_a40i3sv-epE9U_3B6IBuFtwk5(-(Y2NNw~YFG^P9p{J)EPZhBv#U z>z66!KV22~Po^vq-bV+t=8>MUq~xW?@h=KJ0wgJ}kJcf-r3pM0r@=TbqKBp>qc}&R z!WBU<+CTEt^iyCPD^E0DlRbU*7~Ya<=Aze+KF;{11H2!ai|<;*NFHBT=Ihc3|J}*_ zmdIa-jcGP0nvjGBZN9aq%vlbL<3o4Mrz$g+YIfa(Gk=q0dqo+NlmehRfr%JB(W-$( zm@ z%xdqirfY8f3-gH`OGI?N4P9nuW%d~xVgd>rxR)15=KF|J=+T#x+-`<|`nXNN}B!lHU zK8(1vSn-eC$?so6;)|{Yk_=^iUWElN>aIM+=gi+Zzw(|b;g(Aq_vQ*PxqPIx-;BB$ zy2LA-lpXe-`@xS+u{afF@uE@g3PhupW+wCd0$wa#*`Mu5UfODHW%1n>9cAj;B1~r| zF?QLa;SbW@*l=d!Ze{+=R`q&aDZe)UPQh86s2fslud7`Pucqi^2}k?Qum!0U;%L|} z62=%$wzUF)Q2|*PUFhPZ+U|IFG(l;Sfv!I7A^sHJ_$e1t%uIz@6&x8iJF+0b=exZbWajPo3w|TQ#d;a76gKuy>G;tK*o9!!U>Cg_X0XM5nd0pC<6~-^&aEsCY z3p_0sFo5^xPkH~53FH+geqtL+hogH}c&p}~yXm#(3zyo586F0ZgoT?3+rx^Zw@`$R z`U#5`;~<|TfeSvEm)DJz?XeW;dM)uQW{+0-3!_Bzh?y6ERaHjfR+5$(VE67&=&<4j zH{%G>W3>l%B|HgVo`z`pbxPwuwIAYdP!Me$st<^&;`;nnoZE}V7TA_USOMuE7ZTyu 
z_adB?`L^nm7SAqvhX^qQ=C2ny-1^x*#!7v9v;H?mwUd()L`h1Ysq<_Sv6m`0I+aLq zEu|=;xa_>XF^1FZTriH!Bf>*>NAWNr{&GDWHnh4~FyJCnPvm-l8IZ)a?{z^w_kh_D z*8$~A9?W_N7TGw~O@BB_saFXVF45?4ezS>^MCH8Drn~oo_mxN-;gEwq$T6cZ%g0iY z#CP|oR>$~0l)FYo@?-r)WZ2}bZ>qQTYYjsTNF4I}2OXrzfvEri%zGi_=K4+v7`#pZ1O=^|r-h`VI*h-&B`d z&H)3|hc-h0aGq#ypIiVQUp$*N~>{Z5orlph?mq&Hzw(u zyqH~y`_dkZ&R7qdp6w}bTV27df{H#)CgGfUWB>{`gn@*81@)#r66Yb_G&Z_~;R?o6 zS+g0UXFqsKJhA0GKbB6JFd2xkp1Y&6x9}Pa4B22Zl8vLlLGL&giMQz5 zByk_ey(H-0V{fU$Gm!sC$hqo9pu_JkR(3e82Yt9h|?DkpFcdVYGhd@s2|EZh@Y=>;&t_Mb@SS&9cjv1bfQT z`7$8Mj~2wcCI;J7iFhjlyFvhLXj)NA*ZqS^Z-Uu`NA+f5@Kn4Ygx z+H=qQ`-K#jDN7jX^eyx}#+>!T&-W>-&+U<@Akaduzuu3Hff{w`i-8Yjm)Pid{==RN ztcQdmn{T2@mBh%5f>ZVP!7Gz%;nd_MFz{Of^+5Wm{$n94s#s!LGIVQ>`P*T`ymLN} z8@S4AkvuQ-Dk)x9XTN69`B1;gBDZTk_xLU;?%GzsS*MC8;!|0&&!rC;_sO;oGKhLC zO@-+O@rKR<#_V-B+%*h3_gZdR4^wKP=atmLK5mzne374j^|N{`ro>E#F3MM3SH^ZS z#bMI_I#8>5!-VcFtPlFa>9O9X+v+HqGUE@FjNvDKr_Tb~RAE~4?EBVmdy;X*Xm{eQ z3Rviu1E$Y&vwDvkPVS*OmVXtr0ULN-qqie-h75TtG8PU+Be%Z4E`iY0wu7w8s2a0ds#Xqmtwvr+Oc;Bxhlql_@75cz z>X|;UD&7k?XvfM-dXHeF5^2GlsF_uy=4BX|`o0EWio(mWCOu)Lvp?UFIah4wLp$$A zP?5KPL`T}0ga?)fe<@32e|3_j`pdjgMneJ{{fv@onuyL;yZ% zS1?3LQoo^~KA2K(c3AMG={|w{vi{De!!wWUnKhxu73w}ze2LtF*qq>h)ZxkTP5n2k z)QeU*Xkl9Kr0MW479kb|MMgEW-Tr?l0FuXKx)1(rp}nfq|0NlQLd6x+o>KlN*JXq+ z#KD5nTPk+zpV5DiqomEqxU0$O$oTQ&uqj0to`&^#1;j4u43R!ErkY=+$zrV)8p_~sY$l(i3?BMz|>C!++sUmp8B!q&CO@6|(HOqz#ohC!j z%^6Vf-1;BfLA;+xa+NsX7f3_w3~YFM6rZS`SV7otkU%ei6sr1Nc?0v|@lqFoKYkZX z&Vx`GdLZu2U91yh83MeGeKJA1#!V8LvODRudo3Wqw(qtw5RhcQg0rW*80r~v0{SZJ?L4W$dnZ12*|bZW&DCEJwV%Q zsq*)a2jg<&ErRX&rFgm6vl+nCA_HwwArmaab_(3LBx8iff)@SsN03PT<`+58d?&V+ zeTR>4Yqrjbgy_~lZKZHn{iM9uDXa${Uv+%p1qIJQA_a9||nILUfJqp}^S zZ7}FjlR?I8hgBR8%*67-yW=jIvWXj@^#2aR;psZxshc9Ly>F{3C>sIaKm_Jp1NBwB zp;l&I`pVk7-9x+~BIBnB*YYFkaK^`Q$2>UVz_y#kK%Z-31`DhqUe3*bVW6k1e9iH`5zYgo`)rd` zICCdXM2R|-_0gZ7VYVDAMG8sNul1NN3zIa>4X>WuC?Z7jS7y0WjMqTc9C~|eYwIh7 z)z-Lu2R$xx8|0U~!m9xEDF*ZZ{WvV3$u99f;O_Bg7S2*JMQ{LrPgtk*+4g%z=s-oG 
zdd!tn5h49uEywG1Nt&8bE8iY5GTsqf_82xtqDML671+0bwxjB;$d*bhb*b}cCly9) z>bwWe z6vz+y_qR*#;(Q#y43*T4?CQ`MiTQXoA$quFO-+J13aliOo01S3zQma)9kSW;2z2|- z&qz15tHFzzqZ`GYvVP5Go}UbNz2gzoQR5M?C%l=v!LgBP0b>Frnw>@&VTLx$Dmq65 zh}yqE!$55VSu3*qo|fl8vP3g|EC|xItud^)L?rvGfC_0c1=4?mlCiu{y~@YhpFs7x z|2qrkOEKfYt7e`uLHHMvS;d_3nL&d|689-l*wKz)x z!mLpg$bfaOZ~qnyNl_*lT7w@6v$~-6Cg8~!)a49_{hYhStV;i41CMM45~I5~(m3*@ z*dJI?+j+ai;-9?^NHAGuqWscea))c6yR|d%%FqD%bU=iXSkbYhd>5;Ls;CAKy()HhA`1aR}^%+ZU9z1yOIt)qICcOLUjvE`R z!FuiZ>!U)*)_oHjnxz$=KB@0-t{!m+QunY~-$aNvK6^%>PsqVtUFy4Nz`<^252+0u z>(BtwTp6>zhV@wZxRr~thXf>6N3N{oz$1-w1sVFTuMIf3>Y!DPjLt%)1pK-wk=dnc z!}y#cXQ7KsJ9zomSUteV*M|+N#H9v9miw7?j8v9wRynln4d-QEC93~D%I0ey zrD2XW)?jU`iP6&O9VSj&Mlff|M}}?c11w924RbC%zwNRM*lS$YSu=lEr0q!QbHp1s z4*L8GQvDrO-ler>7o;|0*;$o2MOXYv zIaJD@$v}<`@O3%~YvjU!?@K7^bNHMB{oH_+@FKl?m4noJy*^0{6cJ5-*3bJR@@}us9+sMu;kzm+YFv3I5!|^OI6`dwxMEq&Qv($Hy z-3`?DtN4Lzi@-G<5{xyBj=Ql=s?7*a#XS?WrQACZdJ3movAyzVh;Y@`+Hy@}uVosS z0?7ewhltfe2d(4;SFgK>&Ubr?z!-&}EJnCrQbm={PujM|Q>av9%WEz>EVOotz$o1~ zWqdjnQ1knS+Rb6{>J{9FC*9l5w05*S83rdm9^omA!^xpTsQDoe&ynFRM2@!3!>J4n zzI{=hhTTNz{tS_S9p!G9Uvn4+Kl;y28IH8ejuJojG zF*~cy+WT?aPdPoe?W$Zwhg*ZHkF4Y)U_K3pgkeZ+e>&Hl^rw6PHqI@+t7Hf8Z#x4E z{w4V5FLI%QZ-*ay~%Nx#dd+dN<+? 
zZa(O!F0N@bSHrmw4XwW)YDmAf0A?YXSKC&+{%>~bE;6A@22JV!>c>jB32&Rt+l+!0_)iQ z7#5d1qRMti2AUABzq~}CV+u9tZSnaI6%B5a_y_!m@2F-aV1{|VV%x@%aZlj3|SDB3Y*dbjtH=JGHD(5|x7gHXZU(^3mH zUW2dEde|In9G7_E2;|;PWF~xkTK<+J7(Xx;OgtCmBUi0xlV-p&O^TKI<2-X8FPlRL-PJ#CRjhKocC&#$6R&O1 z+;?^3@;si08N-T8#wV5Uc%PmgMXifHU0WIJftO?R6#gAb(dAnuh(8w$gKmEIwqVLv zCW@wW(+uh`Ut}0|4`7ET#63htPC?qNKF0X>g?-~_jR*qLjEI1AL3$Ylf^-Q@L_oUqBE1e> zKtMtdO}dm2LhtM!QE+D5b9Ud`cg~)-o1-M$eD}LwyI;Ha#}}?52O?!4J#pd$2qu3= z?ZgQp_=yw5kTWNN5!+0sec`cfL9-`V{i~RFfh>9*Ecvg*x%m|db zc%3|Xl8A_in3$NHoSfnu#rgB+DJdzLn3$NEnJ-?t#Kz9f!NGAwP>h$Cm+$&*0ikO` z!a^bgdriAEK3E(9Om6VdaBO!7}Mn*uilEjeGp2hOiu=0xXy861hhWf_NhUV6mme!Wfqqw$?_D;-~ z?w+3B{yu_OmcIZ}!VC=#4lfS@uZbZ*xg(<^qhnu3=kQ;F*Vx$C@v-rViSfyaiOGq{ zsmaNysp*;N>6w|CZ{KF-XTHtOew&+}otvATpPO5lpI?|?z%F2Mi;GLRB|Ls<8INCC zUS3^UnZc|q&a95Ltzl}clf5|sHAvg3soXnphJ!~yRQ%ShTTmqx)qCpd21eF4_V$i{ z{q@(&z|c4E;@+p^M>7%r(IS$VjZ^wQWzF z5cr$${}0IH2ZJY0bP>VsNUJ;QFYNga@@TvFJB9O>XJbT0-~-8)185WTQ8mT&Px%td z938sr7VovnbY(t5KayGamAx@F`5Pn*I_a(R_bt{Go=k6=GdkRqYOVR~__z?8cG1fd z^(cDaHB;Y=n{i6FjQ#ZK`uV8pslZR7D{fE}8|?qle@q=Z{03F7J&s$w(N_5jXGUCC zc-MEPbgc3+ndRIx*s`bv_j?qPR3l|&vQN4vKNdD{GtaswN}t$!TI zxIB9mlOPheh~Iy-L$b$Lm@R-Cgs0f=PMzfFK*L2`HQ1Qs2Jg}ZEqOkPmogyt_=L+G z7?l9G3SWH&Do!H31i^};c;J%?sa7%BFUlKI_l#*`Hd$KU6_($0%s!NLm za6NF$9#}F9yKJ%Xa0Z#b1#k$T=67zGGK%w!yhdUUuY@|lx(D*)J=N*2-Uw4x z$!)ttH($@eqP#C{ISP)q1UrX!A|zqKn<_%E){xhFS!^%pWndU$*pnt@vQ~svNy1G% z>W#(m`x7H}8D5>6=eDEe*QJJ-`xo3@YO*VB_olneOP!o> zw-s=mWy5~p)w7|#CzJ-6$+S4IR!b~a_`D;1xv9-^2#(GdV$JSaRAKZwNF}x&8GU?^ zx*;aM5F6)Sci#$PZ?o5E8rLBf!??YAwyXVoaJt@z&O{Nd!;3+K@P2qNc5v&4xj|3p z>;*P#P~1r)k~w(n-_>LLSN)753?3zNKVZ{=0-6sB0Rh}EKNT0%pO0(faa-!pDz!MPGu zBlVg=IWDGy$^{*r;Y=Z`@M{#%ghri8zpmux{9Uji%d9uEl@}QBHha7l5AV+UaTf~u z)WC4k3m(OFv-_${%DRmYL9sHjnt9G1oLb#t0VVH?DKo4mZQ@khb81Y$6M;do?@o^^ z0V@ty1=_oB;guI=>&N6V12-rPgE&ZT5#9lYZ^Tr)oT2< zjb}fp#2Z?Wqjg@t@|{JcjMA`-PFkw4gd|CL z3yHHo+Vnt8cQw3hhl(@_#VKJ4*!>k~#hM`GL=+^g(+n+}*5i~!8TGt8ymnn3dSwQo zItzVWCAAN8x-67>CkG8m0iB=TvBz$1`(UUxyk8_Sep#rzqpjrdRQ>^pwXODp 
z%jUM~e-NY4!fq#KYissXgRQ*qAMA%8<%_qUjnideeo_|QrLjA;#k$t4*sWj?ew7ZV zt|ji?X56r5<)p!}!K>$p-9#HF3YHqd%Kc`+xom!0H?nWQ5N|(Y3+yb|%vVy-!g@?y zafUuS9u^g~Lo1H{j=ES&ubQQj?eQ>A+KWya)A|WI!J?V&7JHK#rBzm%!B&gvY&=ys z-w3fXQqtdUXfWueclXXAi*iz0gv32w3OtHW&ce6Wk_^)2sN zd_t#-`MRcQ^eiycE@8fQuCG@$_Qm{;{o3aA;^@}~lwInZwnX)JJ2zfmrnKURP=I~Q zZH*FPvv2vtBf$#KtT_>OqajH<1+~%GrmOvJ^QyQtO?Jt%Z9abz*N#sSy8@bW8_i}i zsPM2HO}%^{?VcidjnC){E$$Wjn6p!V3pUwI0}7+9Kkpe1XuUZte6{*gNTnaRVpU0L zaa|o6mA9K*zNg{qsd(NNyJ-Tk)lyVrhSfTtTL!!MN%~sr?q`QbU0k$9!;!)A6lMhn zvsFIfnrklw=U5WGRkGL?O$^X*q7J9a*){LO83!CE+**S-*EOfMtMtSqW53*pjHQE7 z&IApQ}lc{CO4>{v?1Z3M_WPLaOt)qFuY6;a)ANiV!nj@EMI682f2O_G}p z)xz|oZS0#NoR3f#U~Pb$v76(2ANeG%p_uued>coib1Xxf;d#Ctxfrc=&deG?F@Q>k@of?>FR_KYT2 z;a#dWH7-CAcV5I6A%@>CNo1V*s>&O;kp|XvcII>tchYjDA<7wKSl~~D*#z2q5947h%5Q;<5g^j1 zlPdPIie1LVo10kF`4Fd~!EKm~Z`#dmlAAnCGZsoSir!bW4aKMjJKc+9rG|XQtVOd$ z!>)Hk9q7*E{ci^!Rrf zOH?_UiBDuq&$EhBh`~gCNyHVtlsJaHN0{oIM=PAW`>3d6p#(a?MQ3A$yISj50`3DV z?3CnAbI94)G==(!?Y*Oj?M&m0sSJ{1DWvKqR$oLP zv0mSh2t^fSyzxy$-lhku`_%;d%t9`Xvb-L!sp~5Auf#@1U1eTue2Ju_ZS?v0O$BkU zOH4%yA)0F7XBT5{JKH;c)8+#CMPZGCoQjfGuO=RM4Vnc&;?YBGv#h9skF#988l2qtwdTVQQ$L|wgEA>bed3`U!NlX*)2@vCebrQ+t_bai*a6LsD)LxG4MSi*NQ$VAH<sk47Ft>9IG-h|`nLMbAOquV zEfHX45zi6;W|@s5H}LBd%p%))L1l zTFKSAj$OvD;Z*PUFRq~q)IQ^zA1`S5wLfg=(oV(EhS55VeJ&z#j14zow`1|#Ou z0#JL4;Vtd0n_u^xsWM`6t~U%De_^4meno>X7 zyemn(EcQV$b*GM~n5WS#G+c45aoup_GA_J-)Q zXwe+)qImeah@`ZyN~P$cW1z-TO^zQM6CC0naR-L3tPLI*ieNL%YSP1_*d#9GIKogx zSGS_&QEbCp@7_B$C<4KFGdhQi4OTV~-9`&8ic5MJ#Re1g$Hm;?L9xx+SK`R=8RBfP z`53StZ3P?5;TwY=i*yLvFnh%%kAuENh5z}YR&7jw3HO5O;GFAuww|vS4w(D#6g9Nc zYNtos*F=iR=Qn-6`d>^V%F$=qmusD1dCC7WiL}Cv>lE=p=kky5Qg2$XvhW6boN1DJ z$v`fRj(N+%L$RMYb%+Rbo9X&`a72@o*940+$mim$3ZA$XdSK$T3tf_Vt$uRu?+}lI z$rI$RXBMgf?kNTmXh2LPH+&a@XR6|3+SJesCXP{{=_}RhS8g=})j9o0<0c zAdgcb)Ue+C%ZvwS%ps^uBbn>_=lkJo+O2;7+aq$G2&6rq2)(&~31I%|^;7@X`ZIRH z9j?1YkQY|-qVVEjZbq{vXJ)AnPcyv`PxHoFA(XjG)gIEi2E@J#b**hXHQd~AYz z^u@#^&gmK zvzCuTR~J5=fn|g!p+B;+vSrB%z&e5gUKcPlvq^{v+(tiT`+o(1eh~M8HV$vLd`$-?t 
zM2U_1R3FWb)hn(Nw1QqCIz*r-B)t7N-?X55_qm~5UOctu;lihH2gX{^+l7HWdRmcc zWQPjKF7Lg@w-HS*687lzdDMN@C_6oKx)ovPo-2MwOY8NifAimaDLPGmjqzP>eKwUS zkb$kyAaz(8tm1aRCXU_cCTYctdD>`XD^W`{!6qEM(2ihDo6kG^O=J=v>qb z2zu29h0&su;Fi1}g3#cZnDLXiM2*k(8|{QQT5$6e#fww^)S;gn{;j5NBJZ4zNZ79g zm%;NmRhVz)eR1HO2|_-rVzB`L8r z{sO3yOlfVBR}!vPJU@bw!4%Urh#!t+X_M=+?72G)(^r zPO)A1ZPC@toyuAk(_vS6E&H$D5 z*nW=~Q<>`6Q%i8~_!bm)^sB7~NaA+z=#v15 zojE5u(X}@=!T6C_2=m;>xHZEdXQH@+U1M`j6xZrY4|Z5t>)!1RTIleIA3u2qWyb;}h^ACPa){<2&`!ubV9`Kki4s9TL)a{834D41*<=C9|2;sf zCgidR6o%@}`ya=J*5Cg!Q?=1?uoUc`{$Q`NAHL5FWoirFUozMn>Dar3*5K;H?HG=3 z^!YyMX+f%aSMR%dtZsx9sfFyXj&f*eTmvic?)S!V*d#CSHdo8OOMfa7=DZs&1zT1y z+B)eGLZ>lh4iVn+_xS5n1nF|q0r?;4m;reDvcvxMD?Aq_RL{qx$AOXsJp9-qNb$xb zZC_{Hw?wPmz$Ebg(d3jc=8b%O~=P5Q^~oA||ypV6bCRdp|rszWZyj z2i6!iK-FemizXs>K@lvy7K^>3!fn+gK^(Z!zF&8^LXzfwI6pbEjMqT_Zen+jMh>~mHJ$N<0f`4diX7=ubD%hk2zHC7mZ3} z?(UleG^*~ojiusY<`A4%K1-`FwCy(=W!`jHO10hYqAC_s>4B7u_t(n zPykYj0on;Hf5#kBxMh6J-y4VH3_BYluRBNI4^M|O5YXMZZ1exciMI*9Xm(TH5cQzNTb_+?jT(^W zr9jxzE3}tGO1kgm#&;uyIMR}JU)>p3Q>{U&fzfXOL0_WuRg^dlDD!`}iOXlI*0QWz zCq^R#b{a*iw|H+0Nd4rvb$mcc9c4usSsL=wfM^zV?5QqHN6V~IA3^00sHnfEyh-}g z0*#uhWcC%jn@81-gmrdZ8KudkvL1B%r|oI;?f8;LlqVjUCQYk*LW0pEL@F6$-NPKn7JSpiQ7C*i8GktCUex3n*m1@_B{N^NInf+2?J_45qu+|59vAeefeH1r2Yd)k_TqF#AuArRt$X3rqJ&1s#_1%offhGHa!eL5rp_7h4n*dmW zaP|>SVJUCsWxN)zWpz{gun_MW1q|+ZvW!t zEr>nT`msoup zcfz}FBElVvb#9f*4$0fv)*+X}&z94PbPt^ovwpzJL&vyRpI^f4`AtY!tDkYky03P5 zT2@S1i(%MtVe2>IA!2{7q61BACOrt(+QToZ56)U856G6}X+Y^0#vktI=3*%VOYHM$ zh_CAR4mPYpRM9bK{kOa997bJ#i;qS)EQhVI?3yKQ-EA?xh`KCSqgLcIzOk${g;5>u zMWuYo9Z`Mzk%mqx8?*;s?aO^3bMuD08R(bApnr3YhRartKZcVw@y@VeZ%&|CBF)k8 zloVAF#)wr1L_&fOyXP#5eFA#Mxrx4M?BK3$#dQD5B@+j^BL{X-ppYj%iPr=&^Jve~ zHTbgpQ$)_g^}bn$uobR7VU;2iNT0(szE=lU`%aNf)((k-(8j=$lVq46+h=d5#Z_^C zAl@i-X+8f9qgq{bpUpZ{G5(@hN4w-1br$X3DuXXMk=TTis1f=$B2exkSnm1`xc3oT05=Spv7XxZf>pHa2H$cM0|Y~q~R z8sBZcI80CevU5_%ELei&-;~|H8!fPlaMs#BbTqJ+vgiQ6qWA^jJvl2t4Uc`Admb!AkEXJQCQ7 zA@;7BN|3sq49MW^^;Ycfo#6w`{YS!#`&zNRoz}JqLP%Mv$d1)_xvOe-1G`iW!=qlh 
zQAQMJcn^A}p`M%*Sz(W8kg+M|EE*E*ful`(nSV>;Z1t561`A=bxCuq?hWOq_)Xidr z%p^e>VVcb!(aarhA2xv!$OrESkf`=iO+H9=xC=)j-w zmU>=RB<%TrNir&W5Z8bzdmkSl`cV#wIA<9wVkgODx*zC*7LHol9D$CvpTdb$vT60h z?=${JW7K773PV)>&ouc{>ru|7LhaOh$_xs+#*b)eg~CcUSNn5A93A=|V?54B#`P+s zl#RfDt9a@%n`A|vv~P&;*1@PN+wr}UnZP9mY;WDel=ycvJ`%qE7^FbvX5KY2PKM9S zva`S4*Oa8g;XtEwe#%dBh1GFX$?%LK&H`dvPRHVs9A6(^5^XfLt~ri~VW}Z$$-PE~ zk)ccO%9L1l{Y^~v2ktq|%<~Idv0CFfMZ#<>T-Pi=s+2T8sl|RQm{g6r(wLUpzIlDz zvM^Yg$k?W7d>9+#1*@iJsYwVenKm)5L;Hh_u)mEAVA^qYT~l@6iE&4<-o{QOus19z zcF?+zhI5f4BdDc;aNy;~MEa>?S&1>h5qFrG|0OaH)P+6#OVWo5p|#hy)6K ze_(=NDJiiu-UH8-HEp%eI(INTw3c~_&oV~`J;?Y!!F#rSEoUo{b4a0`xSjpFo>BV6 zxYge4rRwUmB4LjQD+4#|uLY!vbQhj(JS=EHvC(OUuA=Qj64!a&y?OH6 zWCEdshI1&Gy6nxH>!RhJ7mCrp+B~@!EfnUfr^F(hcV;ZsPPD&sEH-fFy2M!W4Nnl8gSvbk+srT5Fo9Erox#0Oxxf!}F z$?NsjUbB20p7m|N^Z&^f2@7>qN$bVx0O{e8dxQ%}O%`}1aHdXp<# zZeu~spiT+8(^UQsu$Sx&L>Y^RnD5YiUcd$Z7bp6Uf2DX_Oeg-8gCBtmIVy5Zo#c~9LJDB;iENl_rab(oGlPsO<^3C_BTuBgBI_g8UE_Mz`F5rnP?ki93L zym**+l{Go2t*~UG|NBLq!@VVz?Dn94&v;r<(4KWB|rY#)hgJbA`6Mu+#Hk)NC?DyDgLC9<|q<&GVx!qGXI4I8pC$f zUJLt#sAmQrb!b`*zX-^L5-7^g4P-kGz(sDk{Uk+kQkb9V)cNQ?fFNuJn%h}?Q$-21 z#k7IbXS*|7DsXcM_?SAGWoj9phtL~B$8a0~+zI*}Uy#ws?v3GVZtMh6z#nI-UwtWj z7O0|n8GaeoUsc0Di+$h!4+7SdQTMsjd)*&F2TmOG8^I(a(>kd7kL9QG6F)qR`qx?y z?Wr$tJ4ofloBy?4C6djdoN%5jujuawVeUM;eCBdAdf(CVmmS_f870 zjQ5u8epe{fzle4%q7I(`8x>Finau}Wx#6|~&|sW8?>Zknx>~LkHk=kkjaF6xMD0wK z^MGFqE_NiFG-Eu;K{_c=VpjSVkCJ02V~XiDgO7-u)2`^KPA4QkR+BL)@zy&eN2*ek z@Kps)8`KE2jL3JvW2blPk@+A(b1u|zujO9YTY&yG*|$_cx>a{5b2ZiRa8NQAVX=^z zvT-Bm06`FQH(Z}_UC>`iKSNlCwc<(#*7#WWYmEAF3f`%Z3qaRr&O!(@<@?<@2zKa9 z-qsN-66Qx(Mpxa<0uO6^XMnz*TR%48 z$E-WAKt{xe8=_nA%Y|T zCA3rr-Z&9*yQEJE=@}saQMOOeds6B^*`+efs)Ayl)d1%UP7;` zS#qNp`HbRl1Ekx}Uf|Dh(s&678y8vOjjoXPz?)sRfNVVcZokP-BlYpi)X9{*ys9y*y9V0B!^MbI1jLE7^hY+|$ z;?R4fwSWM@`Kb{TloJh_V*a7CduO%|<}R=@5!yJowj*liu6XShA6mx7lOqYNQQzJl zgmV$Pr7j=ihdd7%cP71bU~y=%mAn=Zt4de;Ue~qdEK%+5R}>!pcZ~~ribwn?R9S}4 zWW>@`4GcMMJu66)`PD{4-8lp7TnUmZmH~NNewlslebv4BJyk@krUFY`5j8_g^jJ1g 
ztJUozC%H!b>89*WLQ`mD@FjA2`hAR0S;{$_;TON*7d4xB*nux-Y6$RE1K*A8Pwgi8 zCPF*IrF;bLTv(Ml4?zf(z%Phk_@bR%acu0_;0U7Gt=8Merl9(wz%Xe7XoAgE>RCW6 zq#U=FvxpK`ptZA2U*BqKy=eck7P{nUxq)eP@=q;S1smcme&OiP=A#8M@bR_uNIl{+ zRFzXaSYVPje1Li>z|i(Rhvk#v%fix3Y${xw2nATwSn{exH;)^K!XF)U`=)A3s zWDAFB?bBc6nRC9Q#X5U^Y~L~cylC8zSU(%cyHnwa-)AmycB9N|`=z$7%aX0AWX#zP zi62&heE=nKpip6{H7w(}hPi#PXj6Q{G~)SS07W%$0*cHh1Vh(T?Q&@TO98Fo63w2R zaUns3_S*(f3RDbK+-t}7e!W(ga(;2D^0l=~KQF4Osiv%0<>t}i%jZoX*tKj3-%7OQ z%BzklP%`s(TQCC|wS2wii8$chn$6ni(<(g4dOYV+t$)n@97m?l?kJe!`cDHK*6$b$ zt`rm}saq6cGY{#UypXfX*a+PFArR^1h(yJ{_d=rh(C)R|kDzG;A3W;2L^rV?0-kfS zUGsl}emD!mh}6c|`Ik7a{1J9XmqC-n)25>obmSKSEFvg7iXE8#*o7a?6L-fkAQ0zA zq16jVsr>ufuRzI{30s`A)^H&RDX^xe z&p9-BX2RXmB*(*B?8(s-51BX8Ujne4^xm3)PbE0vDyp$-MOE=+gUyP3+BOb}9&OyE z{A9;7^f%oPOl}73v867d`VV($Fm%#x9GwA59#z$Elrk1@R>BErmJAT$Jvkv)V|ln5 zTd~Jh{;vsUpkyrPUUh)5yOI{S-79O&;-_@au|_E1Y82puw@%=B!&Zptu$)`okw0h~ z83fAR4hf>u9=mU{x!J6m3HNWlFR%nGkaS{<{fDSF?1B21Dok4OjiXXI+t73U%7f_^ zGO)rmAe1ftQR}iiEC(QFH_=B?{oLLtqyW!GUhCCY7&HB`SVh{VmrG3=T7WzagiG4)9e8%nwZnqsriQ4+UZSD(&ZN9EC98a+B+d5^GzX9}bF9Dau2#LnJ zvsJt~-bpsvS7BMK-pKO1sj`FEc{eIlr5u&|Rd&5d6ew)EzGq;v%XEA==2G4Z zFXPrV<~NL_7YfT>ySkv%_QmYt58kF9LANGD`|KimQX%V97kC`x2tHwTg%y`i-Q0s0E~fkomU7NcGCgktkXjOWo}ha3CDpjSTi61vb~WznmTStgSS* z8GCFd&k55c+~fl+Nt9N9ck{mMH(u@>rvX*Ll4e;TI>oy*zF81uTwGIZuHkDSj!9Ayr_pc5tO5vh+NU545hU29E%=Q+o`P; znE?Id=tof5##taorR5)S#M{ebVs0w56Y@(!g7nPLmDIZv*bzw?>LrI~f^O9*@n7!f zdu0X|v+&H5_<@Y52tu8vT*7DwMrT zXZbK8n`&%lF$=g4z-6kn8dUzE(}ba70=X3XoG z0p@)bK-RSF&ac0!0(<+;Z7CWO&v&YHL>edq2>L7TInXi$F3_w+>P}tYXOa~Lv3E`` z=!4h|2!#Tn1S=MHRNZGQdK2w#+mB4B`()qd59mp>H)+8Wcq&H2;Co5)ou#ZXmu1f4 zYF?Z{4;rC90I%gIx81orUToJO^#*nJn8y25te(YRtvqyM)sN0Htujm@6q3?cjuYJ> zb#ZBWq2SzwhKiB_!Z?V0ONZu`Px`)KdT%Hy-uHRRji&r)(nQlToB zwX=j^wG|D=Qt$o(X74;Yt08f0Ju#}=S`mCkox;aOud`rBH;@BG6E>WyWR#fUrQ`mB zj#E0Jz9S?a(_OB&jPjFTA2kN%0>xX2;m38t3cUx8_LrqaVl(4_ijm+>x0ZMfCL(Sh z7wn>V^F5$4A|wT$Q!OBgzkXNzdOdW%c%aYkNLfLOQG*-G7bPHU-gyf>%2 ztROJOUx)`^FMK_+mJQtN3a&e>q+QWSwAqzo*0Z2L=$>o`GL0q*-|f9JFQ 
z%`hS39Svp_{}?Y$c9`I=(2G((AO+eHw_Z(=5scZgjYOdZxqMZ;4pWo%eIB5{VjHMa zO=0#&&dt-00O@~tj%hF>m<{k*#1f+F3-HDjW52x73^WITY-IqxCSMb!j>LZb9&d)TMi8 z&$%=Y#5=bHH@WR!u0v6m8*&4r7(+`5JMz+gq0ffYmFeONch`Ou27{^glbc-SpRLiG z*38fUk{ZD^AgawF4td{E{;0>N1@Sk+Qewh@gFsAkGcAQJs+Qc~+ly4&H#MCBk>->B zu!A!@M$po;fRuhI*P}W<#Mj%a$wH&VC49-zcT2{f*S?Ime$`w9OiH?;ogZ&b*tEa2 zNj@8Yfn)5|?m%y7PApmO!d(8Coup%$s;KKqh2YQaGzsYEhkSWw0U5juoHu7lA0@LZ zFou@Wn6NMKGIw=5|T$pA&F-{J?FdrxY`AOVfFl?LWJ%39_0UItwIkBY^gRzFzw zw>9|F{y}`H$Lim`g*!7|#NLLld;~vCGa4E{g>`7oP$Y`&TN)6TV|eC#H@u@0$yX-2 zbk5YWj<=b0%(nlRcc6(W82?g`VP}rE8k6B*AsrZeb9OKNTH6V#HU6ye2?1y9zBXQ$ zHJinfe0!qAEg;g4$friFplLL=RHiaq3q6PJ`g!_Lk%7lE)%tQM4U1(6>%Uw>^~ z0mu3rdG|qc13aWpb3WV1JwSNYh%|90onFT5(bi)a`+|zQN<%Sj0)?k0aPMLEFDF#z zcAK44wZHi7oVQE?83`#n=OQ{cZ)k4|NSr&yuOYd=RJoyYeiK3GN_U{`_oEAV`QFO= ze1S49nr?kzbH$+#w_@4Y?S}YOoMl!qdy)0wJDvs3{8o^SFLo%)PJ;hYDuDTqY&Ra~$;x(0#+V9ChpHdZ7Nr^Z$<7-ULDiXGyJ)(fU z+15DzGIzKHY$IJlzsexv5j!H;WV3m&aMS77>Z*I1+R{ic$Y)Qlu=qqPUOj9O0^(%O zQy=byGvuikg_mUH+6+}nS5VjS*c-VSA1WA_!u4F}c0a*4!^b3-{z6YN+^TlHbY zyFSy%cN`B9F5fD#UNU=3XSmn+UT*xM}QsiErJXYc$+a8-KJ2if^pwC6Ul_Lv~=6@q6ZRC~o+zt|A({&Ft;ok=C`|OI{KRYhI zH^8~UncX`&WZj(0e`j`@Ggyj`(cGjceyQT>T>ZdccSGl(dVjM?2$tMfW?jwh@7 z;QO6Y<8IEfn!lLFHNNafi+aorjc>9S1hgHrY<;qaJ!&@NZJOUN>svt(*R%T&HZFF1 zhI3@J`j(K;=<6i@gH9-Evu9(!a&l3B$uF`9g}g1NZSPlm7`lNI2`8^yE&b-y^s&c# zlCuZqEPYXs#1i|1)A$gw1Ya58p=b|>o$>5EOgBE5*{L+%tEOI%S@kZR*I?a9NM^{{ zW4(ic+R{J;{e6yG8kd%BrYmFYYAwPUFMX0jH@l~Y7 z*NdT*DO?(@$7gqp!7bl{sLh0FM5u{unSsBJz$6kz4^&x%?4Xt1DzqPd8OTL}W2Uz- zstXfkBUFpkyT0rA{D;#)>;4n0H0w$P3h3Hr!zY0UYwni1I|66G``Gpb30I+C=+HBV zG|#rm9B(`rPYBg%^6urw^(Slw?kJYQPjpBaI#1W^E!5NZIJwVEOt@&cA)ry$1FF&{ zsIub>fc0#}v93N5I_^T#}-vTl* zGnSF_wtBjC9K_J_Sw&DH(RvVxCfx2OL_g0ZNfHZI;Lc0yC0adve*A*JQX9t|S+x@# z5ZmFH(9_RT?jVzGhG&Hj(bnz>X1$ic711}*GLY(&+tCR&v54Baf1N_2EOSF8PdC3e zf&zK1_qMzLNF?b7;ERo?C%!Qpn<_^{esj=m4N2O(v!|bH^&4&(X6y~dpWQD3_pL&=PthPG){2;o?`CMYSw zwYHk|@l(k=O}ViS8rcpVkEJa{|AzSW$pjzQm{1_a{zatv(E z{!(i5c`C9!_O7K5Clq?RaPSrmzZ$w8PbCvZ+|UGgh?c?&}gJFfx 
zCKeWlk&O~HD{h2qtjW^QyMQO;tY8I-A1S>BCP<*D04U^`$Ja76yLi2o+CF3%=T!CQ zBzIT({@MC9piFFYcS8NVh5zFnz1LUspgXg+y#8C3B|)X!zb2 ziIbsCQp0c+RoU{fs|!ppI#IzU#759xrU=N;lZFIS-Du^0Bgo)J|o*_qp;8Yitwcj@hYj+_Y8*F}%| zuON7UzJwtHQF6oPwt!<1lDFyDzA|Qm@+$~DT^0&U0}9J0JsB^cd+q{*@a^Q1(06)D z9anp7wo%RZ#J|=u*m&FXf8z%+3+f9v(d}KK5vJy_tZ#M^O>FkWvcR++LT3q=uYp9p zT6;%K&+Y7JFvX!;jiLY<4FL>Pq|9%nk)@n=DT-Gze+Z|L^H_S^Z<9SFMeQ(!<}$Y) z;=xFO`2iO&Ne}dfoxvm%*4)Egoi!<{c3O{*up3NNCZ)PMDvAM*jLP=GkpxX-ZLDk$ z@dEeTYeDmw#|e=IQp*Dd_Zc%J3F6i?tcPLuvk*%-SyI$ zARknbw8^x&;dx&-TAJ_J>Hj?Bc0~cD*_FpnNoqykpQ36|&yQ!MKkCsC%ENYm;c)Vc zjsH0wca;StTYN9-FGUCv?#_R&nWj{N`wG4{SB`7xN2wAO?#hHNgxhEgXv!TZyzkBx z1XmyYqYSUs+w>lT`MZ0+xt&_M3q@&gF#&{#gzW&i&+6E2A9Q{HYcG3dhTE>*jxV_< zP|1T^_CDZ>OS@IfAyZBVR6pwc?*;tme?2u-DdT2H^RpoV)Uz@D za67hRsal;g>v6*Zn}`kuy-`dB_6zzY9|hgcD=Q{ge?lL88vsgRJuW<|hJ2I@%f_ts z+7AyLjU@G;u4DUeY=!1PVZV;lMziY4RY1Aa5la^Jt2^#|KC z-9TG`z}qjVZ-84)5t>3XYj)oS@=KDj0R*7{pab}iODIHZdqm1mAmOD3T2wUOHspbR zO10b(se43YZ)uDB4iEp0UI3&9j2<)V?rGiDN+Ck%l;+>6R?Jpt_!!Sfx3)(!)>^UX zc9a{EEE2fhZFFxKNoEs9<7HZu&-{a#@NPQSxB0eD#JU`ht zv?BBWnA^F)l3Kg7tgO1V1dU*G{PHtTAl&;ncgLLQhkhdqL64(ktojAN*Idu;#Z`=U z?eveE(VXTfufIplbi0ZWTZlg3A_w5vgoH_Xttw={C!>!?90AKsiU%I9gyzOGGOXH;eY&k_oEdkKnVvHy^i`_?#~t>WI2}&lV6_C)CPLM zgsT()zsgNwt6@}BXr&g8cz(a8@+@0VuCy?^@8$;6m0K-lC|@vPq72I8LKsA0b4eI zvF#rHf3Wx6K~W}MzwfSVKp0W7AQ*u`K#-hS7y$vvS(2dSERw_QN|LOAphU?k8HtjX zEFd5`gXAPRhZ$}^Geq6*-l|*otGa)DRhE{|be}$b`t<4U)93tp6SY?2S@B$Dfl)vl zt3cxuBL*Xv?={ACpvK1Rkdt=1cS!W?jA=Zc9Y(Z7>rvM{VD28XyLu?OhQV)RS5+ZSzHix^upkVg4s>m5WVDmtJdG28S5_js9#wCpp||+5SE*4e^QYspM#F&Wvun$ zRG!T-hrWFkv*%9AGj)y!{ip7Bq?O%!{>uF8^W^W&+Yt_l3k{ zhvEk_azc16&NtG6E@gt98=wF{F(#12!;0()bP*1IS_p8{cX7!l3>&zEVeSrFNMUn0 zURj4L6;>SicINQ)V0jQcgT1GOS|7Uhq-9m@J{NRJ!bFrITNtjc^NIx|75`(RmAk*- zl?nYIU(qhf{;}iv783|oSdtDHT{et~v|Nc>hZMK*yk>k} zWyL}xVa{dQ)sAx3xXZjNC$`<~K&_UD^qzJ2R^#qn+jqp|cX5ErEvl(t>p(*dhiJ8w zVfGA(1ATd}5TYQ-9pmS`c^!E{uZI^HiM|}z_$qRz?s3lC7z*rDYazS7)5_<#$y7S1P^nTV#0DB#PKhQtN0JQ?1}3G8Fmk=E|2GHmj(8ap 
z-rgULe?A4d)d_Fsr7GJ-+6&X-H*f3sAf`5HTq5Kv0ueWg6ucAY)f;osNW+K*G8=tAnuR*`FWXSvu4_rH%0lLa+$?*gt343Uw?1dFBre>=NsSlGZD*8MuqA zrgzO;YPGQFku^Rz5C-EK(89nj=7uPPTa|N)(XpR=8zyNvh$znty{duhARGkthduv= z16%Tj!4)R#v`-8e5$<|1S8=RCOEm63y8}ZLL+iKSARkGdeHt6 zNLS~I-dba$y|-`KO9FGIe=sIcl2YVcPhGmzur3r}043RC4leEtjv1VW7GBbA7t#+} zjq~o#wTRtMT!zG|Q9RP2)q6gfx<7zYZ&-v0yG@w6c4yakaV;Ml^E&oV6WVuxt0K$G zk~c35g!*CEtahRPH4=b51Eok~%$5g9D@xzekZE#=&|I{?_&L@M>Z*GyA(w*s+4J zbQAV?YlNEXzCTTp@}4P<0%f`9vfqK3iU5gQB(1=dwg`RY46&>ohiuTyfr;Y;c|Y9Z z1;5wKHDK4E4{mSF;F3_F=^@AFaj~lp?>kPOKH#=0^q+T33}sJ2@Y117ftVwQ*l`f% zF$s2B2lA}yzg!;&b~O?<2i#EXYIVIUONhrkY|R9&vy^+hjK#NGE#EZ>DJ1S`%v>9J z`{n&j23}wz6}|Q?*BQcv@MkE!Z(4WugXb>XaFuuMzLdUal=kKLxOqMm5Wx|zo*O=$ zdC*+^xpH1pN}b^j$Q*4S7`JPY9+A8fQ%q*~ICfj=T0Aeh3CSJ}pJt+va5+%pOAqgN z-K!ACk-9ZVXs(5w^*~>WKfh)RgpRfH2XjchN+Be;TYhA2LtvLHFLbaYQ*ytnl`o`X zy9h^~Qy(uLVd)bT85+q5g^0>sYCS_BBSW%&Za=9TTGtWyu{KpgCOzOvUqtVP*)XuQ zf?MtvtWX<8H;xqxDOPLM{2ezm3PkrPo@3B$mZZZ%by_o<2rku5x2=C$md8T^Ad%cmbYw z_Ka`$;aom2EHi)UIu#`H{@V|su5Mi*Ig?{w%lBh<69c9u6o86g(%3arN?><*K&^-cJ+!lH01Y$*{tohhV~?%6Qk`DZf85kI$oqZ$0yw9l|$4e1PpQ2P~{eLEk|3JKL2H8TV)Nn}R7~hRsr) zBWAq^n6>-FVgHDccwu&Jc0(p~ z?bv8%%t|9fHREU5XnmQS4g!hy6aFDvl+rW==R^PgC9sIeYqigFjqOv(tT{qrk>n?_&yp%@*o>li(dr-0-Gsh5f_L4BZjqt|C8( zyQBpGFw`CFX)EzK&9G|1kQCBS1v9x*MEeeR$MZ)@qfH{ipU&yiEk}ZyVa6_khu8+t zh%wx13bN~~^zlu1_?ON$2#+vuFTLoh3DsEHSGwbElUCEK;ob2u>6Kmmc`t!KB&v44y=vVyAG9b+QEpquv#G0~3+?=g5Hgn!>v<0i?%-ND7W- zCvX>l!ZJNXt80~d#U<+;DUV14@-i1k$bhTa=3F!>gCt3Gm;ebB0DfGwYWE|l&2+bz z$XHP>6{hwPdKRn!s+)%3WcMK%z-JsNqTY3@P>nA>+S}FNZzPo|bn$N|-n=tc|3R zOYl*MyK-{@7}c2KB`;mL>vo@eQuJT2#5-kD1bf%9rWfHih*e;kq`N!Oc=K zKS4>;8K4<6%b)Igl?8@$-frKR{T#3~yvZUfTlnx}2vm7|mrM|) z^ju->Q>@~+iukaggoFu*JOBNz%X6>*6qsGPitL`HY1pX~h3RcZJEyxY*DUO)-z7)- zOevyF;YwQ{-jpL}H(s?q^w=)_bBCsm+80xHkwhX7l%+uOc#bCTxnlrg*tckJEymEq z=3ZEHd1V5&89PhAwGGjMaB}vLZ!)&qJYQaw3IB;Ez#!(_UY1N)(7qm(OR(nhcHOQd ziE-QA*>`$@Xu?cJJbOKu6MgFv1X^=XiJL-#9|EnJj=6;?{b4grOTpZjHqa36uv152 
zu`43nY8^rgv|AM5*+7Ulxg_9-Ru{TF=s))aII@O>y`^sTH+2JXoI-lNTXhMU1nMiY zgRVxPghfCcKL#DtF& zx#tfz+wQtOw04|?e;gR{)!En}oB7Ts*ncKgG6>Eo`Fe zXt0$RCAFBezNww(5^)&Ce^$nyS`Sc!mPt{lbTiC4W{0zNWsrM#L`5~cFJ{-I>ivv9 z;zN!0(u$utCaiFQV41s=`07w76NoAwmDw{JQDg^tG-9!Z((J>QC@0Ra*aKQGAzqr@adNJ-A%T`5xJTIWgpB zGjd^a*QtyGE1?#b)GWD90MGTGvhzWEqcZY$F!3SP zzC{y>pjQdkAx97TLY+ETdRqqpAlOC7i;MIUf>5@002COO5lYXVA9OypdWS^(vbh-m zNEmRG?3Zx78?`fcQ)`8#EEk``hwUB|^OHf^_EzX(hQn4wWv>@Zggg$4x$N?tyoq;X zD6}i!ptUls`xC2-_9s3){5tW$6Tpqx0%$BO-yOOv_)VfC!b2i0RG6BmZ2X~}m+BEZ z+qw5A%6rb?4lP{~2{gcz4|B1vkJu2<&x9q#tTXNTmzAd(F^%*s_~$oo^=lhTw!~xm zt|x`y&JAIdQF!B8U$&c3=4b;hq6f;kW13M_8*;1m97IMhwq4as;k12gL`hdodh2pM zZ;bD^VFS}E3awa0WC3rkq5!7TWvN}V(j3!t4`PfVZ|e9bjCkk;oSA53X3?Oq3)0OW zxKM6KVZ@=$IYqY-oy6=i>HL%c5aGcP(SBkXzD(t-qT`4M`CIpic;xX)>1;oOywN2W@w>ik%+!+mA#I#hhE5WZHHt$B)jK~ zwx0bb}A@(GnI7Ur{l z+pb|kGHR>7RaUm@KF{pY6=4tCH=Neg>*Ionl44Bg(7lVD=v<_KG6&B*)1~A~pSHtn z7wi@?U@P{;l^LT{B6}J54gql_Ybe*rwvY(&`VWZ@)`$eVH~(X?B3E1!Yx3s;7mHTK z(04a~y(?cNa_oDWYk8KgGl=Z zqw`nVDGPHrZ6#FjrLplpTYo+!9wD{bcH*^0pDIMy4L9V=5y=iMblL_7vjf*T$l1YH zkc3U0w@SB8^Cy=8g~TYNY6XTn^xrRROxpwp+rX&J=r}@ZKO~-c=E0?4(El;^iQvnk zoR%>6@m^~T@*9tOjUg3*Q)?Hjpg z2F5zPB+DA3r@eYNCe7gZic+vIeV6l1;9llSbET~9*72noteX?Bs|Uv1r)$xu*hFwc z@B!t$Ih71|;2rgZjelV*9k9!An0DVTt(Y2fA8%krB`8P&w;2(W8e=lt%L49zKPLU7 zsEj+6aAqe>S$oF#F3PVA9LnFXCrfyGy~<~DkK zt-?y}r0>b2m7F1|XP?qi_<-=n23S$u94+l{ZS;W;?G}NB!sk3um$FndxH{Cq zva`JGeE6~PrXiELJ~BbD@e3S8SFkMW0heGGXc0{n8OJ>HaxywbgDB<}MLne{oNF=N z#H4+rK^mr4865oA^n#EuD-W)u%idV_;w=zws)H#1+fGwGzEpdCsRkPJg-e8vA_u9Gw9QA2xjh_@N312U99kDFn zoSPHRh9u;&Kp2HOJb$XR!WPBMlkQi0jOXDLJxXfVLR2YC?_zBuPQ56dR_l>-h?ez&ojyBg zW9AR+^lrlLc)(M%dO#6-$R;&CUR=xXQn)gjr<;{(Ml)l-|5KEnCO5?73mYG?%gB^? 
zoZlSlM**DUk{-z?1+@O-ah{TMzt%iYR4v2KxASd7;0~kWck|h}#df8|C&x9-hn{lW zxzMC%Egg(}!xP{%)_e6+$E>sB`yC5Y%2(~YFT0>aiFfLr4u1f#z(=ZMTpGX1WS3?5 z8z7Mx%XX8Zp5>UB)l;Q(I0$%g;NX(MGn z1MV~ryB+ps3QjL01C&odX9V(!KlMGyClkvIe9|?0G0dYGDl?N8%a&jGA?2iuOTWI0 zk7TlAMClE)CY8y&iSPv5f2p6!$d<*Ymi2c$+TL-mdrX=Y(@(=^b6mM3qj>ALDrxge zDDVM|P1`Tli0Q%TE~Q3RyNam-7OKM$EmiH4YYHn;j*O0EM!7V>z_FBqy}2JJeze~H z%AsOBEuq)+S4yU(1)EB1gps+s$ipOu3S;}GW0YLo65>;NA?fWrgUG1=N`|qCa(qIY zMBp)C+q3o7GswrT7jBU1Ny|72f_nBR2B#5k{ahJ?bIt08k@SOb_w_%<^?%{pm4c_f zcSD98Fq9V?iM9|dlv?@v)0_aSZa~HAMr&(FXyi~|*6{rSfRU^oc=u6qAzYLxB)GP&T?dT?L00hm2! zkIc&oCoWT#6GG=5Mb%HgWJ9x!-W~dS%p7G?oX@xMUHp2&8Yr709CoS%Q*zH_5T6`w zy=o4kv}%*;6Pn^$UCTKvM0&S`Zy=lPP_EfZ6qqf%$=X@Ljj$K$ zO27&AciC#8;Qc0JTudfo6uL^A=AjZRg#;yfIp&&$jUeVWRTR1DbxyIP(No^{>UQN= z`n(9VNA8;BMPCtuuux}~CQIFRJ6tfk^qJjPW#aB19l^EIZlj>^nb|uS(q^S#(h>r< zzDN7{L(dli`~A@wMc*+jr5J}+&loBU4E0=c^*5J&)uuYcIF6X z+*QAHGi^g^goX2`AD<0nN4#eqVu(wAS$jCigsu^UdVGp{|0ne`qMyne4&6W|R*YHD zot$F;q!yrHSJw^7eR-VykbeIv9hj)+nC~Eq6dYX(u;vlyrJgqw1P1a@e-a(RmP+n3GLDc@j8BOk8WidqN?4$! 
zpSpH{?fMh5H}=y)e;O}@LUD$_^x_x%+@JLIU(k1d(VL&K zdwhss1I;DoYZP`0#+%0H|3k_QH8_g~3T(cbt}ae=`Q?)Q-(;HzirzcYRS z%KNVk9zyN@GA$rf*dAFH95H>w^v!?k?;)!1FH;A?3O>Z@{a105aE36sOb^&atPP=B ze=yCuj^{|Ljt}&Zd`rK?*$M@rIgit?ta!oLIV)}_Z&f}oZ;@f}oxS_w4fR*nNH~Lq zc}|;qV#_|d+fUZ*9tIScBBY($4CYTmzpJ9VR$HWCmR$B)pJKR18K%d)_gzRwr|&Ac zl}b>_lT)L5C7KTmxWy-NPJs6QTc2}8VB_@SyheU~)tTP{e8(;6wR>`6{cP`=8R^Jn zd~1^plKSMml)Z8%K%aZMnUG{PS?BRS%ICjIDK8<`&#}7h9Z`UKlX$O~lU0~t()n5McKY~}hNEcofWdA1Up4*sz)n8CGh6Zg#?eX{syo|9M$ zl`2xLTeiq+?t0FKU0bBx%hM%Bva6J=-GM0@=dwar*}#; z2@5a~iFuD$`Hb?L7W&ZOq}3`uoV5ziKdvPjARD zep_V)DX)b3jkZgzXKQpL&1vowsVb`J5u`*`Ej{9OlfNof$0MHqhz#F@dXDAHaQa|Q z`SFH_Q+-K@1R_5la(M|KmbjVn~#?1ml?WU!4LL5kl)F?#79 zbAG(mHs8&HeS}|%IvpByu9FbI|DqmM;T+Q%mntnaHY#*|#@;36jp*hPnd5U3b`=!G z-XdIcubwlm`;U&?MuF_c=w2K8pSexV-zqDwTyZ9olCs|U-#^Nxuf8{QpKR5_j0F-+8dQ3HwIob=sl zW3D@$C?0Zi>5v-PZ==g^qX|RJ+hxgCZ@uR0r+V<3%`J01OqGPRdVnhLdAlM)JXy^N zi;xLv@Zu?=%_!7=Kv)8_p}-pG|1;_@^WME4?G{<-z4b7UzNLwTjGU|WYAv$qSQKH* za6K`G4>f{-GO%$YfNZwMynG2?EdAFh&Jb=L*5S?qN6)9l?_!z5Ir>1pnyElPReHNN zSwM~^2}PC10B>#(I-frYMjb`N@X+cQy90`wQ5bl{P-?}`MdT9iy;Q(oxx<}4agHho zaeY|67kzbOZC_)W{y-)}I+p`Mi>sJxg=h9@jP)B5_ZaH2V{xH|cC#1*m{`;*JHlfv z%4oASpP!jenoNz5S-z_cvQ=2#K7X3J9Q1Jni&+k>%hZ*05@t{kyNz%xK(hVp$W0yE-jHaO#AF8gYxTfX zPYB`dE92E=^GYk&Xir4L^&!0j2YBQ+7mEZ6NsC8ui}J1&|6!+Nfv7?iS2OF!=c*e0 zw&maF;+w$h5{B0wAh;PXI1!NM!t_*^9zQ0ds`~z=0v>hTFf`4RmVAT)*is20T;QPH z5W&Y2VvD73Yc7Tk4#-YS;QMBe05_lcn&Ez$pD>r&)9Jc`Oz&!@-cRBKa=$OnJ| zB-;tV|K$*u_nc9Q6J~$hL03H&NcYbeH0#9S?0Hb&LLxag0MpY?eAZ;?R2*9%UtBcs zDzk*CS}b$wRyucYPv)vF;`k)Uo$Z4YgOD#UWC<|b0ofCPz&uy?0f8f?iBIVyHmuaz z&~Oli)>_CKE%;LcRI$0f)%0(SpR1?g=Nrrgk8F0|(v`$)izS~!`H2Grq4#e{8-|}) zJ-A9HHhnwQ>cjgxCUD$|7?7^}+MO{oVW4XQS#q{Y^YXlD_T88jRA~WBW z`}qAM6wMhKaFJwiA8Q{>{sEoj0Qnx3^qd0@A;jU7G?$V97UOH}9a;{%0Mc9xhPzuB zq>TY~Z{M}5b3ikP_sSN;Te-5;a(3q~#jRhv z2n%GFQ81A6aVl6BQv`y{`~w_fAZWTnGP2#F04P|#S8I=l!Y)cqb58oq4fJ0-U@!CW z32hM>iIh3KKY0EOFHircr8dj(8|e+?D{cL*hgxuqQV)0@L5JUTDCK_SP~3&qjAEZ4 
zCF#C`hl-;)0597TUB0T*Gv>*agLZiq&@!W-kWlmnkAM@4MGsl%diO(%pXUJ^SG?%J ziXNylCvfq$J2x0LL-U)u&dmspkYHQ%e9>RcBTvuZd_EihNLS8;PlPP*?4>iTJ5HmA z%U$9FtPWwpW{;D{0ssTU_&-H3`eT@elDdPut~eHGHF^coz}&9s zvR*sX<=|xSRF5-QUx;4+4b-Rs+}K|z_lz?TgWNp;kN51$EMTS;&p^|M_%c1Q-=oIo zhdQ8mh`5U_(8($O@Pn)x$afIYgZ2wgd*l>zK%~=euyLi?)3^zJkA`HoWREnQbfG)SUZMq|spdH7c`$FBUE#UJ* z@{$06-37~hiT8w0*j3>cGtjJj^GL8tbX>bTkWKz#oDsY~l;p-m97%rPt_(hr4>%C_ zu@1Q7l;)DSmmt?`H9dud5}00&xTUli#1I9Ukzj?~#c&l7%-2S+@$fOf2N-uG$dKv4 zWck!SSt9{xXHrIV5NpAPA3-ef4TzE71UuieW4JiL&jm%#IOB?#U|gJteb`*+Ckr&sW!N7Dk6!YB3ukX(n8#U z%y$4ui^ZHZ)P0u>i5CJ+1q3^QW`Tr+!Qir8R}kUN2+?FU#K7EA0ZsuJEKid~Y-F!5 zY@e!-6BaTT@P-PX`mWEXIXrQT!sj6*X@iTcf*`OL(zk`sV<@x67zzHfE3r2SBk0zC3Ao?>v^h47kQ`fzHuZn|UaS@2w->>h0_`g4( zpS-_MjYb=B5amMhTieIXg@(Z3_&1_N5=hXj`P*_6?Aj&Hvr;7NBb7tC?U&8g((*?t+r&64J|=ru zaBymsUNMs>0+U@*DMP$O09;35>VnrO2Hv&Jp4Zt85~5|`&j=9^Fr#A&;jGkzg__tw zvg(B(aT`-EKwN%04I6J1;x&3Eb$zh*9dXeqx24fwG}sjtf~lMh5$`a!hs4eU{JU#S zV6Ui^wlK{jK?b{{=;5E&kVG}aEABn(nJDClj-$i~`;-HO0Sk4vs8_bvb#ZxI9VR;I z<=z%p&YqF^dsOT!?eJ3ne6IfriHuzgM~&`WAT$P7z6b#1OKAG^ba)bEsFeKR`O|)7 zH$js*>+f#FcD~HOtqVnWE3FYa-n8&7jGCR=I&7&^R`=BXrawpG=5MR;7`vNgySoyd z71C(s|I0JT^iMUcZhsX7@tznVb&*Rj?BQJ`vEjdw`s!JvC6ce>MTE>erD>37#r?~e zZXiySE^xBkzDU)^?%hJNZ5LS7DE=w6Pv5zK7JeE#LtY}G>^6*u*h4rY?CJ{#$ocfU z*(yqkNeRsHi$s<;xfaucf5$TN1u%k{y9Qn4^&#bt&s3t-m)q1AGO~=%u3m)3X!7E6 zGk`zEh+}T87}d>N8O;1c8c{)VSxRe3qFKij;7J0r*;wLQ=A}0+EciZ3=z!`rKxZJ^ z2<(?_j!4mF3qB($)0%f_K0&>zBLe^3t&!FO2{J#YfOW`hN6aFbci0=y)jJI5J}36# zW591ARri`AqZlH-nwc7VE9#Gan;_Ghh0(<3&6%+wkSVozgL1C5p4RykQJ_q;>Rfb~536+ZwJLzz=?vOlgogQH{5-aCzopX`W8jfkPIjDrau&kOu921WV zm*!`bVe#oD53&K$Ce*3xRR~8Gf+aM#_U3&aE{bez=1%jId z)0>O<;v1PUVK=8d_AP%*CC0Jvg$$1VZ2iLS8Tx zM_dlw>DD8=H^-h~*_`P6SZH(fcB&qD+8eBA0)Ou2>YNc`&5L#8JNw4l3=gg% z)X6lKborc}TVL*L!&x%+kO?VA2&0!B0-god&@L}7w#o~SijdT!sEHOHT;k;caAOa+ zDqbu(qbo@Fr#*On8)M+37>(dYg_ADZ<&P_DM`#sQ391^CDPGXR&B6}Ebgg)C@&c$N zbfQunw^*M{k~D9xoxEc>wNd2@RD!xLwj<_D0QE=Ul7%3h@KF^_BY30^-jKdU<~!n@ 
zD)F+N$Q_xY928ZUIU@jp1qSx1)3iDWU}AA61$VXBiBKry=N{fXx5=w!vEC(3QR{&4 zQy*9kvsf!l+w2$Gm%$u6W5SN=CO3HF8i_=MwClm57vT^|VKb{c@(Lc{yR7;)Lz$z|&FlF;zrrvl4t_JrVs z6bPFYnt8ls9t7qCJu;@Ww`FlK7hHC>|3^?)s3M5XTAN)|u8$g0*`puJP<*q?2wgq} z(zoN7&;A8T&icAQ)BKj(LqVy-JGJf13ZsU_UBp>ohyosw9!p+q+I0&34> za${`tth}=J%KjsfP$|ZoU^_r{w7T!Qh20EDPrgD0l-fLPBdcZ;w+f49Tbvs6^cJuj zMeHySn&lg}%!^TvK*6g}LB>DMmC-vl)19X~+(|BUvJsir1S~%rD58N3Sc2D6;A(+PL{7G7sD zqlIA0^(VzsBG9J5X(`T5qBUX8ZRj3O;`K$!kjK!r**_6Rt+n!SNymZ0=fd?-%w?WM zig;fMxesdWJJj>X*YAD4ki8vFdUV?d6*$WovYt}p9)Z`ZRHj3zheol zyB`*PEBjqTHW@XS@RO##K(y2+1KCCCW#sNFe|Xlx5pp)>-FtGVZ)vLl$da8fWl7VpKoYh6Feu|pP9iW*?w0*PR8WxHen5Nn@I4jEM4cr$vpEm4fFuZTG8&?Q@W* zSTja*)4uo13%u2EJbkS3=cs%1&~()AWqtLR-gImABzzwl6u2 zOO#KD!F1X0(GX*TE{H?-1uW@lRn5OUgqj5^@5D|Er=##8fqGl~VnFB+<`oR?h9dx4 zWkohfVr%9Ei)I|FiuEjf_0=b^Io_D$TTC-KK_rK;GN9Ag6aPxwt4I$F-0a2Qxle&% z!X6Tf4R5+6V^L_~3jx4}V2`*5^d^$Q%5>fN#BZI-2Qf%hOv7$oF&7pB_ z*KBN|4Ik!Y`8F;x?EBT18S0z8VU;vxiylmNxTP0Korg^rxL;;uZJj7=HoYG; zaCr7i5`KHTe4Ld?IXsN0DkCfNQkZNYh4zl!<4x=whNloxkF1k?}OTlOZYA*{obyY%U=y zDJk&k8v**RYJ<4BmNwBw^SZm~twUf=^g`O4Buq15i^?xnwbIIXbBmgbsK9lc?{%d- zc|9o!o9B}*C-b40#Z0ujo547ba*3tqGsl^;ym@0}D&icbh*!1Ew2u59Rs587VGj<#ZmwzfL2xP1ZSjGDK<% zY93VvI{GJFsz2GI9@$VA`RLb^Yd$!VrMtPoOq>mm3mw)x^u<4Y&s$!3r%U{>I#;)E zEttufn@W0TVz(~MiHM5rcEzN@_-!gn78f4^3TdRIZ-ABmu;XTf${jN|dtvxxg2e((y!8KsDR zEt^wzQIgYF{xU^tokx-_p7QbA-9lDJ3>q+ zN7nvJW^Dgrs{2}*^RNyujz`1J{??&lYUvG=5qKodXod7?1tqC#pRt7feT$0WO`kl? 
zD*c)Yci4FO;bc&d(vN0#(@0Dug>=%GFv zv#hwcjC%B(0U0DLa}};$Mdn2l#8fSPy_U`*swEI$ZTslI)QFlO&jkmm9NZmBsuBYP zx1Dd#2au)kGhwOR;6`$ZAJqI=n``V5WciWrc9N=A9h<)PYNMA)M`lKwE#1Yc{2y3+ zQfp_mLA}2;A87*D<}$M$xtJ6f+tT;2pog>f+kK6ULgJXfa&uVB7JY5OS}^Adc@#&c zV>s?fK`Zj;gy6pAVVWP${mjW=!pTNd8cl{8YL8-2&N`EO=Jk7e zP8pP|J-c_=Mk-bsOH-_?ZhG^Qn_rNY3wkSahiY_PW-zD=Ua~R6Mhamu9!|tp;(kxB zD@<0G6f19d>X|tpE(-?n(sW%Ix*J4h@k24p?YU5(L`52}=x(PMKF`B^ZJ+qmQ%n;rwYiEt}$pxraYByifGMVQfi4>Y^ zq}nu~$I1>BT7>*=y~l$n(v9VZ13SH8GP?eT<}O)XA-8X@lG*+b*K4vBIxN)#-OqzT>TKv2nv( zz~RBlz1nIkw@?8GBQmOjld{-o^*5N(N; zKX!#)rn;-a8&I^|TVi!rYcvOkB#$++%iA|yF~c>ipjDx_WM00vfSC`q=@sm%nq)e4HfS> zhb^6j`nOiTT4i`T5zzivsGK4@E2GG)nm1GDOaBc2ZA_B7!7oi*piGGUI@6UWWW|kx ztKT4VC9%V6tD^DQoGSZS6NR)h)wdZ7rf-jibGhYx+wfQ_TCMay>!v9s#>pO%X`Jy* zT0jq%yuLEJc$Hn3&@K6*Wu$+>)2CK7A>D`Ai9p*Qmp2 zV=cAYS#1D8>#-D~v!p4b_1Q~@*;(W)g&JEU9zZVxyG%wsXt)32rEqhPOsQ|pU3FX5JeLb-Ep%~5Z%T4x8-`!r zzl+ECuobR*n8HyVXGzkch}!C2r*~A%Zt8w)+A4R`p?&+fpw*Jks1-wG(OgdJKI7JuhWf-cOBVNs8 z*J5En-EB99;|{S-xyBYwzJ(c_m1;;<+L7Obnk|GomDqfP=1Rb1rj2M?0rQ6vt7^0k ze!!BI%wUxXkp=AtrB#|p+gIpO!o!8&Jl*OlY+PP53a)J3{lNKN@j#(6p+(F5Y5e4| z=C|D6o?Q`j86YGJ)PI=0XU9R*slGNQRV=z>8&}r(t0rZ~R`mtfm^6P*rWs5yyKY4^ z8NqJ-Zko$)`!ns!CALnSbdC_qy;RVxF?Nt+8VI>}!AOFq7@S(|=hQaZtNOmlZC6`$ ze*HUXlzGM^=1=|b7N%0G#&s(gMSarIY6p!N5bIK}LiI-rxvbCNU8Wd{C(HyX`u!S_ z3lAyr=+|f-)atZ%3H)s}`bjkIh)&8L|Gir&LBH~utc^+tQjL5`h)X-u+I!-{i;gQg|QlGPYF%5 zdjc_IEjFTP-H}#SXus2%M_&25)rxG*%5O>|HYHQV#9eDw{ghA*j$Pzg`V0#E{nXg>Q%w(LiV6WkG|2z_dRSJ zBParrJ#uMcHVcp?Y8fCMk>6oSsLq5ft3ii5@ZqnT3MSCVBgK0at&4A-%!xoWA9l-5 z^xn(vW0kr~+&L}9bw$R4WJ1HG0N|&6<5Iu%@G89lpF>Gt6rcxPtuYJ*2&AB1&t&2DjxURsX{FTX1= zUl(C7;uT@I5$6B1MOVBZn`{oxYGO8AQ+|6}IX;|_lvNcoP7L$it3nD9m#cBSShEYF zSnwPPFVqHCNVFAwn@8B)x9Zonv-i{S5aV;0HVL7!qh%;D`o#c5e~zt^d#JXv#-kbG zbjFFm1X<)SQhi&%y*wwgW=!rA=|E@e*4^$7=BYH6D_}*(f9$5_CB`LCQ%bHKZ$^ zZvi-96mgI|pK1}~L3#f!&h(sZ)OO>GHw?!LFuHX+@h`WiQQH^;<=-b2m;nDpOk zd5VIU=FKUlr=C3X3v^sQGwN;RDf8{u$=#a~TkOc*1CUZdi(A>-;;CGBI7WpzHR2gL 
z6oZ7g!_|s*BD@Fk_iR%bZYx_Qs%T5EyTH@iHY-Rg!nJo;0(qxNOKoHNJhV+zc(1NM z;<~f<*wnp+Oswfe35=JV)Gu&JNJi_5U7z8CVA{)oX0M`OC)c_uJQ$n=qeyL+X)4 zlyvF%ZqkFOwi^2(sWPs~>Wt49lR&MV~cS`VJmkLIZXiVf!ESdS`4BLs4Ne2v`U1g~>} zZR^rI+|R}=ckbwvk9)Gb??Z*NIW|8^%lwh# zNzh3~<#0Q_)hw~^?{zNPK&u^Ei#n4kd4@=Rg5w)4FfFgZsEz2J%ww<4JptUNYv0 zH~K*oKe+2>5MTv>Z+cDIT{QnRm}XNrvE>pzKW+ps!5;39n@ABOgH=Nil$Z-4$^J$5 zY3FF)F4ujTzxRFYH+d+&TNrKtU0LlP=~E}48Djux6Pk`Lv=xZfinW5l>p3rvAH(HZ zt#>$~M_^7CUEM|h)^nRUpuGq3_3UrspGNl~J6+@APx=quO|F&B-K!Nk;b;(Vp2i~1 zo;%@{5Q%DumvX)^-YyNC<&@%n<3|_TN$^=eDzkQ3VSvB@aA?2iAL~yht0TYxk3GPc zJz6dIykE4$Qv;VUgusS#eDsK_K~rnw5*fc%3b7Ln>|_r~`1Kbed(l8q#AN`ihsCQs z?D}oxO-*3yf2|Q4^#ee^AA-mpE^WEih_|M{Fx3u{g+KpF1k6cpL7|Tl)jK%HwH)}H zo(0Fy9N*L2j*s0vUwE6i*)whux|_um(cTaA#tuFxV&5p~CG=%J@Q@pTa$){z4&bq2 zFYgtv!+?j8=iCjBBj*zI4M4jhAZS;g`ahxV3opfa{6JOP<#6iq{^ZMq(Q#83P2rBv zP5IlnDb~*&v?xK#tu_{kMG& z{aHjdF)inx1qcaFqgQd_PZv)mj*Rnx@U!=LP!m3SdjXymboCDqml;dHC!Y*IP=xPn zuo3apPSq=`I!g*>#lN?7IE+nDoH_##VRY#7!w#PP-S77M9ehLhaN6rWEV8TeJV17p zyA1q`EX1G7HDT1H^nx9-=JOD+D}A4xuig$9wRyB z@5if|0TkE1HjeO`CaI3U4z;R7agP_b`MZ+`zYyKM6=D0v6ewh2{}hvSIgd0GK3%BEc|GwOL{mr|4NumO{*F@>ffYn|WMm~S$;keghi=FA9gSwPw%f4r zK{F$IGX#W+dQ>fhQZ!{1$~><&)C%$3k2u&jp+5lkXof51(dgV5qF8>RCHE{S%wfNG zu#eKawqGWc6{?G?aYazVl z0i5Sr9c_l2$Rq^{f+u7=>LC%O=xh~ZB>Gbf-xt~iuZ4VX6269#Tp7UB*(0Xdy;^v5 zaHU|6Q_Wn0*1b+MdaQk?lb}2z(|$#CHfODgFvRQocE?UkWN|?A+G_)zLI4GLyx-xK zhEaL3%i;l8q+&&svIJaMCtgFNG<^L?%W$!Rvw+-%^gmg8i1Vewcm(01z;#bwVvxiu$%A zovF^YHsCdMjeQ-_`|Ya;2x6aaJW9G+M2?t z!<~#w;mzKMBSqW*-dz6E8oKnRh3nUP*q}qf+C!9C@6pZrOTPClL1JAb*nV zxHr>R#XbOoN_#kViVVCQAtT^9nL1K^bTk#fuRKY}hZPB+zn2q!`bwxB_F?v6mXgLS z5d^V$IQ4XUz+JNe+NRKQzV+u0Q%D%yc?J%v!BC1D)`wr6{TM;l*pV-rz8eIE_PPPBWk6?R@3d>3HA)Ry#5l7_LCBbKR`caP%zG zujD-53c|cMG>|3`yyZ#RRA`vMyLQ}v;lSgq1;uc{=?n;?^d#FJ-gA#e@;$?#=Vxc2 zyv}2T^`^&@Ae70W5W*ZiDTS{Z2sLL2W)%Opl2AzKjAXz9DI`>jr#i9$GU!Q-V9tv5 z`&_;ZO3lcg?4Fm421y?y?Ro*zqb&8r*U0>(B#{k z{k6sRypZpxK1YzF{j-Q22i8KyNg-U&V_fcKsoJ5h;weSjm;09Mp3qCQLe~7Q_T`U! 
z`KZs90G{AzklT^hJEQhaAzss>D;C!TwG9sAs*_g9cHkZ_L(4EgML6PZ7)j*YT(`Hl*LZ^xymENdx&m4~ zYddoHEV^hfVy|H@ERl!#I2W|2u*X|HlDl~b{%G*l_TelIMhot7pZ_Lc3U|r^FyRMN z>S^yDs!aNHuLn$z2^+42-A^=a#mGKie}w4qJx=5kdSmah&$rPY*>x}>&K{25>Ak)^ z8ojBOpNKEVAs3M7p}mY z@KFX}D_q+z=>a|>ppPnkRWaB1KfVPYMIf4JGVd7Ih%XpmjfuV|OAE9qi5h5mylMyo z0)UXd-p7Cl&EC@RtByJN_~A`e%+2IPQ}iRhY8m9KZV33%W=rJhCVo(xya1B>%%}{S zqvgX47~bVR!OIGpOu;SkqR1Pfqsf9Y_7Jwm$6db5yR=^n>5{MBpemVKV=#rY8!_3V zE^aaCt$E>mkwN;OQyK@+OT&oe7}ssDIs>{4L9u(ia?xNSP-zNoSwHy{(qp$PrQ4dK z)eHb3{MQ}3l94OCU-aqn1%+i@x2KzTinJi0Oz$D{p}@V>HbLP#l3PpuTC{@G;t~+U zn%Uvg&O_M!Yj1wOf&~H({}*@^=aV z56MBI)yuYK%l_HG6lFTnY+^rAi~RZ-@C0ihSjf#eI8nq2i&u7MU-C6AaD`e za3F%)*jz5rOyH^e^^!aQEx z5~uI^?`h<}W}E4TXwWTj%%2Z@F7Xj;yc)=?mj zX!h0H_U2X<0kdRUga%c&X#ag zE@fpht0E-gFKQ0P4Vr%}_=->D6^D?hxXf1tA|u@BclV>??n82g{<#*DFgZcz^NdqQ zHo+4n@;L$~Vhj$|pChzc7}x<+f1>7G}Yf|PH*O)wA>z5mevNl-N9{j}GD0yWOhY>u2$jT!=cSFI@M#BPfi(@SC*lLGA=zGpNIM zYlhVL2Q}ENVmtRy`+Zhew`}wkOcy`6#@1+o-RaZAiaRrI8O^#SuPZvu;9{Z}c|cX{ zqCGV(=4m}Zw46UE>SKavVPFw%HNK^Q60XjTH6%G8Inpc3=Ii$|hrCX)OxDT_9xie~ zr+|Z~@y5+tX`(D0Op5&?)SRhg7Oty8);n&2j`%xMZcEw0`T)>UJH}piWcF%S&Dl?| z_nP2Fu#npro}A`hZFSSllR$LfW<``$y6vQX^2B2I*2F|x4&4DCX5gtlXZ{f7#edM* z2{ieV+w{)N?0sF2=W56PS9i}D4%Zf~+!O@SBBOH!qnD^r6QTw~M2VUay$_?e#KgFW z-l9hkqDLFOcQJw>%4nna76!pPlH7a0_kO(Zy&vz-o9~;M{hhPRS?jE|_FikRwHMc? 
z6{UXGImHRh6GNM-PlZ4|o~}oaX&7K0SemGx6ab(K%S*z>`9rGA)EVM!NQ8|Xijm&< zBfLcytgDiL(lds?nJM_K7rzgP6Ku<}6s?~@vaGUSBEZ4uDScaL+ZrvSghA(BjaC7Z zpf$RRFlaf!kRNSDc5HQaQ_i5SjQ)^jr>m=Mr(tELa_#j?{Lm!=IiTC{cX@-}6;(GV zEcYowzfBJs-d*KrupFFm4#-7c21~Cxo{ z!o1`O&6=zs+^w=kU|Wbn>$?^ltM2NSdc?UQ6_0I+NTo=l>DZ*g>R0e`{6u;i-SXxm zFlI1FYeF;7eaXEXa}^!-*3-5i{_d}rg{(457`V~x6( zygC(BE0o}vb|{#&dF#k>3doI0qb`M-%|owJfjEn_vz2ml(Y}toA2W=Wr z8?Dac&pn6G($=I64U!&la3O?Jc8Uc})?lNkqG8eBWcjq~egn5Q5A=2^$4As^i~#MO z8Td1vFNi(*Q-!*u5X-@Y^AxiK0i)4E&+-V5&*MzR~Y%x>s^b6|&_} zIc7MQno$OIXcOr}%6 z8a{2UNS9A6!6{h@ZSM0w!;JU3EplWphx-DGA?b%lR7a2~4Bh)le7+*5VvElT$o0(0 z)OAJyE2>^4wtq<*aTVsJ@Am=7q@Xc@kG>&EttciOmlT5kFP<|+`M4sW9Xccy72i!p zBE+nMM%s$tON!P6Jn8onsXsF*9*M;zW#!-g=kb2lKcIgOm7jBm=+kqZ#v>`J7#yYZ zGX<;xW0mK8i!S#+q9!~n^o2fbA4ySgQdqj_%&jDUnMX2aOCI3=ubm1Vf`!)dHm?^S zrY-jzIQRWBz}7^L3V| z89CPP#bR6gVS`)iCDW9awv+Pu7GD$pL7LSaz0kj98XbtLt8`wJB&P}e`Pl5%pR-h$8McRM$1m>5NI z`tTGWL+BeW7R|VQp+BmmAjxN$=IrCm?ck$4Nwo3^ zbG2s(={`FbWYcK)~clJ?{ACSG4H)pLd+nsvdP$ zF&;5~6~Wg%mH^XfTlkj7e`MT$I0Z+I0MX?>^TT{L!QfTs*l&Pj^Rd?*=7tGe4`!iCjMeu30+Frosn|K+&${&sQoMR}%tI;vJTxOQEj-SYV-AjAQd!BaENnLv<5ham;+m z6X;S`zlJm>DBMn-W{ZvzLjNeOn?R)?`cmZXzUQS?b=ecJr3!z@kF$Pn1TIJwz30eb zGiEcbMkrS65H#3N!ZvdxMI|~kRbQ8>rUCdi0+hfSuI~Q+w?NAlqzlb9j8YrYum92U&imKeL>je37dgPR zX&-cH?avNUW|v0wxJCnhzUPTVT=G9B-XYf%Ulc1mDc6gr1IdC3HwP|A(1i4*1EF!i zl8CM+d%07I_H_Gw`+n^5r6n+WOYz=nPsecdA>A9ui<6%HRKS3eT^Ft9?b6_6oOH&r z(ECfq9&DVpwMW|N*M7aBC0a5xqk__J;xJl`DH;yUP5J{9{>vYiYODPc0x=r`1NTD*Y^aHU7{kIPhX0!2@(tYy2h#)^;S4m*QxBZ^VK+XkX z@!`LfsNQu9$wS8sd88>0>wg#H57dU3cu0@`292cqLMVZWK+?Z+5Bt1nK;NHv14q-x zCLIad7S6jecQ|*xF#QV&(j3>`+*-v2JO8+wH%G>bDF~Ev{vj`ZP7df$&-y}v%fNcS zEP-kcrvw}(Z_djNfZvQ{=`qj+(DVw>sVWNzx@7+V^eVe&r@0juz4nW$5ow82u<}zzzw*Nxrv_hTNXWpgt18~i`MbU} zIT>*$R}WG)Oy?JChvRViHzGyt5#jNten1|tAxF7}Bw2;Iu9Y)?$EN$7(VTJkE?#oVGaTv?S^6 zP{C(1n9ZY3Nr1R2vZ@4xHnO?}xgL)}kl|T%>2NzGLQ-#Ec^k>Dor)4;f7iS9-AEQ2 z$kQ?yXO6Iw;anBPk;7Vl%%ND5<0eS~BfPwdTw_D*NMH)kNb^VM2TG)u;b}!`X?@RPrUWgjEX$F`@elkV!FdO 
zQwIe8=0i>&O95V(!)uah9|Vk>ybAMse4xuqDVXGp?`?M(ee2;iig#fs)D%82HeGMG ziUGH`>8#w<&dxZRFfi@!*F&0T@D$I9%d zcAZ5|W^in{_9fno%m`4oT5`xZ7%rDN-Aoh;rb1CWq8UBcd6({w>e;J8*p6#I`WL<9 zVo`U!bHm5z*oBbFP1<as zvf7RxFe#3pc>xDfM6Y|y*Q!$UL}csqz2t=AAnn1b$j&`z$P~Z zR)9vfYtwQ7ZCByryV^{LbsDEH{*$wubvU+4K!;<9e){s z``qp)bh~>1xe1TzJgzmIsdvVnGGrhmO@eoDbfV71`zmi)gQx8dD7!j_$MT3mr=f>|UUhpMLHte@j~$F}`rLM6_n`#2oqloab9EzbnUF=>#fTj4%OIsghH0JrKPhiBGbXo%yeVE&anB&m+(;Aa}s|=IR zwZ!`88R8F=8>G@VuI}EtLBiOaSJLzm8Z=3HQUbjvw5W|M4K^T!I(U=`2hT$kIj{IT~k7CiUScYAeeO@#&%d;R6yC) zZe4vrDJip?kFgZTif9**Vp)lYbES(ZKAPn;fAl;u3b|sEF3}MLiTi%~6)}`* z97&mTot=kvWC-`NNZ9&3@{)~oM!@h(PS<&{k&%I{5%oZ#@x5Jq%ayAtd zgb{7ErY75c1)T%V|1ZK&W-Rzb=mMs$*}i3R#FHw`F>Ob`*(aNabUpO2041e3-9u-e zNF>G4l7VrA5SYZm84nQzg-_e=;W}$UMNZ=p9<3Sg=!??qU(})+z0SwY_nL<-$!}oO zZib2ZFS26TEq4Th0M}&USD_bqNI5Ut>^jl0JFWBX9<7^DC|6A;u|EN6*0gR(xD(30 ztAwCy#PP*m^+_=kZuZ$qyOIpoS5{u~{hb9ft}?XxOfvjDXE4~pORVkPiaM_ys{$A} zttzf0^L6}$-(&X*x13C3cj;okG3_RxFF?Bnxv!nP8gMW}VB_JrZ!=ng9Ld9UN_V6W z&&Nch`TCNci_7baY7xZt0w;^3decYxfW%zo-*n+ZueMaQ1iM;aMMtoWoHs+>^J(* z_2xJ{o>1^s6vS|KhDasVBh;m<(kd!X$Ve9Vw4?8A>j39z{dEdPITw_Mr-pB&RQC(Oyq*)7ng6sjU#PCf?|730i1p65>|HxR`e z+25nGp9ZPRwN!d%CbTaF%B9}xEIMGlabS+E)P0lTgM5N!w1dQn~#}SoOd5C8E zZqeMBeaS9%ucgW_BV;VHf0n|^%T*7neC(6o^}o`+se97s@ew~ffZ9%n7lbG3`mB2|TQ zU_+cZ;6=M+ZyhEgaANl!_6R1D-ZYYP=MV=1IVCLR486PG5bc|65YSvos%z&k&0jxx z9S@ajpZQ>?G~=wiO_d>SQ{dQMRAnA2HlJWnK-OU3Shf)2TiCxjSZXKW&!ktU%qyyq zU+!vUM7*BI#5PO*z(Ub9_a z63^mT(G_%IDLyf33)_1^$|SeNHwrCnQxS9EcO$7>w%}|mbsx2T(M3tL&6kBUpFg|q zj8BSr=Z~r)&#&ogd$~_28Uj1uSjj7Hr|-%QDBrY|ta%Pn0_GZ3k$_qoA zg3!vI6OjO&R8?bALH>b)XYE_B%}5X2@|N=IIWfu><-HZ(S!ohKX)1io_tsW2dHW3K z%$8+xWh%k(WRD6#uhHm_e=5AjyrlYe6bxHT5z-kh?hQ3rrds9_vTe-OvYiz9+^)k- z0%GR3RW_J2EDsIA6xvV^ zw;1X(y}6@R7d%jFV)~_!$CfX%U5F(EFCdJWHlk%?-D+;e{}s`lI96_A~6Wm96rMIh4rZVl^-9SwoUBw zBRcI5=LemLa)$95?$=IF0@I|TG%Mb_mY_5GCHH%dKQUN3czkzDQs-#XZ0We}Y5vo=U>W+-35yERYAkJrq}ld%&5!kc9dK(vBQ@Ls+UZTxfvubJm`T=Bwy*+~-* z@6?M+u>Ro{yp7!LQgH6Y+D*LKfHL;mGCP0Z(MM6)n literal 0 HcmV?d00001 diff 
--git a/YOCO/imgs/inference.png b/YOCO/imgs/inference.png new file mode 100644 index 0000000000000000000000000000000000000000..0751e0a634a78522b379612163ca521df7b58167 GIT binary patch literal 100685 zcmeFZXIK+m*EX!ED2gQ_MXCk?Y0`UD14t7QLg>9$ks3-6Q3zc?nlwcP1SEh+4NZ_< zM0yQEDAGxQP?LACUe9x1-}mo(e|*P%cxcAiGkf;lYprvwz1BGsdhf2%+0*o=j~qF2 zR{0L({*famD32UDHg$>|I3xMwSsUIZA)z#E;`6M^uh7{5igV^!l%B$c`Kdu{(17*ERaU@8O}!;Xi+VkG(wl zx5t-czpp+)`SRHB<6~2Y_q}#{cnA2QcyLG0?Z^?qy2HOmuUqPm9yua^L>Y2Z+xzI! zI0f2i&YOSD&DlEk&Z)=9Yq#zzhr%h9)!ydVKV8fXMwuAARWRt+(T-49`S$K(AEG5sCW?T?ZE9Qs2D7%)CqVd0GocM^Fa`dBINAQur6^bR07SR@haKUhn zrx1sPhijV5tPk2Uu!#j{O zGq`#~CMq!{``0BATvKeIY*C{-a%657TezqBT*_BmYw)GtxZD9z0#jYS_(1hx)mF&x zg)3i-0;Siz&AhVZzILqAtBO=q@}nSj2B(j;7S zF$SjTB=?Hb>eD(Fsi7pS-lk^TAAI%R^=ET30gUU*NV)KG1caFJ=9ki+Aaho}A+Mly zPwj~98rukd+w@1B%t#l}?TH;UO7l|PR(s7xCiPi5D$={{(wtj2-Ch9rTx5%!=bH;j zA$9&1Z&6|84bnQ!X+OmGG8`}{qcMkBwMTVtj>|F}bF$~xHGaBQjFhwcLI~`!kuZOI z6I0+-akG;g3uUa$|L&$!Lb|D*Au8(SG2v7qoV{}j%OX5{$@L4RPJn6t^f8bU1j~2! z64{4~LsB}=mDgEqKHgfq`a_Q4;K>B6!f;$^(2+p9(^wN?2lsZJqCw8Im0jy9rLQ*x;bl6Z?>~)1i-^a0Uxpj3?Xi;2U5383s~@vqcqGzLj1 zPHd(5%dxCaAWPD;TaS0&X5ETxvAA9)G3ar(krPx=Q!g>gQhiM1tKh^t=$!mhaTp#r=N#EJc**8H+XC+k3Ws zL2%0P++2$iD>DAFt9!HEO;!p^?U| z7Hk25YlTQP`4xtqH~Tl;+X%%U*FK(Am)`guT$gR}0)#&C5|vimzOLUhvsn4BtDEVOc(mHDE8T7{OoEa#LK z2o4VLf#?kv<(swmgg&-56$^7FU39hluwqcK+h)1cz$z%=W!^C3GcLG;-$MFsa@jMx zA-bblp7czEw<6Xmz!&rlafufhSWE5ro6)&DoFJ0cSFwPofD7v*$Z0Tbr$A4uFvs6r{g*)TKk8G0L)*v%(}EM_#)sN1j&4b>1LCdEX^<%R0&bE$--#gfVXpQR4s4t9IP+c&+Ktj`lDO8KLtxLJ5wFF zkucomLk~lACoMoHDXU%*g=oDE^Vez44!mfdkO9A7-zSIV27@(y%G{-qOHyTB{7Oi96hVL^lZp7Z+d`Y{hHKK$uI?mc>Z)skS3TU<@0 zX2z`7g8#KCB(ghF5;?;TULy)9wVIrgm76+twy8C*OZJkc*O`a?_mc0BLUPZI=wE%H z9i)fdgR!UZxqR^|Y!B6pp=eD!D@ut*K+s);{4?YWDd>*8k9e*Tlkk>2eW3~j6cQD` z9*NxFk>E3bUhKy6eD$f-%)ae>!1*r3NhNIRR%i9NP?8S=ChO!pnkADqw5IrNHW6 zrJ_@$3u+l+OwK&;Y0P1ZZZBmSeeIkwb0}of(*V$F$%|Y&N_K|x8OwYIZ4Add538Xk zX5-!m%N!fkEe0f_j-zV+{Dp$?lL?WeI3%F3BO|G#|p%xAf 
z(RIt_TwV_RX|G>*&z#+j_keo{OACRi;6YYwTwM{{4?Or4%if-2MTAVjal=n@D3rl0~`?l#RD2WtjAVP!QMm?^? zL{@rH_Q?qv!y;4gc0P_?Wy5b&CR=#`wG&vE||sQ>_N!w^b)5q z+lP?jiJF_Wxi@ygg9F006(H=I#%-S{OYcDvL?#Bx)QqwKjwuFE@)!Ejj;~kZbfH#>wqutBl6V*m zXNj6c82Ook_1)>bnAQQa*ryVGn_%2RpBC7dxyY!|^j!+clmth2EhCg0^+4Ux!1CC& znyg3zI*LgD>i+8LZs+b0v$=gGfD4c6@=pXGMEeZjKga^hrKUAGAh2EG&)pIiEq(R7 z0s>j<1~KmM*|>HMRoOx{Mu_=Y)r0%F-nb=XJB&MFdfgc05;eqe%dSkGSt#0t z|FfS)YvWou8%D~QuE)2Q-UEGLQBR60-^@AI_>d5_t=)K=RhB`(xx$rlEen}Ts44cq zI#pFT*06}}R6b(Nb|NQTbd$Wsp`(RMJYBKs!$vYWKq1u_qYX+!kK%MYu~tOVz}tu6 zNlOOcCXXT3CU1{M88Gq6R3d0PL62p0x@!=381C>}1v+I**L*wFG3!9J7wVPR)=6?8%neQr_-#@_E-9~#F5YoZ*tN6^ch-Tz8PtH**7MFac^GdEYZa;vKsde zZpVA4_i)3Dw_+#4A2*}Pz#2E=drlBVXgaY@0aI*LlGzZf>v}Sh3Eb9}-c#rX;p-X1 z@WoA@UqxN_PG8(TKoTwZs%HEIDct6eYAP(CS0%73(LoWpxMkb@JJ67;qr1_=S82z; z`lGhRqKhgC73>xme#x2A8Q0wk_A}ZW@BjsaxGX=700boNiPKvIf!>p_u8<1to*?Ci@A(xuI*xt|Yr zVx}}?JFgTDJwagWDKtlLYh0pcd$dKpl49zxm^{0qsNxuFrkq}4D>@gKX6tuTAq1bf zJRi2@D4CmOgm*ttbGboIB$H=77G`gKzq|mejL?+e;aZeEiB%?<78eHOhfjaj1XVZ6 z2c*m`gIcf;j9Lj1rO%zr!5k1ZAkrJnT5OIJQ(WrHOPZcGG9Op!%dcG|uORubxv+LU z84y+(Cq3=eox5%^+y8baVCP!g(`q=je*(2yorJfDv+(WGD)BkQ5?ii&UYTFq84m!N z7Y>{d9407PfEj7qZNR7kM+PRrKwT(KDrzBCoW$(k^^WeE+;-VFY^8bFnemOULzQZD zu=w*(k%H3rM~p#|_tyJg z(5zDd;D@!MoP_AR$q^2|J9|l{ajR8~9#YHk(;RL~)HH!uFRVP6 zUEZ|w6jphv0o=vn79QL7fdfrAhqS zGOuw0KiUCxyLCD%$HnI>5f>|*QhjcIs@i#;M4dGBo?{7 zjP#Q$;L-?fV$k-W%Ut&FA)Eu1MYnLzTtu8GGBkQzF1F#iel0GbEK*#2Dij1y72#}#WXrs8cl^RJC=qEnm{(3ljMO?pjqYmZD)UPtrCetd7sT zkDVelLs-{$nD6A`mVewEC6`T1IETa>lP`7nZaLy$SDiu3x#KuAQH9-Nq0MVia^vIN zARm_RQWGZ}I$=Ufxwf5?S4F+^v}aD@M0*YQ!rjOglPAyF?&9b=M|Dl81n%w(>Warj zbNdH5M$KhIq}iv0KvIzB@iMQk#u+8jy3aqpNvvHPLQd>y*K}h&<#?w$LDjhViMZJ2 z^GXM+N`_irh#zx&4I96Mc9gW*+MNB*W`xaMu70Sts@!U^(u_(hQO}i(j+=Q=kE@nu znvnX!H|OaGAqM7Ju;vzIbxeN;d`Sdd_M6s{sAbn#E4@RD_?cXx0(t93#Bx0B4O3e? 
zfI|8y%!Ex0CYZZ(XK}+0z5{YQy)TM#2ZS6)Rk^a_IM|`n)gR1;#*i_pdS&gjZ5b%kR}M(odW6FMQD(684*-tXw{+DK@UkxbWi zQYu~UUzA=G*9fX4RpNkLFUlRk0d?_C)K>DR|o=eB}U^+PI+B3!WsgCW(H_oxOmwg+$k^I8` z2Ufh69}h_r?HW!!bPx`KCXJ_sF5_My1H!#68`W8jX<}G5I5fLh%%4dgO@>ww^K~p` z3@ES^*1fSD;|&Su2GhA0PlIJjyWd^b!0#U~urFK?^<(dt#xaYy`5Tv*==IKFURN)8 zqg6{1$8$}DHWFqNn`+@AX3$m=--1dHW2NZ>L&qVw2SeFNx^h1H^tv4DL(IY5Mklam zWVv4B95`2nol)qf9@I0>KGkh>+TuWJpyQiKrVJ`GpBrwqs$?%YLmDQ$`>xe5-Dy!} zUp;at?w3yiD2D)+Qm0S7T|Kl`9~Uo8(Gl09cG24MtL0FmGLNA9JE^M+Ec_ZJ*3bg6 zJZime9kPaNlz5diGu1c}Dcm_eUtc4Ah@s~Frct!S_tcW z3QI;`%LoVg8GLrAye{sE1r_M0;xb3gPlad5n+c+(5zQH&i!Uo!Mdx(984LpBxaGFO za=1L2)%uHjoqgpyPb27;`EtJZVp6?Cv91Q2h`mW$lq%0w^f1ZqMYAW#N1Ef%6`eaJ zEMYcZXPpEFP*CyWHi9P}cCPf*^E7elPTtx@TOEZ@4|Kj7pyuWbYV-Z#VQZ8If|n7l zjc2)O&~OC<&EEH6T>T$!y=?6_6|=8bJ8#P$67;&8cy@ONItiUQ*CVd-i9f)zE1%x` zGpo%R9zn zC8;z_`0c#i%jN#P_2j$idqd2+X^Dp>_r(*)_?50+#k0zrzCT47$G>lU?D-jn4ZE*a zG%%QGa~lpGa{70hjufcKLnQ@}m(C;2or2qH`iMo0)D|hx$UD%1E}qV`9FJ z$e~Xwfy#d>eDB+Y268zLWDSN_H52(e4A5KQ+gMjQKGD?CPI z{&oBQ@h<1Wg)t2LEesLjVALuXanD`KiS#g_mz4PpZ^W>b0x8q1bv8?S)5gELSnXtA zRj*JTdTu?bwH^kMmNAbF{WNiDVpCE<5S4=4f9AW1Mc{8y-E!b`!T74h;D$ zGegXb4Gup5q5?<;wtGGM4!TlkeLQ$)Ee+<4^%{ewt<5HW9%Q4$lO82PuoUfsFxJ|X zh2m3I>n9d2c5|+!B0~o5VZEXboYh%6@zmRz`LeYrJ_1y4n3o>GVQ!%pQnUMdFFjFa z{K8q^M3IC(v7L$9ScQ;+0v}R7JrBk=B_TeGX>a%AxM>oqd){f_%yg@e4_~mi+MymRA9vll`LCnk6x-4LQw7#vx1#g5TK>QUi5F3FYbM!c<-Hr3Y<5YDXBFn zSteJHH9as6X%4SlM&)>?lT7@jr+~Dmf*=b9H5 zhO9)9)SrJ=m+1^l$A?(ye&pw7udb!*ejl{g z*)O-1W1FfAM8G{hRRNp8!rX^`!UJh&9bVM?1F~d!eLMb6Ot|bEB$d|Hy74oJ#C%nc z?i3RzCK`??t~bQfo`q6Gk>7UwE~o;|S~*~+QraPYYTz5to^Za1O)R=5 zx$rXEW#LA9_q5o$6o!ZtSTsU<>JhnrcqO(!hb5}qO4CApe5c2lAY-3w`i^^4F+?qR z4(gy0)*?3_3xj&SDgksGYghxz&y(*yZq_qsPI=?uXnk1VAat>X#GThxLKVF8;U3g8 ztcg~~W8R^iFrK(oeZzORzL!HWwWT)Xhz)^CQhksuRfc>s{N zsm=J9E6qobZ~K`0bq_Hh7k#&)GaF(^@9GI5PF;%&-Gh{TEE&EM=cle}^i|wNp!?uH zbcH^$?A^izr%_l5tv{Pm03lZ{>np~kZ_Xw1#G>;wv3fp^BsLgNVqn50K7!8d^=!`o ziLf7WnY*nVDl?Z`6v>eB^uW%I9qJbZh@#94YNw3Il)vB$vHr{VA`6ug^-#mNAAxku*<$+r+e*SCQ(|r 
z6~OIgjEcIx_gSgy`wnT;KhGw)ufrr&n%8^`?kuwQ#DpiTo?7Haa=uh%rp8M9*r111 zNZ2`#Rfk&RqNQsuaOW$~DC?KfEPHnvwN!U!iqe%=0=Vr~Q3z#pPE~GkZSC9-dA!sH z@cg;rF+iCluRc)hzy*Tj(!V(3tIo1eYkDa(KGCsBN;rjWU07Abwk{XWXcfsLG={tF zY4W99QF?~>C&vYg2uoL++PKlUuxY%`bcy%AUtwM;Yz`Hp_4e02Dh z7Wf?Fe3=Q__{pk`(WM}ESfy3*&cLxf0h0*{FeY&&fzMurou*yI=&NAu$n<3puy3aY za({k&sg|uyKm~tb?`07+abC}#xaQ)vt)tLSc;a+SeVj`tB1L!g~@o@6x^68MLE_5vc%|0KrVxG z+v0n+ALpLqwKcdDgl`bE=sw+RpfCz!#gQ^lN@M(hW}lC?^`W*kmeAKhTsrIfTtxA} z8H8fA#du9_mh)j)j-juDsTMtEh&@*=hKeOYao9?nql)2ux5FT9rcswS@ ziWGK2hys33!BPB45rJyYt z2QT0fHK7(!qY1SSMk`Fm^)?Ma4N;`1$3@IAp{{o)+9`Fc$o4 zD0$xBth1N}xmApNV>`sCSyp50%U*u6{msSTYFGRoIbY=bd$*zH?&e~`;@N?8O}?sH&VgEz$158I)b$|HYR%Z zgOQa6_9qE&_Bq_bJGsyy4)fTV8iAn{R-CWU$w7k+$>`-d#j)$=Nw}7ILt$f-^Zl}G zHe?P%dCoRw_Y(Q@cUM{^=g^fa+sI0+5wjA9;u1>_w^sy5wg0`1B-j<5_rBGCyh>yDp$qF65KM_ON@81*^rbI^6(@BSzj!>D*GaZC zWSr*UM?#LA?Df8lP>`M8iJ$tye=JaKW#78f2UlA5n$?&p26k$*?Z!H`jFl)ob*{QC zwi%}AZ(KdF6HCz$A0HnVE@}yf!+Uo(7tizAFWMRzU-k}18Wc1c=S3%9tKpg2N3s;i z60b08gl{V}3b&UEY*&NNMYJ5(?y@RFvM8nY(`Gm0?Yh}?1QNe`1dZ8Lkk08wjzuTu zv|Dh-`6OSe0-MvbMbF&^<7Bu8Gn(%ZLAenqB|kwR#M4WZv3M$xUCoM{;|&-nwgd}L zOMzG-%cXDqEHw#%TFx{6;^P&Hp12#@S)fWcOSnw*Zwv?H8bwgV*3o1k}#vrn^LpDxgC(%o2!2WyXO+E_IX#b=#2Ah3Y`?++r8k_4s9gzsxqP`Fn z)z4=gx8{9T#$ruYsNC9338(IO&*~X%8_N^nY~ZYUpd+jK*Ag$x2Ls{0*jAk0VbUF_ zQ=dv@4q_IG41_gztj74ZU1}B+{)g#mU3kWyN|t-|P*{)TLyqIP8fZ1lYTz?duUWg& zJ`Dw5`v*FO9hQI^MSk8tkF0YuPY+;p?KwW1f zTlg6$oH-%doJ!=K5(VuHM#wR)w<-j`WwacZ(K1O!ZBI; z{7aiP=M^(?Ri{Ppe+=Zxmxq9-Ojy0r`;&(9KW^q%1={rN4QPLl?eB;J69B1|`khQ> z{+Bkr(||L;F@7&ki6Ia>%}BLu{7YNMeIWOnFfyRFy|Qu@y} z(*&UHLtp8Qe-RHaEfDwrKZzi{`T(*D15hCjUy z=N)cvNay;YXn>cKYT8fN6>O$@O^l5d?fgyIVoYM;u=)#qm2H$vXo)t?2^YOf@{BU+?+vXa7VG z9^PffQ_YHo)IC-D_db1zxd|Y}&glsJgfu4EKnl3g{&?woyTKn>*TcaTa{YkOsqb## z@}e#i`$cCNL>aaEM6JhTiuSo#odssaX@1l$y1ao;#Ph>;}phzE~cl`-61N&NW#L|3P#M%R)pi<9Lxd`7%fI_{3om) z=CiL+4t5ljzWSHd{Qu&%QX?S)``bT)<-^5&af(MNTS{?^eaadn77FY@;Cbug-us@7 
zCGA~BkE@iHF1z+3Wxxet+dcNFm!nOhbVHM%ttO=yPV-fy0R%hd<0dJWoS3=Ew*iViahfD(4(@j_dRk=sXFqcVe15B|zCqe!NqkkeY4p zEnbZ(0{IlhyrSFEzzzzB3}?mJY%GEXA?|NW>}o&u=bv5e+|n|LK8L5*141xM$uqxw z5c`d19B7WM$Vzn#zCL>u8K#EaOqBjcJRpW$O}F_p*9}H+OeKi4TV<|9f;b?SWG?q- zd<4G4ff3wOHK0)??RXZ&={?qo3+wN4qu89P6b??F8JS@AXx{2)xzwFOS0p<%>%Xa> z(L0+4LWOPHX@*1%@;2|YTwI5N2zkQ^ERa2AkllK9e@#&Hw*BT;pgiE~%NOkDKK4AQ zbA%lug`2Y}#CFd+=y0>pMhCFOgc9mwL(ydlXyGZ6jDB$) zH}{ANjb2;#ITudb4lIepnFIS9qMEKB7j+)<9t!11FTa1O#}jwFbbFG{!YX3huby!$ zht7p$+YT^*sazElYKtYTZY}iybRFrPE~;=mxNQIm%ivIld-U+X?U?Nc$T@t28MLk* zkn!D5N5pX38uqYUl;Z-+Q0}XxgP&!6-5=sU{Ozrv{p4^@<_1BTnY+ym!l! z2MPPh*X{Xo_ojJO4G9^3-MgbmJZo42L)J<$-SlUWM{VkLDd&`ey!mPg9p5xT)PZMO zXh%in@@nUYF^wJ77sVt7v8cifB7Xv>l(PoC8Cs6}^6DAZ2&pC_nU`?F06yp;)dXvver z<$kUWZ;5L#Q%zqZsy50|R+*~q%~3y1e%{>F8d_)XEK;pcQCy)hUr}R*z{zEsWUX zbCM?H5;gWDc`=oDJ5dPE2P%EF*TJ>9reno!lxt!j5>pK9W9NqwkJ`@OH@4$=giHD+ zq77?2+&_3eqSVYy@;uiqqxVug%tALM)Zn_xb6)X%c`MKA3Qxj9L_4+r*r@&SyHQ(J z{Xo@n|M~oL7%!y(PA5x4Y0PWrgzD~Gyb}Dg0()5#XL4U#T$=KbG;b5UNjxqYUkt~2 zJ3=OQMV;YyDA%@JPlnDgiOnFLKDJ&{Wj}-Ww!hyMbvK1vlIVl(vix@S*k+tJ+?!G` z)WWZ?0vK~-rA)8B^TcW=eneyS#?flcKFqr}?{g#rc03@O4faOt7Wlb*yJ0rT3Dlbp zK$1}{F_(?9s-&#rS467o9;j+@EI^_mvV*&p9cm0ZHq)xz8%yy-9duGucd~6huI!U_ z@#LP!LE>u}jeXL~EE6c66x!qq^`I-}r3!v+_*NWipY`=%M_|E3lyPTl-pf@TW6)nR zC?e(D_H@J%&mpiZ)1Hk;OAu2~H`eRt9{n6wric|Ci1%aZ%%hTHaW#gJ#Mg^Uq_66? 
z73inV`nshM3^)f;>1TH)q`MEE!@ijqn-eEvcQY5NAXeku_n^02A9SEUx$f1sdHY)f zuL-<&Kim*bOMTh+DU86U7sWYB*{N*33?pxIm#%<%5?~`6&C^`FVs>q{+bJ9~@QV*V zAouf=q6yL7a0nvNL~mGvo>-n=^pbd1v&`$JKWf2d{Tk{L`ZgrWe-G)aI3@^2J!;9D zKwQ?;F{VCyGF~L=UVu7JT$#F@EV$DvVhFIQsQqYUZUr_Kf4;)~{g^1bq1VUR(4IMf zA{5$kkPJg(A8H_5o^-wmpUyHYaS*LwOYY$IF=@?sN5u7cuR9!2AM38YjLKjvGJqhU zw`!s%M9fjhG#!&#VP+KaI`@uH+9D@cub<1x6oOmu1#CQjB4w^314l|EaUNx6H%lUw z4aT!B05lxA{8c!q*NafQmiost*L^zEF&UjsNx#BK4Zme{X{Y%F0A=>nymGpP(D_7; zL7*_24DYX-EUq3jDbO!R@oFsjwqKG#2#=%~zW0{j*famED>10%1JqhYGa`-(y6QoT zW#UdNjK+CXs2g#!ASn`^G}7ycnw{j!7nVsn0*a&|RNiIo6hNErI2E-L-VzV=xAuE` z_yRyN%9Z<7| z5EY7(ok``FzOnVy^DIHzSSA4m-pVlDkgL_}6t76fb0u_hAHzf5*bU1_B4jfI&K-2K z=n345i~E}R8K*w?!DZtB{@Jj&nwt$}y6m+SKzwamwReHxq4q{++z3&CJOe0f12dd- zDz=LSxWg&M8Sm7zDcR+iwOp6bLL<2d&I_G`oI6lTOeCE&xi4xFA@S_kU9j0u)KD)|Z1Kot z6dSx+QJSeLnh~l;x(ONX)}Ao-8UARnmB(S#`i%Sa&UseR^BXANWaWd}f_d#NqIOZs z9d)A-7gndqOSCU$eBBJTdKJDU!)9sXPf|nA^)b<)gm#ACF!sFcZBW@oi!Fp4B=cNW z*+c`O*If0<{ES};%Wo10(15FxYtV!@lLDyiJT47xyAEy+gdEt#gQ?VygJzo$`xG~j z6Qnw_keflv)OL}vA)5sT29_e<%tmtyK z^lW%rP&n7g1!nQJ$_1<8y#SJ+6-9&BrWX$VzhAYZQ<}`z-PU5IeBj9w2P0gHXP|)= z49xICy>8eAX!Qluym_ze!48zR-()`Y*0fSLGKNcpsUxB-jO(?v3rw_!KUGTajMZ2g zcSr6D&c!N}YR`EYRp;j3R!Eu7V$4<*O&)7s{mfXpugn8*C#G3tLF++CH7pCQfV~6< z4LaBITd_s$o4q^z|CGb%mX_Y9wSjNdCXw(SC~`I23ZYv#R{gqQVeSg*Q?*6?!yVOE z=c)Vh7J!!u&6K=S?S7{h*srX812wmBKfyhz%F#A5^G0Q!&{>oz4tj1+nSYhpP&h>66nte7nZJBd$Q!F=|!5_i*EI z2m}#V*S-rR$mCfL?}uku&B$7v)s*Wp^l3;R@D*GGh)oYt^O*RUbOTmrcrJpfJtFbC zTTcllkvBuC6{-T5cLv4qPrQbJ2$^;)p7&@VHo^KJzrO`Q>)%ZXo7<@%EYwt1 zX4?#UaNIT z^S-={7ZEbcvQnQB** zcmd^g5=sRSgn`h@S#q^r*Haup@y1>TSI#s4$=I7ZoyPKJe&B&}nDnMM7cMf-y>TBI z73^GZFSya+kuDNQ9WB^YxaUmPi}pmC4azJ|TMz9RRhdzF3occ1B=dJ1|KVub`1B%P%0o1-3A zD8mGJe_V}U7f-V6$l5<0xrdDl#HAcV`SOiNjm7D{RHA4&F_J9$d$jy->i9yI7A&)ubK7Ags_e1mb-0EDF38HCQBljoxmuk3K?6wnr5B4nIy_&|t#q>tL>*vD1 zTNYn)aY2|vdgAYDj_(TQk6#nBO$Qo1J8(9R={nFwgcpTn4}p$`4dk}IAH-4pRh4;GKdPAzpWvBSbw}ZW1Y%ltNq;t$;2UmjBL|oS9f1WE!v3;&V 
zbos2?uacEMUm+TeerFq-BIZKW)vT_3fBZ%`Ov(j{&!dIEoz^?C8JFMY!M#ZaEsIR> zmp~lkkVaW+Z5)1>vjMj#!{bS-8OnIxTI2xQX-6oph?AKwOq_3O7KFdl7+9``idI60seIkDWN zu-L9}jZ(|(X!w(FWGmV!kYCAP+jsKme$??&)X?0NaCu|p*nk;$)#HyC*@e5;b6Bz# z&Na9jFE5sJDjB$^s4b*j(sY;J(w3#uFb3X!_AW=m^zccc&x}-e``Fl3X&7>SdI+QU z=y+GgW)C)M9<)O+lj|d-w5JM~n?tJS&K1u=LxLz8mg36TY(A|if(Q|GJaAt(_WAe| zmw@-S&y;I9UHpZRFW-{MaU~D(W+I+*cdGp3$zMU%@)xuwFWic zRRb!;0!AsST)&l$`Z94nzgj+G!m35L~JQfB=(i`E~<5);zJyJHzW( zscKsP=$#L-CQ9pTHI>Bd)rEj>JLW(F{>A{YMC8|~0!7dGWtu-!f|iIl?RL*6wHEAm z6$4Q=)%rS8OkwH5D`BcTA2N$Av}wkRb^6M3Rzfu;+IB*wv+m$w9^uX(Oj@&SV)+9K zlz7j-*L^@^GWv0T@BQO4Ep;c1#wuq|#}hL|fLYI;linx(5U#2={^oW%9$LlE+2q{S zKdYxd#$mckwnKXk-&lSf@&4Na;kuq|^H}XSI)(8GW5Jd7a7om_3^YKFTkpo4Tcdrg zCNplK44j?6XAS9r-tH}KjVs8kU}V}fy1v7p?NZchvY#(c@532Nu_i&h^n5OPj32S<{)&*EayuutEYqKN5zfy$q z*~beCl9o5QMn*=S2dGUqhP58JmCUs6U!SRu|KeK=avGEaf}%EI3_vx zI#G4wqVHo9XQS^kip|)EJIBFJ)5g1YS(scnrgKpIrq%aL`D!PijbU4Qw2PI68DRy2 z9eMQ`hDQ)8eKUN^ZEZ@g&D?HI*PQUsn*pK)x__i`vv{9zd{(IK@2*r5G2AIB=%brC z-H-|p$l$l`mm2Z4-o2er8%?yDKDaj0R*;jF&>5mS>$dT1RHS>wx48|^6)!j#m+6#9 zPSwGE27j%@G1d5B(Z1>7*z;o_Tdlvs=n|Nuk_+2cp}?v-#ScO@qN*~RX+`PWFRpev zjkuf@g=ZO!Y)p)89k}Z*C1*~T=d7&KFMZZxi1LDOj26{8?J*@{|A{{tS4AFP`VK}2w5`?ibi7C8+TBRt_K_|%kFXo&x%G*Em{{7hwP7oF zT`J-Ep2ER-aM)J6sgcnqe%QBi<)>3$X;PFqDDn0K^%idlyR1C}8!8R^DvyE94z)$~ zOgl44U{{!Px<+hHa-Nz)eZ?WFXQET&8+=}cDgrsT8rSBi$SMb%c#Y6CW4jzK*goagrbF%Fin9Qy3q<;LQA@`Yx9 zm#%NXY{*5OR`^D@4wrvpcF5wO`9Tj}^|b~&?PA-(>)SoJAdgy@*s~umivG%DXK}C9 z(;?P^e1;eIYf#cRSNMKTBnjVca4hB8^WyF@RvgXHepCZNAr+)){Uxs!ygSC1nXA@r zt$8un&P%>A8thZh>$6WV)C&SaU?uyY&Nl$x^i0qyM4QQLCoP46HJvI(IiguXnj1`- zZ>Ru4+i55ok7*?jgbtQdG^AQ1IQRNN&`}{3gY*t*>C6IPoH4*S zz4rhfPDC-^ZlVN7({}n&fOQnC1MG>9#WiK)1HSON%4cu5PmG52ko8QuT zA@kcKRgnRVo9N&qBI<09*Mt#Cg#B4)H5zJvpH1Uo`la;=<+HY_yk%?{KE}ID4O6N` zOa81i27-#|OQO7hvwEjcvSaADf{Ay*ec|01Pf32#kp~JeF5U=^-6bw4*NE{=Ar{N@ zpU*jsYwQHfXVGK#6?A&<4-T3rIdcuKBQ6FrVFI&$ec^=%EsscsPnZ*WgAIJDf)7T% zE*}$!sZvC{bx>Gk@}ub5^1e+7&+5|D7Ss)9E--@vXav;A_Hgs0L{LR^ci7O?%pBm= 
ziw+?Bs1R10YXC_}Y|1*g*?7KPtg4g^qng3wI)_9lVg(3A$jq5XB7iyE7dUb${2B`L6~Zn5PaZq|$17RjF#Y&gANyh{@G-`m!=M1)XU(D|%AaiR67&@O?>5IaPp~9HgXKZ?^ z+tBk@P&E6^NOchW^8gzNAfiPNWBZ>}utZ1PnI_^YyfrQHVh3r#ekpFfIkW>+pcRqlU$`%igm1{URIbLHjuHg^?klsiwRb4uOg^xPb#-4OKT@h2<>+ znN5>0fjen|>h;R@<6Sc#^D?6!ZtWH{)OU>4nJES%mK5!)0FaRC?ftl!3zx7pU(N{@lvBlPK6Zz zxK!<~X*1nI8VJ6*P__p~4ej1MWOq07H{alb&%F8vS?W(ZQU1&coM(4t-VvbaAu#J} z-dZjf>0=TvOrEdsCn5fKC~8_);=Fd0Si)k1HJDvLC)=WnEjvh;UDd*WsfZTv;Tqg? z)3d+$2R!L@hg8Jy_dd=&2~HE}9Sa6gNAWCxSP>XSVVSv>kKDg~F=)n4GX{2g)ZYcvxZP*9MR#eBZaHbGp}meGllR0-5Y^sjW@YpZdj-z=Ttlv8a4$ ze)_Jk?QEplabv#BS}gU`5JfDV4Ft;~#mP+)qYtjoFLGr3Nx*+pcl?B*nfOh$+keeM z_d=k?XHPWSR1w?g2dzFi50T<_1yG{Gn6Np2ARYASOnJ^h_a~b8pQ5TWJnR65;QpP{ z0N$L^56o`vxa%TbC6j3~ffy6k+wPf9!Gz@e(gI$9S?2idt^nI##o)cLt=aLd29%L9 zX(-yP+YG@vbtWqPBno80%CFD1*uEZgvyyJkr}3!hY^2 zNm?F=$^ll@xIot5a@I>WGH!urFD1*VH6mz-_Bno`fgND-10PMPrEx7o#BKQ4`Nkig4G6;! zgx?;pFR@g~0;d!&o2@pWod&XoZ^jAawSdH2(Kx;0BHT{LI z-y^$HcmK+4K-e`#`x%@AEvR?%_KoklVzn}4sndn{sx;!x)Bl^3~!r&8rSw;;~0cV{nr~S{a zxTlQWsq0o`WluXLOtp5bzeAbtw|WG6o?g1p>+?`5+gj&1`;AHO4DokIX(4spO^=uE zr1UTYJ#Q_Y5&DC4bjFkQMJ=Su2m#d#^KTwaSI1R#zOlJ7X=6d&qEl3QSTrB_KkU8r zS5@2hKP=rXC3Pf~ZloK5gP=$pxe9gJ$TKl%L;^zXOl6cS9xi`Xau2`l6rpRFtV8hN}A1cF(H4g-| zdN=z4x<`j@_UtcW5`aF|H&{AaGZ`k8Dh!mY6fm7v?z06IH?W@E(@o*M#lC<*$YxDH z|J_O;39=HMaTpV885Awq0H&8*XsxtTf)OUk=vE68@drwq2w}8;{eTME4?2{&*x4;D zBU?Od>w1L;L)d6f0qP$+<%NE%0{W%u&%9556+)3iC?l%U1od^l-bQyoX<1P0)te9J zH|I=_UOzl2W0TYrFR}m_RMBu}@e?2b)tjTEqtEG+^)APq0i?2LhwztHqT8X=dXv!y z)o`Z3W%O=N)F)9vZP5X1dncRY6;ge7Zo&*gh1Gz+e)|mj zH+ciM5xf_f_i>bwc-3-XMT7%k1=!@X`!bP(4Oll`1}FAknh$)aWr=qe*o1>8JEAk%!%=Q9o_s%5Ha<)0j_!nLlqYF|Wvnj>7l)dVl1{7wS$uSchBN}t^G0J!MN zVECQ~OlAVV^S~aGwntE|Jv?a9MK<5{*|WfV`9>X80r@QrAR-1#mfx9B?1=Dj_D>32 zcFF19m#+Tb6L&GaY4H0r6UASj@xtT1_wpQ6Bn<*sMag^h-$}@qfqt$^JBbRg1ZzjX z>uPyhzU-mrn1zv03~&p@_fgni<_dh!!Y5h-=xqV$)tZ}G+jVwA$9_U^Q1)skJ@)tK z%oTOx0luP$SJ4!8i5orUo<3~Et{ak)69qW?@Fj-KFVu7|fkIStH#Bt?Q|IkM%VL)z zQ$V;M=Wn-T@o+ff6|%lgN>`n3(fyq5dIWvM9ub2q0R{m(Ui 
z$#_I(Kb0YQ=EU5#CG3o=X2!xE4xj-e!u~O<40Kfn)0pw^CE^h{ONUh1eT_$woU=pP zYQiZBC;qbCaQ#+*H~XxG3L1<0Y+j?*K+b#*(7m-rG&$V@AhZ-#2wJ-R+(#~fmw&eC zXV8r}S*#~+sxigB^_<`tW2}IS?q+!VcD^byweTPl6)1`K2RHwc(XS69 zz}eKKl!D0Lh4M2j*~giRDhQ1I zll^}ZC9)2o+{vE{sKOi~X znf|`XzqHa07t-W;^BHn-l%!(y3l9LQZcOtpTDe23yg8- zRpS1K`pyBTKXy7i*}?xY#sL7$B$Tq4e+&2@r)U0mh4DcE@{o%!@cXxsiSXQS!<3}< zM*VxNSq>O8X?aWY8#8|WaxH)tCqr)xWB+5!m=eH}c*YF>6Z!w7Xb=NxQOEY8>{q`5 z(5xBLOnkUysxijMl+1H54Tc5os&v-*AZW@@%XE-TGZouPXVO%<8A0)VD#5QfRiV7fc_-U7}5U6n64`@Hd4mL{%;OJ`|nd7%eVyA{}|gu zdCTAMsfzsK`gow}R-S*?-g=ZA+a}RrdwjRCw6ydXSo2Bc-X{Lf@9Hu_&6hYjjM&6F zTn?`_a294Qs^%K`$G>nu&O<2#?k>S~mTm{!+e^p8N5dBSF?B2~OuO~Kf9O97{yb@A zuc>Ra`SE<#daCPe^TNutk)AMmQMd$F3?yNX%S6z1z zuea0)6xcRZkP*ibjl{nKSPX1}fPl;KTHpRnmxAX}ipSa98DUHV_z@4#1__ZF83xYb z@x)ZbjHRA$zQ@#@GF~kKZ;uOtE@zp9k}m3%0 z@guc=({kLQmv`*) zS6oEP5ixh-`zr(&2=n3kU)jrkHT(%5D(JDDwi%P{`?w|q_$}3L=h3la70!3UQy&Jk zWGu%ndl(lB<^9q=O;wPkPS^k?omRs|u8HvCo?HJC$DqS{+UGG?JQ<=63&D49%lf#B zf+QB_G#V;se;s0aLQqYatK;6!yI#MO=8IWdTHWJZLFc~bBOV(!(O_)awVriI&GfS- zA}0s1D>&vH-M&Lj*ma8Sx%@iEVB4BmS^{cFGHd-x6gUOPt9d1h&3vJXV?@iu=ueAj z&O}%uSbe?Bk4eF?nTN5Q+hW34py6ujesJVsI?Uha{PVF%*bCx~k1=@^%Vw=5PYI2^ zBk=;(Q{49C50izh!tzilDp((J|MY5*W8j9`0G#lk0bUAH!sVD$A*bv_%ev?R@Uoct zPwB?{H`6dA7J2pt)GgTW42YD)=R3%$>Sp>aDtKLv%mH+qNtGVVk0hrI{c`reHIVd$ zoC5Sw0d!%tr+7j(r+Z-jz3ch&;??8fFM+W?PpBEwV^`7XRN$sU$-T}+3I2T%NZVzjnEyTRtAf>`9D`i}lcc zM4;gHr~lWThh<1qRY^HGuiIt&VUp&eFndWyS8N&osqyxt$cmz?#i^ZL=o%9)`zO*H zl!UHUXDn5pH?A!Z86R~f{W#fmn)bZVit=y$viY-kRW1#(e4-_^_ae1U_sf6?)5oS9 zIT772ev8^}yC+yB{ZzEY+Ty?5R4u4j%UxIKX;i=!Px6k%X~U*4GZV{I{pdqE;XjE? 
zbAYws-*ViJ9Z&x8SwUy;oa5y-cZ**K?wcgmkbNcrcF&&Mm6CB|y`M2nq!XS}uNv%* z$hR57(Ledga_cbhoNBqdMYwP~JNaiM8gztSqnGzpZp*ulT>-wJ!b79i${8w!(%QpZ zx6^SbdcoC-Wrgx{%4RX-*+W{Mnl8KC%9ZNyyMvDH*s4R998{SQp@ux5g0@EMDKw1)SA2pen0(8xcMEb=Z*cqsaBqj|9NsY+CeGns<^_@pD@)0CS;B z|IEV&WuOk=o`LJ|0d2@7@t0^4qu~nJ3Y6Ar{0sR=Mt8AYX}({~Bhb*jnyfc?&wcnJ zwdO$nu)$x51%L6&&)=$LF7|NGv0F4rE3}Hgh(jBtMPIJk90+S)CS3bf9MrWyYh!su z;d$6jxZwFxZ;rc1hy^3v`KJXkhKaZ%UWUK+v`-!od0z&tG<=x`Y&2KRwIMApkZXSX z<0c)zCp168hs;~n?WT-1?Yg`V8vx%>ru+;#XitjkcWW&wFVV}rXWm3#!qejg4DEkG zRwX=z_8o8Ht7?|mFZvTQYuOJ;g5z{NFNT#>wYUa0A%C_SLGob3gY(d>24~KzCD*?= z?;R^*wNd>UC5pemhF?};CA;jJq)E<6&zvwoEqXvI$~%K`$wbmn&y1uQuMeX?bb~YknF4)4a_6>2KR{0*}kdoTeNmm{Oqx zCV6jV5=1twT9?C@>^F1yf@0Q$f>7u~9{jW@eiI_naZo70X{MsZobSVGR5 z5#pFrQM#tJNuKbuGzXzp?gpDT`(AgM0d@V)TtZB2ubbhpG^b5cJu0d3i$-J+ppT%c zO_yWfi=Kq9wkwg(nRgq`=6%Ps@v6g`flSZMY0hEdd6IRvzmxX7aq<9o0!Q&@fs!#@ z#1nDax=N6|Byh$!v6A3PLknx6BES1~q4o=LkmqD95Yx-E?wJja%mgmobKzPt5jsxD zVsT$Y1HU78-H2_|(=N!uhCGA{bTnP`Sy={=InCN^dc$EtYS#t#YgV^fC8wlj*o2|( z3M$B)533hs=#tC(l(%Euwat(}|FpAWY}hk5dv`?FnaJms9(Dapit%QwD$=D+Bc0Xtof`b{V?7P`y#Z_tWo6<7}8a16xydXC2G&LAi_-TaBL*R>v3<{_VT9U`< z6!kf;FYj}$f%fk zHlKMG_7LaRg|5GEcFNA!#L>jbWZ$D{J;B6S{-&Sbz6Y)xUYUUiRy2^))ke3O>1jIs-kpu>XYGqQh`c@VTjatC=3SEQS znK8lYmyI&fbZimseOwyZYD}-gCCL#=0?U(eo9lC%fx;7M@*gJ?Iz7N2O9vK3p?-vsxVYnW-?O-&vdrB1oX)_@jEf`N};CPZpCn}GJ{&NE;L`eL4BsZm^5Z{ zBpV#ZtW|CS5rxMMdT?qwmaFWc0K-nY@=S9l^1!e&jI7W=w@4v@T{7fczi<~K10VPR zClCor!6N(B`LgV9!bISQaiFAeZFM~ce9RGiiO5fYz{Jv&geCkP1C~$*BKVd5B^^G! 
zQbeQ26@3AUgGc_6m_KSl+vEHy5!5Ys6iW5-H;)le9}aTq@!}wp!4^McR4@PHGFqgg zGhV1=PK(B8w-%x@5yPM=yV@O-<$AI;&7J=TSK55N7o&QGKOa8$*7&y|)hTtWsiQe$ zd+62`OtVNrMa@=^c+fSTi|h6$@!D<-KDH&$Q{78KLLAVbxO0!EkEHYI{!*V(O4w`6 z5BRCM1}^}~3sK)Pc(YCpE8-?e&==YCuk6|~%9z?-?4Hb6Uo1M;T?#2#Mdh!=*exEYXn+M* zlofEiZ<>n=MdD?w|EWv_D?-VCcX z8z_n*r#BYBkw2aGJe_*%n9fWqh=_3DlWLLn(Q2Fy{kzWNnQEJi(OhM)+I}{+T4F&% zkVo+P2M!Z?I~||v=ZvqYDP^S`*UC%=K3*NS1>yO??W3L29(a@2?reFElN4AG%`0yp z1KryNaKgdgT~-EC7u;13Yd*Y18PS#*HE}3qTXp{@JE*Z>a05^}9!dB!524&9SPZ6# zws1apT>W;6-3Xm$1D;*(B%JfxJ2rz3i4ElWM;Wi}c? zubeAkzNenLb)gjl_Lbd1Af2hScx_haadjGxA|9cTr&7RvwpOTBm#)sBZ>2B%-Q%f7 zWxir61WimH45Kl8Nj~tqS|E{JsYVl~gJaee zvt$zn;2>?sR6(}Z8hWK?vh+fE7CoypziHSX>ZV3?&k4_J(763@t;4q8?J+7ERfHO_ zMa;?+CZhfQiE3d|)RXcb@Cnsc3qsr6%jmY~rS2ED349J_QZ>i0ci*TiNyC`6Y7HYo z`W>s^8DZnL+&dg_e(oi-T!eyj5Y2`}6t#^Klu%5L{lydBFegiVb2uwYtIqK=vuB6M zjMBiq>OyhlyNbdvR(O;h)$bi4>u7PtJ3K``l^`M;r971veD)jPFB@Hm3HPagJa`r8 z)R89=hk!;9zt1QwPAwTcmd=a5pIc!W@ws{VV6jyta+^5SCnnzb+Strd-Cr&I=V~3I`;++-_gpHM;#m&EYf02L zNFhdF-&DYGFL#C$et#J5jotz?Jjf(oZiWd-@i-Y%eO1t*}Qt z_Nr_0AHpt^jaURc9_yT4a<1>J|N9zM9@_{5AXXh2wy!7kcw)U*i-;F={SkQK@oum9 zyq)~LGGi&0;axKn?nTmd2!4Rm*f1f1f3anlBqq7Q6JI2(SRs$gwqdO)IytxYLfwee zu~QqD7xy0fZzmEcaw_62qkwy!BucOWKW;5O8SzGx9l4qY@o^x{#yduMm)DK%E~}%( zvzJTnS(a!4-`J{Xv(y%p!i0}S`V5ruJUlFsv3&ah)yXOoF=i75Qf)4&(I4t^qS_$c4_|#I7_!6wkv*HyOaLfRX0I#P)C) z_XZ{~mRoyseV;5b@K7^T_efr6-Nmo76!lb@>0IUdo%s8eOda#*LOx0+=vJE` zXMS|VN^>5lF^&ELE^t&Pn%3nJ;!R}pSB4r?+g7xkph+F+C}_Nar4KcOv_1@vB_!LW z%NRpdrphGzLQ$}3=Hp0Bzipyw!B=pvFyy_*?Zvt=CB+!gDLXa4s1HST#(GcOfOb|r zVgh=cei1CYSz91Z$~+>zmCv_xv3)A>!pXm;jo$)7{O3h4lh+2C>mjy>k<99*#;(ULBXSaf_DKi&}X4M zeZok*XlbR82IDkQVLtIOp-WHsOVm9^NMJXTOMj(o#gfQw6r7Meqix(IC1tqbb0}h_ zVaUBE#U7EsVPY0!zN3XbW4$~f0|6W`(vkh)Qahh=pKbkq;1sHWH-->Lae$!PY5aKd zkhJ^6SNs@&DX1sqlG?&~6>!BqKQY)ZZH2ytK0s-O7c%DYNjL0@LP_G~71sz766uFN zgN~6d88d0sDn*Dw@L4dK{rT7t)LX^!O$Jh6dxYvB1WF)3-5TT;jLal?T_fZ?5eOY0 zf{MTNw{+b69Iw0E!l1{+^NE(VUF8Ic0xqRyL>xnM6NOqF(xh$~N&{9CGI>?- 
zqQP}FFMS_wkLAU&6Fi=8-x|wH%%nw@O;)DD^)5FZp~c%Y?F+ZN*AxmKE#1u^Ck*%| ztzxVLIcsm}Hjbh84;5#}wq&0hmLwM-LU|1#J>37&7p*__C9jONO}G^B`!Bxt22ng3 z7pMZx3CJ9LEk!XGQ!zHwZ@9a?dLh=AgD^`nCmm5~G4t^z$?%gMX)i+h@>-(R_!6?d z@ld*Cf8m;7ewb3>HxN01Kh|Xlrjgzbra?K)$9uIu*o0o_O=U&_Iy3wo+|EK9nNOLP?4j%OOt<;L(9IL?^?ibcAJ-ShpB>WdG`AZ~M?Ty3FjYP2 z63fHrcV>yq(lqHqk5f;Mc?+mWK3pm>A0=UGMste{L3*mzASSgiYqO93@v*wmshR=+w@=^}f4NM7@>Y!xE5RqnZ7vA`(GZR9(*EK50MVF;BE z_7KVvac;hV9!d@_L_&&+A^9eaV>~Y^ncpclI7ZEVh*j;FA*Nf*KeyrCY7I_6@iXOJ z{2#u`42(!OJ1^>&iyw^cV(^@)V;k~mm8|nSY-N%WUQQz%pY6?QY^sOf--l4fGf<7$ zQ-|u0tuI37e5YrzDd~;C-l3YK*bcU{NCehW5GAbKcBw{Yp*fECN0#wR!(k{RPHmken|#ix(2U4B$Mny>a+z4EQ8x=t=Z%+`f6pHUWj8?h3KKA6%iU)CO;Sh|H= z(DjK{kMis&-*XWj0V%eDmQpMj)NC3|NITLbd1P5 ze{&s10zLpj21^I)KSgv>V9@hbUOPd@MKHXN%rRQnW%@(e#{!M-uH}iJT-*OST*Sc@y0N|Dzv~#2ipZ(SbpMy79tUY&^gx8WTO9Nt7n);w@ z;b3Gv;un)j{|N>e3Gzsv96Y-;2vri@`D$L&FwXI67z#J)F>=An_xUCQX)?mOb3ePW@=1JEmM? zYXq~HPMN4wE5QIhxgPofK6B*}n>W=8tk^NIj^&~ zYU5W2iIH%Xypu#Kr(|R36W}S4J0Ub<$5%8m_S@*&x9wM1xY*~??u!qx+xTEaSEO(^ zFQ%0Jt%1JyH(hf;l zK1}&%eA@B`z`%G1s6lz)~74X zUz{1R;OSIbf0b+WhDQg05351f*UMrcm!*w-L+4Q+5|?`s2~{QBA9_EpLITNakMeHa zO8vrt{47S9Wo=|vYwWi8M1j&>&cmd*68mO_Ur2qB;i-0*VsV;3tC%I;H0XVr=khSYort;xTB?7!j{d zcNxS3&?5m{d!@1n&Pb*Ny-Sl#;*1n9fuFDJo(0?2YE8(aXkM*A<)z6l{qT&^3X0$VFt zmVSEmmSHHW+8m;ccYZBfV7kM$$7SI`UbhYgo6^`nh&kUx$o!bP~sDxx zyQVp6FVy7!Nu?{5NZbm%kZ@b|2nzcpA0GJzFi}_=YG4i0#p#UI3o-O5j!2q^G6L4} z<=-h(=Y;P>Z(rw8;nY_Pw#E=4vG4f&m?<}ne!I{pNb7^2Iq>CGMkWA}5=uGh7L&Fv z=Ip`Qd^{~L1&Ry;E*1UJs~Kl&8AO~+N;VO>-I=NZyI%g8;zs6)0*$yFL1|fr zV+KYn3XdN{awkF6Hg=(Kn4-vxUiRZ^p87G5xz&^T9D3oFVHhCBOU=he^dRR5e`4kV zPwJs#B++tRLDA4{03>@+)AKul@}c(cmWE9%^rXN4Qz z4$@8r`@&tKZVD+Qv8Bb8Nn1hoqNIvVxIkP`b*lV#9p)TRiBA;e`foHU%#>;L0zUzV z4O6XKuoJu;mW*L?w(-Ecv!tS|Imm)dYO)T;6nys5s`z%c#2e^na-_gYd7=%P*&ilE zRawsEQYY-l?kqGdP(NZ&$$!Ig>Yf~U(G$zW0EdV^sP68nT=-@vT|1GsVD7%$WB2u= z%KFDbs;-0UD5hFNa_wdPjy$W$dFAVkbV~HOs2S&WkV?x-E>HT?U|FnKoPXd95qKXl zK$%}kn|0)<6nrEWeyJSFSoh*1XCzRB%gJ44h6_T>V@MwT@=9D4-`SK@ 
zZ)O^o50-~_@(dvvy&a<0%^Tc=ZNXN_;~O5~_B*v*?g%Y4?t9dgD%=Fht9JN)+$paV zu&ISWo=XAe1KN;J5y)wmRsG~9lrzb|NA@p;e}IDw3qvc20xKEErW=$+1;0oxV7De0 zH-D%M?Hxv845C*p%+JHDCoPA+)QIW@Rg5#T?^SA=GLYxfu=Ye|?kkKH$H~~)h-98)2H6NE7+Sd`6 zLOUoeui7mHc&?)xRA+w}lKm^bgS6q_=n=8_kYDn=oqQQg$Wmy*tJeabHIrN^{s9%j z@pv&nK0+=HD8klm`F*In$!K^xRa(6U&z4)Jn6Fl5{JBJQ5{eE5XCyO9Q1MJG$@xCB z)U|-*L)e~gdRaj$w17NbD=yzUl$uT+jrXcnq@%)eZd&`HjaI2)mt1CY1I$uL3oMt) zRE4=JZ7KG0bMC_~`(mWzE(2W*jcRMXRr|@RA;yO>WT9f!12-mU^}p@{*k+HRdh4Ur za#SdmZM)iSs?}mZbAydH)OR@@=6}q0j}S=q{|OyIu%eU^34O@6hG8+#2hthlg*CLx z^DZ#3dO)kA4G!LTd|(#Bds-g-Pk=l6><~RhM{I)q`P@W<2K3C}W}Sk$$!@)08N)o5 zcbSv&HM4$I4*bf?{B&^3cWN(1L9hI6Gy`EsJO(j4waVgy zp6<%M-@ICO8~zY1%FQpCjTTr4)W)7AK5uxp$0zt*f4e*vqo*#|ABKYNlPMEXDv71a zD1df|3#UqqlQCzsZ3;urBtUhKt+&o!O_jJ5>`a$xhmp@RtPiAgSz=o^{HD9oM6h?q z{@}@nq(sQgiii^4-D6lM*2LFGJuF$~6NRv114YfQ$OZ!m9GzTlER6Bf<)+I{=OWWQ z=HqgWx0mLtkiZ;7+dc&!?1+rO*0u77<+R>z+0p2tWSUCAZl^|`S_iQT01*#>?bI}gW?C91f;-ngQ)qf>l)qUVr_R81d;MJKs{>i4{X0xD_G!18ctN*H z)2VE;_taE;PSaAMj}oq4ODgIwP@0COVk0iNABS>ZL^4+z0I8_%y8{#k)k3AOPXJhk z-NtKgw_52W0gE5=zO3Oz`8Hs*Z)!_ZkPY$STik7+%g@^cr)`d&G9FM8Ci4$ddr z?(Ll6fP3APJ##ecB9-zcl+1Wc8BR1bnRr7f0-i{Kl#zFCEX(q>Ir$4W?bhn$n0fTs z*J~-OY#POm2-)>B37H=IzmJO&P`$*;#*1U$Nut4t=j}MhxxKEDGjClttt00#pc3gWVt&#fP$bZ5#$%dx|cKLrt~@@fcq;`n-j9Uzxr zpt=vB80Aw30?J4PX>jrvO|r>8eQ5hgW;XcXFx6(nZ!n_8Z5 zzq8flkIm)H8&S6i`C>zHxmPian$?4&i__*s%29*;LT|#w>CRs@lWUC3V`d7?d*DNZ z27p)m0kQE+B7l;CUD`V4JDHvOclF(Si=(KP_T_9-Px84ryjV}Y->OQ#9rPiJu-2LI zxaI2{p98SbhP@##&Nq7Gd@XiW;}CL2v}|HwOupO5v4X?i&FE6cL(*fS798d_dZ^oz{8`ZtR<`%HX9et@$l2h3{WK-(mWnyjSt0f$D)nR|0GYI&vEBV-*@s@ zpw>)Q3yU>Y2Bo3(d?P9+4ME};$9y1OpU1Z}`muH3K2-{ze+z~mhJiWm92r=Me%iVa z=fPjqQE(J_<7T+hbVrL9(8vTw?kQC}Yp>Ae^uC&I@qq-$ADMK%Nn=f(RFC`ul^`~D z;zuOOo?ASUwvQLKJ+6)ehtBGY3c1tAAE#mGc{dlVLnGc z(BOK)zAUx%O(+8I8DlyDH9YQ{W{gJ!Q6L8g54Ok1q*aC_IVn0QvII}yKVdS6XwC9h z(NMavF4-rP=AmcvF84=r>-W!39eGfIauFHOoyPzm*{H+iPIF}`QYF%JJw)biXQW7Cb)%tt zvNi)rAYpzUGZliIi2zF%MjpkJ>LQ}Tl7wH~SS)b8vERg|m6+k?d+V#VqET@SnjrlMSc}ygoQM7}x<)ej}iUjdEc!UPFzo 
zI)uz=k}5GzRY=N+;9%30U!%wC=XGx5jF0`Sc*$O!mhUQq3_0NqffYzQJ(8ur#0G0P zS1Cs^m%fZ~?ARP1yo_2seYQKJDnisr4{XqK&uK(%wDtMmFJH}(fQpNGMB7|&Am3V| zFlwt&W^HjMaNZG$O6~8>)w!9P)e7G4Rz37j_Gx`XGoRFL$3XNn63&ey+OQ%oEhqzfaIq6`#(JJ>G16!87-ez7*RW)@Has zg9ngCZK=Go(_~u(l5pQf!GKru!64aSZ%csh1G28g+ipw-1IWc9CH68umgaLx&g{ z9Xxhdfz>%SIKO}&#(>)RE_@Ico}3|6i*HK9v8tBGGj%+1Fm`8ZvuQPnC4}L<=#F;! z=1G+2MWK0cfg?0oQI#$VCq&n!zC8TRWn;YaMT(%Ura6~XTw(w~tUOR$(JJh3z5o0OJ879FB5Aud-kE2SFZ_(1Tog7^r9l1Z zvYIO~VUXRyGA!`g@Cr($(D!F>YLf;Ra>%W9W(6qQ&HUldinMXRLvHdpmPn=0-s@JJ z*kX=oAEvX+U_*-^v+7Oq`{-T`!B6qk5e0##wcVmWJkfXsv{t}>PuWd`H|Qt6tI^~X zz_%~8{fh3gym_@+(&X99*v%IKNwJ2{9k-y5F&lSEUSDdbSlv0x4p}aD5wg4dKswo= zr7O81InI=9UVABgy7Ug}rE|nCJcIp5f+TWDDbm6UKY+;`{0zuDOgp}dIV!P~TqfmG zeU$>-LoXGuf!=R6aJGR4iwN!9g{2(MGm2_Ro#7f6d@~3d@yE`5ip%?KeeC2N7!>%5 zIxX-Wt4{@vbOO*N+~tFS_Mz*gJzw)tUR0zsBHN+}x=V#+ad_O;#cnOUP3XCsC~`4E zjlbYK!Jv4kb#^a$T>zeG#OuGDZefCF7paIat%AEa5;!xEmtz#+!l za78x51%=hu(@lm?&J_3B*+rhE>kt7u3JTTc3w8s7YwL)muiP%ae$uS5T|52)DM{y& z(_L5EjWP68K|&-zqJ-s!ceK=|OKrHI(~w!AVMHk+J5RIU_G;3MY0@=>)96~QDU3iJ z+wS%qXngFk@1iN?aiEYOXc>RnIVs7}(bai(EnIVz(=;WuNP{Cy4tulbq4}}=jDWVS z5WnT~&Q#+o?gCBf1tJuM2;$in#S_BrhbT2?PAqj?2ES4Rs2uVW4<7Wq(6A2SZ4UN| zl^4U<4{*GnX@anmk=Q*cZNyRpTzcU$QS>XJBBE-wXiFuJH&KuuM0|c0@dmaHXZGo$ zWeD8LJJkt;rg&W;2nnT&UyKOs;lszER_g~&)CMpY*qlKhw=Qm$Vzj208{l1#OELE! 
zpVHzIE`s?_DNpGzu@j#~e4%=!-vDgnfi_^-@!W8Z^}UujV5rpugvqNntu`aNfw+hD z?0d3*`(u6Z(!~QLh|O}s_vZ)#(S|ZWJ*#v)No#$|?WlA0z0=*~le$9T6o)(O+av)y z6T+z)mjcK5+CKho%XdWi<+P)Ywn}oBOAAMb3trn#ZWb)$m-)xcGh$_i6pijwzcEl^ zgJ2gz?!@LVTgAI^Qk`Ai$oYM;W9?--k1<(UVRN!d(7Cz6Go3HVF5}-1@)j7(`Uo-& zdwsj#$CBmQ6V|Ukj|X#&#kpNc&|b}3daey*lgJdT`KyfjjDA}*N;KHMfxCp@*qOA4 zaEGX+2+!$(@_M#@-zF>FJh(b}S-Qe+Q4qx!bKI^O3;9s}rMdxv4Pc=t`hujs$kedV zfx8nE)zV_m9(h_dXUUXvPxnnr2rW5(xsH_Ad9W?9wMGi-jz!Il>Hx#Gnlu0dppFwK zaZ(m_-AySTFI9R+=)fem{W_VZ@5pUj)-vJ9M}do`@2${B6lB8MYW?tki$&OE&QGZX zixI=bs9Cvrk86UkOJ-#X{;zap2dF)jXMFvisua|Zl6A@&Z(Qb+e%4AHiwAX0VW1#E znc0qLlc%Wdx^icuq*#zCm9UC~0Wpp=1AGMO#-W$z)}wv9wmO#XaGGF9p}x zOOBfBBTZtX5>3tO1K<2h$Il4`7b`>JQg7RA6+bIFJk_bFeE5V^12-Q}O0%{vkD#Zd z6)A+s;Q7jrAOl({g!d15IvJH4gA;aW2s|22d0ln_cNQwfo>h``a`Sra<*zWjgd7pl zQ(lC$aY76>QI#7o318Q-ZeT|*$?W;v9K0vIrD73uEweQi5T+J29jhoRomY7L?X)j1 zF5d0HP4xMzczW*pt^(!;rqUwQUu{%rkEhJHS;sM!9{w zcvD9;y~?`uy>e^#x$DAVyi%^3aQeL3gLH#o!n;VCGP>#x*YZnzdUeG|l{He>J%yHK zrz$qRCe%OFPj=Bqj%-g_)NI#&bbT?vf2t<6RkiVisjx=-5y|0?r5J#(`;QdIn)J5& z*E|Q?IW*b4)PYO>YRVRRdfCJcyYf*=EjI;P)en&-jP$K28!6{wK4NIpR)%KbuAH=N z8W>%wQuF}t8O^|iVKBaS9y~rx?TNWSm9U^F5Y1gs1`F4-^fV?g8VI{+aWm@Z;xh~= zZ*#JxK7S7P&e@ClmHff$`v#v<;q1Bbem=?wWo+HpmHZ>>ntKfWXO$b2gUkvp^{Uo! zu1?KYp;Wi~$;k8<;|vwWvR7c=wl`Tcs7#V)#N&KTjh|{9 z5^eU}&Wf_`S`~Ue;81*f$0C^UYR}PT!0Ikil$~$ps~9SZo^dz{Er`t^(iPFvsb?TH zlHTD_=dhMaZ>nYCYV+PM+sX6nT!%#2yLX<1c8+5J!;Y88$f6@@rr5Gv>h1ww=T+fr zWt49hJRJo}KO)^OO^Gnh9<;*E=4BI0JkGbg z=~hNSC5FGv81`tsiQ*rK@vH;ihelKE&#enLc{}s9U+ItR15Vb(z1%hEmGbc3#ID>& zMh|&6SRN8i{n(Ya2N9t(>;ZbG&i$Id_XjBkHxK!`8SO6d%97b?J0_HGjw`4U8ZLS? 
zxk=8alxu5Wn3Y`bU|BtimGVuG@*i636yDbE5(~+)L(;5UA-ULXJ`GS_c+M8>Pvmvd zl5o_UtYz5SS+qDF*PMGm(e&uaVT6CYblL{mU-#xpB-OI%;dxK5#XVLQ2JYC6fpQje%0$K>)combY!4!fyHIJXrAtRLKG z&WwhSX~l#g@WP(2!Mx>GaqBfhtUzy67AqsF-px)Nqr}}lP|3(fa^PmdD>z&dd}mPv z*+$v2n|>5-LK&fj4bqF(O5vRNqwy02dvA!)hm^&JrxMVaBw6|iGisN@-t1h|a&+dv zzyg8~NkT4iLf12|Q{FKcw{hl=lxs;`sTsWMM#DP6$9^n|@-4#Y?nfX><)Hhu zQ&+e!VjQ-?B#GCDA&Q1ABiFJQMD_>yZy#|=bl4;}kIlX(oxDjFa2lvcne}K0LF#Gd zNoi!EWhY*4-92|g?5c1%(PtTvw6HV0BfpqoL2~Z@Kn^&X_bn!y-{Vqo>J1K&HM@0g zcITwrM>7u54Nfs;v`Yvcj;@z;kq=0K_#XoRkYCWEFy^=2y{BRi?&P*I`_ z;uEv=*2~syBT7(vLj=t0>(8zY)Y->`xb+`XM#7gay5dBTXy;SlTy1A{A|m7BRiofm zDSgpaHRH0hP?ufnPVOwIQA4U4@|B_teuIlhmcAKpd2*tzS)k0Tp2TnOS23ANvBTun zIFm`%nOof}?dY>2G`7XX5p51xt>p+T+{{4S&W%}>u039&;^{%5`YOu_1>P{;&DG}- zY|b-PVbnle_@J=d68F9#%<6o`D-xx3zS2@_;QUj|F2^zw6{cZ5Ze@eMjskY-$mt=F zb(G@4%DF}*r*KdF9;zNbwjQO~OX%KWmNY};W|BswgBkiX!IaRkP~p%}e^__*AqbKm zAU!;I74{rH2sMIrHCVvqg)I(s|2S7qNVRQidEq>fL4auY=$k>Y!aa{XF2gpLl`F4_ z+K(*-6<6L!y%IRN)Ho!nj2|NJE)w%KS*i3jA2$1#wz;u%i4WX>)vWLXM3KO8`L@g& zH35!KJ27`B#?|omZx0E(sfAPo_3!4a?_L829Jk6n)O5+yXo9KuSDyt0R6jwAx+uTH;M>w_`$Nvd1@buff`x zB2IlL&%=J5kn_|g)i8=!ps@XTf7QP<@0ZXO!UXbg4Xd zp*r=lkc;?x-?4>#-nVY;dAHg^*$rXsjuIdSN)fcy{Bxxm+P_?_H?ki(Gy~}*2$z1s zl8D{H;B|KG;lRcIEtjIurE!%zyH#pEw{gwv>11jzHQpErXPn|{p5Y@RW`E1B7ZJ@X z?VrlviBrru-vU=Q6M=x%FU3kl4ATWWZ{CGatZUj9+aaMLBq=chh)E87f}8`}YDE3Y zzD{p+N|j)K-xIV3&#MmPnmgi#?L(iO49y8*L^7qxOgG~ES9&qIQPG{4dBGgrx-tHY zb9^vecf%$N@rK;_^1FtQFFzEiXjppu2=BN}W|A)agbS%`xc$*Un~;@T71wuhX~zL9 zy4N%GY`gt&+3K_gIcL-!XBP-<2(gxHf3#<|XW~cQ$UM^s7dz9o=6UNup?bIHW%qss zH6x=Sh4UJh%fS>+$j+#iTq*XA?W3y)135hyPF=?o3iv$Rug%Ge){B9vtLxQGQW!V+ zc94jQl*kPsHA7Ln`5!IVe`at%YR?DT$?5kPIn*8DJ+)S98l9iyfZe_V=N-;FBfMKt z7E8$bN&U_G(e#GV(agj39UsHGBg}n62R)@p|F%a_`)Z!Cg9EzLY)< zOaM~ym*cyfe>6qKX*t#iTy(!2AU2E?K{QuJSz9oKaBoth^KICJ*N=MawC?M+ip+~E zB~;zE4}CbVwr;%4w(CxsbW%LsKHR6VGab24Ugy*fIhHFGX5GGeu-9zaM%}^dwpQ+b z+08K?fwN8yM-gSRKt?ACjtTuzMz{jhg=bGj7e3fX3}M<_>zZzSD6Y>NG#{(tsBm88 
z5qj=*w+GprTz6c@WuIGTfJ|Ns_^evr8#%6ppr>$lMrfxiWd!hcG`;w-RMv^-zu{4A zP@0`gziL{sOrgI>d%UuDd-qL%G8W^V5^NQZ33`c4YE!nES*(^h7vfDcPe1Jv>W53m{$C44ytlSR7`)@<=mB@_Zy|IX&O7_c4IXn`$77+HGG#MrOrA5 z7fl7C{!~uI?ewr}?<&F5=eydfUlHOapWhx)?9LMjuP`ZBg_J4>)YZu^Tr%-@rRfN> z#Ay}II}!U{+e0108_%KR`i^^Ty=_9Z{;OL4%p9HW!O@C)x4Xh22XK8c4*g>8v~w42 zIv5OXh5ht8XL>UXmff94yt{z51d2G`R|*}SyEUo)1m}OKi(=pdM!)_fp`20BM}*|k z*WXaD=C;FU`Sq%O3)KXk*PvEMZY_Oz%0G#!p>5ZoR52Qyif7D#5Di*mQSuC(_q$xA^G1#s*DAvHX#a`cz zq8QD!4a_UY`WlKc%0jj-@T-@d_MG08lHeAD);`JWE@tCyEU%#!ZTXhM2GB5{X356( zevI#0;+ET|*H6!71eIW3>kj6EDaN8Sdk(04<5nbVMO!LW?OoGn_t(p3Ai%MNDFk3! zU}{yyF8MdKER6%ZP=~F+Cq^ZidznID=^co__MG4aMfMB(;etDlgr%YjVnTv2IIqL} z%J;#UjGE~orGm&=dR7AKkKA649TKu!Kc7#ml)M%6$g4l-;&l1JvftzVE|ragA#XZ^ z@ed<7JL*AKd_hN#O94MsdBX5PYnnu@F>ll~KmS+}8BIw`uDt?03eOkiz<6a`YJ1*c zQHR3*pp%l!6|;8~gZfFS0FUJa(?~c8(0ImEpq)>1$bY6BU6MeMQ!P5_XRLbma#Vr^ z71)&;c@{fhT6G9{4qu7}lrqbDqSyF-+jxthgpX)YqN5)co8S&vm9G4*%AF5M+wqCF z+jTSU&&+?jp@?7c5Gd5d=2Ve_Ookg0Dojy z@l^<3-Ix~CezV>A@Iq?bJbBb?R$X;9r~YM0l;UT72csDbhDPOBs?#iH(f1tN)wT~_ zJdQd6@qV+?z7o?Wt56b3D(jJ3I*MN`)U0;i4ABNw=g?-lSFb7)jX7bwv2&KL6@Kek z7r)zDsX9x5(*sD+`Tvk2i1tV`L4mg}4E-fHrk>u(f8uY&V_qDbMnpS8+HvVT~v zT0eFQ4g-nTJ;G3bmUz`zq?89O9gxC!RbzLoZshiG{LQ4I#(EZPmne_~b;!;8tccR1 z0PS=RyX-N+iaEwN@dZ(Ax^u?66|t$^C|>mF_j2O;O-}Rat;U3$)$+eZLu{xY=)rcn z2S3ok2=uG7L1#YgQJe;1K4y5LQxIkqdg@*!Get-4q;Ta;)4fqPLC#yPnmhfdHMHi6 z3F|)DFl7j>_v?Qak+u{(>yqU-!aZDk!Znr6eyr&(+Nl>CI_aw}@ykW>0= z`x)Js4XWsOD#Wzc5fu=+K&~%k@S`U-v8mkbIC(4Ipn^xs{%AN|;4liIG&5%Z;Mfm7 zlEDnyK8!%)U~X%=*h#Go(*=Navg|K%*RFuGo5j2MJaYyYNsc7p4!}k!2*8ka0;Yx0 zzhg*=w3kD;hjM|}p`W6(Jid_dsF>iC4KT?4E zy3KuA{TVC*M$1dOPOO}QK>OWRB~N8hufxj-Hg!7Do7daMdd`Y^i~fFC;dlR!xwnkV za_QQK1!<(E>mo%uR8m4(K%~1F(}Ey8eSZ?tMS|e!st8{CS~V z=Q%TL)>?C{V;!?1H}X<0J5lWtAOr?IfoPJ;dq#7nJ0$xH(dgmTuKBgHCPN_59xd?L|zD2T2kG|O@tN>zTDHj8(gWE<0qDP z?JzfPP93q(D-A=?e+G~P!|d#zm6(6NDmc18gb|)HD!%0|SEHvl&kmR048%=#>EbPj zqI)V%V`o0sgZz}yunr}EdNZ=`>j1K^5d!_%GDc1O0N!ox%~Fc#8tLZ+YIq0F4d)u> 
z%|(@|7si{0EUHB&3~NY-H?mYE%yz+H%zgPmLEF2e1Mczj3h1?QPln%9)uO~LJ5k~A zJZyC4!)p+ra2^8G10l!3Wm=u?p8?8`A2v?e(+XKCRub7n;yhL-6}_Fx=cuTX68c7Yh*LOD`&j_Bcu=_~&pDW2CW0ek zdzRfgzp^Q`2V*hAUyHz+^a{9e3OGI`2Z)N zTS~?Fkuk%DdWR7p^6|Q_;mZZckT#iN>VlKa%W}W*599rz&A6*>d8PbzoAe)T))Drr z-5RNMWMWhkdQ4szTj;l&GcKfZkx6VwMm|8!J(nY}DzUdn2&taD9`!u8{WQ6X)wvt{ zI)mTK%gD?(*KsrzPUy8iIJ{eWh4v=b3E$nU4+43wQoGhj+ibI0zObWl9Yd^31NQWn z=J315Pak(}Tdj8wJ&a^VuR*4ngH#R$R-y?cO`C+Q4!~Z(QTI>dIShHk`b9B+K{7%4 zar6piSMnnRFL=DlG1g%fI#U(s$p6d=;D;>iZc{f+_0WD+*-fIJ( zbVCcewj99sYn7ZJT8Xij$5CN%zk;k zgIV}k_Zb{ntqnZeJM=p3(B#C4?tOE@=r-^8V)b(GGSt#}`BfQT#pKnXxB%K~%Ch(8 zO-Z-B9{F{~E<5WY&HByxarm))5EKRLC%%KQ)?&ABC(nYlZl?f^)q4{lp`l?IYJZLI z_h4bX_(xD*NBfcar|=obw;~GaLywD7Q1n`|!dQesa^;I+J<6lEf6alzU{O!koBV3| zVFy-P?GC>uT^w*cH=zkqxUt{pa3$jmY^mwQsjrhAbr>=vc{iqc)cELOnIN9AF;wH9;qm_-K=Qh zv;Gy{m&~DF?;qA^IW~?>pVn`P3P^Av?Jv5YCi7l9nzL!F+Wv^3*_`>Hr{6!3Dn1lx zfa16loYiaJT^5reRQndXS5Yi}e(a6Na!#_^xNu9e`)zO_Ycp)X`QGu2u-yQ)f@{Y) z^rSaf!W;w`TpFrvd9&Y+_%CY>PsX58tIdk0)e=U8n)MB6xGE@P3~U8>z&NU{@P5N^ zjybNm`ye*g#14aLEDZzNZGW@H6psOJdpNza{6VDKY75E8oYVKwp`N?CH=)Snhq+38 zQ5NhA*{0O+=RyPV0FSCb7K;jIT5uJw|9p(cf|TgEpQ(YU>$0tX?SE1DL^_qnqNa+8 z74Za_5xuyWZ1gd_8kIF*4kF9H2<~EBbd#>~exRY*;j9-!9FP3g)x?q7An%cb(es0Fz>Q#mm@P_M|-a7w#yeqN|f z+clG=^_xO8)f-B@Q-D#9> zxM_!|RNECzJr%ND!pzKc+Cq{ouyb}^Oqh^#bid{p`?Si%+tBx` zlK5>bF742AJUvf#`9t&#i6?OtQ9jnO$8cfiM zpevZ4ph|>AL&JbxXNv3dg8_~(B2LU296kaSLg^Hq=kTaR=%JoxyD9?$O^=oQA7g?_ zd&~KPXX+8E+QG@?$+q^biq)k$CPtS7f?e$rBS8C89Oe#q&TXwjzGv^{5h0J8XVPV`S8T0<%jbmQYAY?QF_yAvHTMeC3p47 z2*?<9=%F!l)W?8)ew+r*hkSH6;^>|%9kR%zhVu>4$}bzN%SLDQO81Ok^)gjaU~#Hx z9|}460kR4knP#_Tm-zZ*(`|1LyY5~aE`Kxk=We2%v4o-+5(THyn6`n$Uzx9JKbQFy zqVq{Y*y`VY(VU}5=b%5JeG2WY(jrEZ8L2GZEcfH{7sOO~JrO$O8O@+9rUyRy;b;C; zE%ST55_k+TMCgl17MoSEsd`7ttjmMudynVJ?};Ita)KQw2wi}z1cA6*2p)PUPw_1=q}1n#P6V`+A++W9^>KH%8$~?|QPG1cUu9MB(+M z(2@5t@@DHUr6(8lV1Z#^Zbs1KwEFW(m}7vlYB7&kCWq0 z6=@N@QWM`dAP}=YS!=$}qPew{Iaeb=5C2K~!F5jxn%y)T0@am+rWu<^a#L8|b4r5W 
zENWtxYoTvbtv2qzrqSVZcrSXAUmW#8%blo{*Uh3Ok|5Q{V|9n@K9Vh4`f4k3=vAmI zl^~}k2wdn04`vP{f*Oeg6U(SUAb_I?+Onz3tlIohSj{Gxop%bWS9gk=V^4P`9vY@% z^vT9ufc(NkhdL3D(|g4NWil=I)0l?K^~8s^^PVd6ZKiHb7U(5b`)&C(>+R8d^^Xl< z8`TM`xEYJdhKX@2+)G<$Q(;afgbr??PCS?LY< zp*LaneVBM{%KPlBDaKEQIX&cWI!ltSZ|COJ%g=FMl!aYU5zelk%1xY{X&EO{#IEF7 z8ky`UV2h&dMJav4Gu!A-QdE%m1ji5xgPKXPt!lqVDB}-J$%4;*A!U%PMn+$~IA4I7 zoT-dw_kJr8)xb2aFXkaJYB(NEE^0>$Y@K(!VS1H1GhI}FyD@E|e@p_3ua&2DNR-@f zV$EV8-^H#|9?L5js!SyA4`D{9^SO0A(fhhrkaMxThg*Rd{>45t^Z*Z_p%Nb!PH;Mc z@6=vgtYyREjA_CKeWms|OPN^jaR5H&EfpyC2J-hkWEA*Z4w6*RKR%PjffiWmD5N-i z9ArRJw9hItW(Qs9v1Ee&U7$w;cH#n_LJ?{Rt_(ec451wm=Q_BaUSoLEf8?(?N5X*x ztf*k+lj!Gi6&dbAdN^RMf$GKV)uwIa436|btrw|h1LWZKeLN56*H4m!m!%tDZ+7B< z?(V{v!o16wQC5Art@`yX0F-;Xe&sqGl~+r&zhr$vVUhS2J!-c@)FKkYUjf@ABa|AV zdv{P3mStL+$G~f3>9OgUNBhBE%8=c_HC@N@Zb%YIhsm~nzS*=sf#b~t9LuCU0Pp2p zsOokjkIatPhxfc@=n?Fc=UdmD+JY?&jzQJ18fD^EYhm(D%Kg-7nf@7!k^)Bg;e;pY zE^IvdYO=*{XNGg$)D@ z1U28>zFFyYzh^(uz z439mB_5{P%6bT&QUYL}{1J8kn@m0bmw53$d1S1|ayWSkOp(R8c1CIGqT6r3|gqfd! 
zsHP8ixfW8oMZOYj>WMSemP|jtybN(Ba-amLk~jEwq}gyC6=TE#C!_wP=X3Rr)xl2G zAoD^ddt_SufjaGoD;s|7KK>*vd4>BKKQq@D*M#l!et+Gfc?)D^T2J7|5t@Z1Z zjfI!GSg#IWPH0Fs>UZJ*43n7DZS7|3I8kl7TSLSNoN?;J=kt$6L(#}x0Ri9(rIFB@ zH0!H`b(mo6|15F29Tcmoa9Jt{ zWl6>bkm$4LDn*gylWqsC02>b$7%*6n#HrEehTR{(_-2q8F;qA4RW#I(HNi8QUu5-2 za3Q$cWs)<_o)pw_wmR5AN_0dKeyJvUvtkWH9&QDL{%jAB(|TW^EhAQ$ZM~M5ZvBXC zaAsDiCY0s9Qsdo0))m(KiRt_k!OLJhN(UmveF~kL#JK*G`f>Gi=ox(5Hpn5LVC(b3n>#A z+|pH1T@h&hanDI#HEL_GDu|~$^VB$~0UoPEnPBplcI#2e_ z7152EL7Lr;+7Nn$j7aqh;qXt6v3!#h67~h^gebgO?U^kWQE$tQ7NTh@=RA2(d{N)c zja)XpFO@yM;DeI@(1EU|NZWYw+Wx`~AQWF)sxv6Fd~Zrt``NamZ*}iJTEbiejzbfS zSuymO)b3B*)mst1zjWM-wCro^U0nNAFn`W>VltYg9~sjSFrg#CO# z?54uXWxd^|$KM(UWc}^6L=K<so;TWQ0?Kn_YU7MjUAp1FmH;b4JeRP{L>MJ z+hdhZbTwUGBm}NbDSDz33x9hBJHje6(Wx00Z~yp7z686eD7gN7VtxAVz+l1;=gb;l zFB++K4UF;?8Frhm2kk_EDz&)gkj$w@SY?1Sz;Wluc`dcV(CI`B-^De1M($G9zEa(s zTX953O-94zHT+tDtaM^>d!T<=*zn)+dW)XOQB4np6&#L+`A`*Vs9@0{G zxT|s-7m}xtk5qu2jIWlg*QSdOKmF2pkRM20Q6S^c4`K#bN&6Gpfu6M_!H@gAN=53_ zP&<@{M^xR?p*rKz#?aQCgA>MSWYO(c<#c=7sUT1CoBCnXh zrq>2vib*@hZ8`lH7RI7)Y7Scz?6cPyps2~cZXM3EAx$cYsSoQdT^~3fpGxOh8SO(FmXG6pWoTBzEm-5ZCGf zNxCAn;LvF^xua&APkEGp;yew+|Em%`$bfW*)W)*4hqTMAHhPjpj|uz$2fH}3q0@s= zp(r?-ZCNFjZT5rdOA~`gk-SS^T0!y%K)=wNG6|pR5^FH|{)El7JnpCzjXn)sJj4gR zriAaL-SmjKu1ViA0@RS^$F%0 z?Uz;R`5~d6sRQY56AHSMDto$BzlhD%ns2@5o%00jqB!Y75>(QmMP;fMjxuKMWi63dpiFhoCnUtn;D()l{VVears!w;~ z$0icyCPb0jcAFj#>)+D$^0#9bXw1}e`}QBBp;wsCys`jvr>As^eHHzLd>%X&6#789 z1;c>;0SUx}YNt{FDjqzN@Dq1k*rk3T{Z+>Ba;54j5yH?d-Hr+MfWTo)po!+K(zex= zWeHR)ipBT|1jrY@E;w0kEYJZwSOj0lEI;i+40A*NxGlfWH)C=Cf~sx~K#QW(`gMVCnd zCZ)AgRNSPP^9&h#*)H~&02W2U`2 zP>2(d!nRK;*@TY!1>^|7I6(-{flv>OE0VZ_7kr7gnoEK8w>0P@xyO|7pwUOU^QaAt zFaBg>AXO_EhvCc9YVC#{NkR1p3SE3> zqWCi&eo?bWdFHm5KsYNz6lkPju}Bxd0%$}Q^zfmz%a>uM7O2xHJxv$<(H-?%2OK&tYhB7 z{FapdD4xRieEnx4ti=v4lcrrommBhO3FwoeW;E!9!!7R#6ed@zws@)G4b*#*on?Oz zv-+*ndX1iZ$(&^o8l3?GJret~wH*niu9wGa!(e8$a5`n4={^3};(+5lf6IdBBL19{ z08FHB3uw%aSw)=nxJBxEmTWdzB7n}uK$ELlnfE~)0TUA^bR2&I7O6chcPKv$g7i!} 
zrNA6@7%?{d9n^;kpqyq@>Wrq_E5&b@dxgVzv%o)5qD;tckyQ4{WEQ9{^p57r!Qz)C zzBDzA4A5lQY0U^RF{kkjgGg`mC-3zB#9^%cLkjJuWQJUZ7{=U-lpdh&ZM?lSW==AI z{{x5{uGZ}oNr8|r+=*cOH>PBs`?MYI)*$>xa2U5>NVPt z7_>3k-#3(I5!&$C48>|~)+CiAnpg6seV>AEvc^2x=LQhD*{(M=F=q!0TJ+>h_;0`K z?g7ci7p!zC=dXzi_#|3~|8O4aiD zXu!D_f^O|bnvs{05pF;q?zsC|YP|KmHdL%dnZ+EJQW^RYJp2LUBxaoB4l7V^(ToXdI7XuUrI!CZ-133N{j7P@3C zkCUJ5;Ou~DRC-LJSYF5Rb@S2iYYb>dk6q-LO^{;$Y}Pe}=(W6-6x-Easr|>G3f7xIYj+ED2ubdL|bK zb$8#9#GL zd98Ap-6~hs7yd{U|5n#pw5s#DWw7r?K$vlqAFK(LdlYD}!jUM>UJCk%a3TQLGQ}Q7! z>f##4xUK`u)9XBEM>(wR)4vdj@!mAPCd{?>46n(P} z&!daAs1@!SmW5owM{@b4>A1W?ZLIg`Ct=SC0rQv^vGe^MUOKZm=YKG||F$RIhw3*L z^DAjoXb`XD-(^ssM1Oky)W-^+!|Lm-RU3+Iw^#vsG|_99QOc9Wve2Ec20#L>NS`b) zmC&+b=w-h7AeS&98jfyhjgOu>CDagN$st|$IBZcls6r{_sZ?8NnB!<0z>AWV45?7( z>E4hzBh2n7q*0J6nC}^M*=ZE2zI2sYoTzgoo%;FwFK_w}M}HW_Mlh2EyiELVa0zmn7ogJl%hoi6Mf5XC zp_DSzaye+#{YH*lngW!Xud5h={G{p2l%ThEr5Ez%g=P9XJ_W5@0+`88Fs08iM&47E zumWy}Hf#_9+LltigZYNM_k5c<5Npu;|9Ho~NFw?aB8EpW+P4`Y-Eqd}(_oh*iW7x* zErGun`s=rgwSBWW_*JG}<3(OYhFK*fL5X3`v*{flRJU6N8NWK)OMHJmXw0>}ZEw+0 zch+yCzpkLtN6cZ>GWBBHz06()!!b5epr)Oo^ygArKp`1hf1a}ru&_{!&uT0Mssd#@ z^BQAG9zAx}9QJ&S7x;T)&xk9_#$fo1>fwIYpyTsGRT2409vH^$k=W~v!6+o>xUERJE1pbkavI@9D`1Hg&m9aU>Hl>@JhQyop zJnZ21yaA3M(jOTehWm4+(+>7*A3vLGLTJrok2uIC)Y;s)d`4gy_J6goez#qPGxs4$ zvxF21#0^pq$081A0*T^>w&}vU694IH%halQQ`kuP&4M%f3H@R{s=DN`jAJ&Y$j;4n zl62{bPFnELz|uI{(&7n^y;1n_;-lahwqQrI>bt-ZF|z-Y-95CKETU^aX zi4E2WiJQ-8|pP}kM>!SUPBWc9<#i0aV>$F8r z^z;62eXjb&pV+wzY_!u$q!M2h`F$(Ws<5fyb=)59rsM)jQdA)Ck8TS<6SaFR@X~r& zXltR_^NrY0Afg7+Y!`2wMM55s9gf?Y_gD#Fmi)-*e7uT*o!}3{0b*)76Vd8LLpPiBj zJ2BtrYCpHkDB7v4pHNIOh$?YL*21)-6*iEG;tt2+vi7zfz5~Am-1R|5jy!TRD`~C6GjLlJQ|6- zgKtL?spGW#W*Ws7P(U}R>!!nv{rV=EQ6M}H#4pM4@wAHI3ToumpR)uZ|7#-9$#l9j z(79k{b@yvk@V9WF--zSZqp-k%q&jVfK#5ab3xiFD>LW6)BK8k;phM4XPTx?XFBKLS z{%{n=Oa-Fa&wJ;Pq6#H|fOX|I5r-AO+D0X`{%*=^S4mwC)#0yc!XdN)A4&2!LV7xb z-_)r87|~+U&msp-k4}>hCo2JN?LpGz|N0pk;Pi6*K7Py%)3Rg<2n&TsY zV*|%uXI-HXzB0b9Re5c~T`T{n7>I+LsI|Owo+>k#zS5U3&}!yg|Da8V_(qayEMG|p 
zyFM1=380E+a;nnkQd!k++5zB4gU!Kj$Eo)>qMFWY*v`Ov7yxOaV{j2c2%BIzDpR<7 z9p!7yU&>8xwQ+_l)k8q@b%6o*HXhzc^nVysm!DT8^9%Y{j0PqhgyiA+Fm^ajR!gD6 zprDblSLLv$s#2yORK**M6#k-aw*ybwXbrnpU6E8@NU@e)?G&i$iRYTKiq$D&g#k=p zSXHFG)?_RX!(a$0vQVNslB)8E{#)+@&?04E{+gai0AU8U=sTz=<0#8=%~eJus~JV$ z(rv-BGHp6`HIbgjypFIt= zdE0up+!12ri=UE??mdY<81V=qpo^ibu+`b_1k`}SvFQ}sS?z>KYy3djU(pT`0;>LL z_M~BWeMSP{f`m$abo#FFmzTqtqi$7t9_@0!=(_Y9osM)3*p>IUbdR5N-^j3*(n&BNo1cA@8>JzXYD?w>j#QE&v#~#j@)#qeCIkVw@Zxd z5rb#vP4oFcubK?PY9ARG3XK&f<9~*t3B$Oy==k-JH@W479Hu`f&_YvRy=sx);8($i zqC9?&*p;pvh!{@!Gm%{hB|+wuLC@1mpw6@s`~hKNDuP0^2QXP!JMA?=>=;&gI{8}n zrNjQL3O7l?5MWj06-wK|(oX^%?@ecd_6}dQ!Xpa*GHC#7N3UrDvqxrC-+{4y0U(L| z{P9>{uenmafHIOG{_WA+^a9&*Az0@k+j~V6e@E6KS0wy0dvV*@*+zw{vQPw=pcVtp zRiW?|Yx;lKQ^sTHI2#1cbUUZ_*4sIhPd<6}$tKFa>=!O|y`p&kAWTlK{b4nruLKP` zl2Ub-Utj8OGySvvaE5ppI13CYYC)v3-X+tLx*dAwyFTywB!op&X)=b%&aP2IO$w4$ zR4LiwAEEeqk$OWQGRmBR({~^vH>=nh;PcUQCDgT`Xc$ewYdpy2Q;3A37h9Y^=oJA5 z$wVh~2k2P$hph75-`GmW`Uj41>UV|vQFDL4vWxAwAcG7YM4@WMVnIQ|>9UE)G5@in zyDSj!1XQ~44SWMUYE3?g_yjhJG|*%?Hp(gtu#$%x!+<=+(D0Qa6_;0xdgj|0xBN%d zgAeI6CC6-JcFPwOiv*5vNf_JjK-6hr@h;f{Em4{uozA}e-}t^r|R zmTso8CjWg%u7Gknv6z;^G3hRQ4cU&^i%?$=kWqclRP?$XFI4;KzH0_EEAWK^j-1{$ zIiXkQ5|MpXI0T2Yo_bZYTCv^VkBSOhn}Ou*R%=_py3CWr!Yg@_^%lNg;y*`KZ+!}F zJkL#y?a0C>8xnXA_m{+@2=u?g7j5(V+2nk9cfN^Y5-2~1kNQK1hK-g(oMiHjnD#UN+k&Wf~D_35$2 zXbBEXH>?j5rP&LpAXr&W8u3_0AdnunQ&it?=RUE8I-yphBM20fDXHE@Gk2S`x5K=$ za&^Q)p9XO?gy0!g4G7TUou5gIiBTda@V6wmnwx(8vW3hM2)mb>e)Rv`$c;SMEmp&~dwgKe6&uKNxx=qn|llyOGB z_LAQC8$3N7OLH~7s&hwDJjFXVlt5A0(p^HAUfjX?T-3_j-#-2A;ujvBR=*4bDY)+J5#h|#yeAeFkQx5gP6_~FWjrf*q`>&&s9*;u zgxPUyIQ83>v6TQZ!= zS7Lfdv=)n66&v2PBR(0ne}$xvF&2n6nI)ANXp{eFe-3EAmRexBz;SY0E%JM-6=^+x z|JCaQ_IB)t$$@yCL9kb8ZLasKB~G`;ZgYyv)XEK|YESyPD}yAXlg&N?rRG-}r^!c@ z=2GfROXR4-7?k#d(D@=Uo2>l#)4&H(B6xqmJ0H$`h))nN&Oc=&0J?+z7N+lj|J!Kj zTi9T)>0e#(tzDb7y;iRijcy+27!g7%eDuZe7K7nOYEc+_ZaQ8N;jlNIfGCz^?{PpU zp92B6V#VPn{tPwpf@a<)mJ*qEb*YsSr zH0cz``V;tJ#HEu4=7yE*;TQ|ms!{!-J6#{;tA!tRH5JS-IlM+UdV12P%Z}l*Dm3|w 
z7OGpE3_4e+1>w^X?0R#f!jOg_E=kHWPQQgX*-L&7_P+yL_*0lrqHpVtUo#{-#btxN zVwwFE;k*xnte0^Yqj}SIUvmbsD{pcl@}MIIZsGXZOJWYr7v*L1j5e1++c=vLIeboox zW{*>52~XG<0V~ZM$Ga*~?Q?(#UNvh?*s8y1Vp>~0&opahK==!y^`;MN$&{$CmUKJu zT<|WtuqnSjR!0={CBipU7H`zQ!BSzaf824J4F`peSEyPU38*8&G$lZMg z?=b@&b!Ee$5Kvk`lTri-S;&|TY;9AMr zkXTe)&puB24TIxI>wyCa9GVAy@t^2{3aYmRr_IgG9WhNWLpXAeAAi53V;Esu>xQ4s z)NxjNe60lT!z1p(4TO;42ULqC#B&6>=0#Oq3??Z(9QL0{CP zt;wW~;j&$L>70t2x&ai-{se5xaG)nxq-m9m7@qqG8f z3o#gNgGrFz20jmUf1a;Y97bdGP%qf}{HPFl(kWRg-=8=rh+u+{x&r(kE6A zEgR@I*T2Rh1d)JPJr8K`P`eJs0MpyPT!TObErTf7#&8mTw041SV2Ok2O-bgp`YbSU z{VBRX1oZ*<(I64 zNb+ud@;V|t4;#7er)z$kdES%LNciw9yOb9nhG*cOmrt5Q#xcBw$h{^l7*s0)G5?BoOHEFA8Mvq{N2xd^;oBJJ zT@t6QvB4(HM<4_$h2drY09#R1h_c3mT;-2ZB12JxT+aAUbl_EAqQl$(k-kg!ZWPM# z|40vi_~`ZhapUV8XX}Xr{tuN9+8ul-X+JU%wpjsoLao??6CPGVOdxw(vs>1?3bYNw z@J_)+D$pU6NWT}a1st7LuMa3tYCvf+{w`z%X&B!-6HwPLV&Y(*n8@jtAJOqNGdV+r znExjLyr-}dXJmNowwaR+9H=HjOb8;>0C*<5kDI4XNpEw|2Wkg$Xeh{gfZ_oqWYu;t zAg~n!?uHY3SUPm?Uji+GD9=K0MO|!PSi}dTm%rs=zWfwi1@amgmq-83jjw9- z#qzSSht_d0?hF|4;r;F$kOmf>X*n_yL~itze=ZA~p(GHFN~;hIJ#0o3C~Jm5lM_fD z?4z6mP3~hyFAw#>=Y_{$8387M<{jWq!IQy+mdruB0XQNFq0zdHsYor1f zm&tsoaXL!7xw}4)Id<}W1dS-64Je%!uDFKXk-|VDGNJ_TjU`r8i_R&xbQ=+;4bh4H zd*VEOxn{bf>&Rk@QPD>aPk!VLLAT;U3z(~$E0Hi*@PtS5nI8eF_ZLLWqvr|&ZE<5v zgT-(;XfJ%dq@lr8A2C4uxmKgxK#Hq8~6 zZ`Ws;b6{?e@U}5>zAiCWW-qO5>(mj+Q>3wpzxk?Ms+-nKo(-zcy+A3M+~|k^3I;RK zYwM8Qoz(>}P%*m@HVw(KlnXlLD8QK$UYEP*2d&Zd{X!gwKDwserw9g${ znCnC(rc2qMQgR~MwVNNqmIS=8hle5ZuGu(V>y1*%dH=22#57#>O)Mk2`7j*xJOUA~ zn=&df*V9ZBfD=$X)DV7K#&-X|H!Mw`VUYaoxS=znW9-g3+UVn>2 zdz*~J;DCONMNU)eEpz>mScA?$;WVBEs&*1p|7y}KQE z(7(;JNvxo`b?DYa@p~n~9|907;oFqs#_14eT_DXGJD+?U=JrkJ zfrR6>?_$#*gQ3KL!lrCkw(2?8A;;pD?|1m&0soeK7SSW#8k0G+Ak8O$YK&?1RQ~W# z;8Z~~W=%oM+GC;1eeoqQoQG#pYCvHVH57QjPvycW=1%kiaHfg}VBK*(;`%*xBUHp> z{Vv<=8SupKPOl?<7vg-jPQEey`Kv-u*pShH(S19P zTcKn+?!BeMj-(iB>VQ%+lYz8@^f9_<7IQX5V;g?t^tW-QXBjMmN`FnX7I=+@H9IcG zTrE7td4yVr8q+l%#C^xF7uYKne=hl7)BFGTkJdDchn42pfr6;?G{sG0=tg2P7)FNH 
z9I_YFsHcC-*B_Snhr@${*^l>jzb;IH5P)2JUl27YQ`z~;Y^NjmmU*N)QC9YLR(ZCECwTO7@v=50GPSxV@77GD5=J zlEJ8_2LGOW{xl3@>gTKM8}p<+lwUAd&{JV}`kCd~kee01x1#b^iW&U1@&55x?{4 zCPcFsF4BL?_d?u{LQf0_s@m(D?ZVA8t)g5mNw}T0J;n6@`cknL=y&ke=`c4(m&2zyP=cb$qvuT(LK^>T z8ieQbc&@FYXTtPXi*d-R!e|*of zq?eaY($FyT!({L?BNZsuOWj>imxVd=`~=8G(cR^-JwMf|L(fMOI*`_4 zrpJ1^Hso+sb}G!|1#Jum%oxz@zAw03$tmDhb~T zjn6##y;U^TU2UB~#j-)m$#5pXL_B%>yt{;jP5pb(;Z4`VfK~&2@h@XSPP%tmXt%3` zLyB3^RB|pK(y!dIN0@f1x|K#eBNIXtyDN9t8@KyEN+`)g?DBVZG}e-tlwNI3SAFA| z`Szg!PQP*647$*6t4CprMZ2Q({M0n|nYM8>8P;ERg1`tho&rCyAz=CYRWXCSs=v4X zNSuhsO}`3BshzkSx*8vnPHvo4#o6=;q0KqFMQbKV9FuS*%~cvx#vx76(a||Ry5N-k zyzHLLpj8@YQ_lGjw09xoJv$F>;LpZ&z0<3;@Iksu|@VB~LM&gFv&C4KlI zWD$BE4!J%z!8}Uz?2|I)Os&1UwX(~}kD|v#RMoM|UWT64Rx#?e>rF{Y3#+Aul~gZY z81Nr#nCm^0PL1*>){2|A^kA$%Cp|i2E#6LU2HK#c93m#Kr==NlrjjWCVO5c30BXv( z*=cHnScIAF>qNc}>zF&4(p8pb z3?aQfTAnjxYXp2u5{mT)`FLH|cXdb7AEuqd)oWcC!ha6q{A1@Lz6A#Cb5*lR|Az%5 zZ))r880pTJmkdNPtc{Qj)W(vDl78lO>WFPhs3-4_>)XFRk1v=$3@Am$mcWg;ev88z z+hMQ{pE@Al`}w+eX(hX{Nvf(3KjYY4IMdnJL{^lTCW_C|0a2#6sZl(ieLp-_8U^Fl zIr0yi`y&nqLle0~KIOCT4E>!|T6rI8FWqFOpm8|gUOKQ=EMSdav*&9`mF2iY59CC7 zravQQ9?9n6Q*Pr$X1RLV)Rf9&8pOeXYj0_ZW~j;-WM4I3T(ZEuqH89a>TzTCBddsN zO(dX03x^ki4m9?_Ncu89Sv7`5uXYwWv z5`o0r#vI{M@&x&}8Qj4%px~hH&2;OPygyeyK%4xw<~!d1W35a-d*peP0_11(aMnAj zJyPcu`KLzCo;QtJn)t8g^{!p0m%@3IS6jY_v+l+e$z}WHmriy1YQsjBHkN zqtX~9V@ok4_PAdmaMn3g{(PQ|{`Y%4@@}UA_Q>Lq7y8>sA@dI};&JHW=-g=JT6{HD zDJ5vysIvWQ*>u7}xmQo_YaIE3f<&a$)=<_?Gzbd(QXBZk>MgF1}BIdYpI%FeZx^~+p=X6@l_`&s*{67&bd=iiysH>!-kA8=_ z_CzmKoGKT?94A%(0>__(uQLLEIp@Vp+zwyEY~MmlZMA07ksKJ+bh|8Swo6p*7>8R0*?+SfC`ylu}()CHx(aeZ?(BC6#?%bSEue!6c1Q zzs8H$%S2AKy$Q#oV}+Hqxf>$iQWhpwz66c|m&3RFL@N60lM_@i9i1WTpAMQG*Z$`(?Un(zM1{|OgRq&jj zxS$CUnKo^Z?N}aR8rhM z&o)+aE7L8v)y8tDe&t4gy?leI=g#-XTFVkvQL)~${WDe3ya$rwP980oV&0E zhh$zeqZ3kB{qwsvn_untoYvb(etBM2DfMM}#b&ZJ_ZqSl?=@%Ept_qgd|G$d52E#+ z4fI&s-^&^G*YoZqO|gpinAx1MNDsY z`3qg8X17|bRz@pJ_Suog`IPCgRb!qE@{Q9(qXvau%X{{h!myZ+v6*@7er3?#+J}?| 
z{3MW%GE-qC=v^ka9Y@R85O^Usa|x8G%`ug79jkGUw=ynmxas@$aJWjc(fpn(PF}af z@Fp2{w#Jk>jTx$xIwTTtIrJ22$oZ#iDQ`8U9c!PK9Rt4;sgMjq9GG#P?tW_49kcC~ z7$2GPGdHgD+_@}f^O3{X5#F`j$E9`B@Sk0(YKw&guxpB*eP%DF;4C*n6;t}fdULdJ zM{kQ;922v1k4_wT9&NMMMGtJ(A-PB+eYi0tPuBmhl#|j47%Ac%h5n=8c5+1aReEm> zKFJ9QXPc_swuyiS#Y6!?EY3uckxCe8RgtYYtfumOr<1q{Ul;ax&5rKmrQm?>6S?vR z$=v#Y#-~!JdWr1j`$(o(mcLviW%Inu)Tzv;>bh@__k3b(K7TQAY(_%5y;pjk3jOg) z1^;O6yhuAfrEuM}&Tj1sy%Lnj?diM`TShilyuo1z@eN5msvX*uRWN8YjAYwja-K`3 zFsWJyms#jC#unPU-%T&_$PhQztlG0r&r3VQ=XJ zZmsv+C4{$fX2I_I%sk;@P8XkJ*>p(n*DGR$a^@c(aoNciZrcX#mC>5_WN4;I2JJ8J ziBQ2GZI9jWeqt&icD&}@gt$lc+pPhb+}yS2>J@x*E+6jw&Aiig5&QQyuj$Ob`?0Ti z=NU*!T_>}f^;I8lyJCHsXr4Pjbl$sk=w#Jm4Vly6zP;ML8Tjk5rYv1h=BA)kM5A}l8|(C zr_wbTjE?67FTJn(cVDlcSO0g;ZR{IId_G5=VE8xw*KC)-U?p_vbI|mM z%Ra^V8J28LqH^>lq!fDKYr$Om3n??)!WT?v5WMj?JK>{Dd+!-bV={~AjbFyjjd<(6 zPBb@^>d?JY1dHg-s@;ld^wD2r>aoHsd)_|>j>}`-emL@aG4VL4;$?FEOl|90P}9>F zTFK5~g21Pbm(wfGuHsjqCRsG&8Ys42CU3$MZ4c02wvW*vD$+TT}FsP7uU^>c*Ouj`<#&M}TXHO~D#O4?| zb)M6F?~EJf9%m`C{tS<`rZgL6%Zrs=ftuyE7AYGdsClU@Fm&b$F`Lunf}NPj!lgfv zbuyA^5>4@5q5K=La4Q%P%*wuJ7dG zp?9fvW7a~h9=*4|{TtqfQd|)|rxfB23g!OA%4bu30Og*VZ@C5iA*z#w8rW^>bc8RM zjIQi>g*|G{u0N`d?yqHD%jf;^des*E*v#B=TKQMu9KQqdk%3OdxSF9X6SQ4&}-WilS$2c?_sdPx6#!f_xp^N zPw!Toi)tN#2F5Wvc{lH6s#%rKZH-iSX_;!3ryYji2)9ot_dLSvn{%U2-GYC6kSp&1 zl=bp}bQ$N83iMtBSjQkO9gkJ)Wc9*KzoOCAc!Qp=tAjZO+qa>eYXDUZHsv-|j;$Ki zk-}r4FSsCD@R)L+WJ9_HJ}1A*T5#U@Fz+0vLG^1JNkoRoNm;ac;|HU784lkB(c(hV z(s-|Y6`^TU!KC{qM_pFKQwoyx*Sy#EPFw9b=HZkmh5E@v^N>}B3^9#ocms+nyH$bc zh*-;I`&G0BRF@yB6I7yuSs&9y5D`zPt=L-4w5QUUWvnJGs)GAVI95oq;9k6nHx?VZ zw3zQ1;eo*j+a~Ke1ASDp+#dvS`(n#2TdX`+e-W)~2I?j6^+freiE>DO_Dn5nKt*EF zD}IV6GcUwoF_k~o0Q6vgO7Y*pCJRGd_Ob5(f!ea*$d469~%-S5y;|Sxu>*~4uw9=A?$Af zPAkqzt9ryOPFMVd>L<_8GWqJu5KDVOTH%%9VY>LBY2~EpwU{^Mx{$FU!?32oRDD*TPov7 z80jMHn9r7;F54tp-t~k1eDZ_BaQQ$ErE61s8v)eNZ+C?$VW?Y?cCaUZ|A#v5=6Hth z%+z-TOAYwW1kn8*ys>(^bBB|czCU%psT;xKzg->8gRs!#l8@@uU^U3RCXlZ`Ml~Po z_#{mL7rhut1#5Iov$EqtX^pH<#$`B5bA<4x0~od|V`EXhMWj6MAsOh3snUkjcaKzU 
zk3NM3CCe&={*~zQdw*a`r8@TFOzozIeI?hg`+M?0HQk1r)ld+jsK$~W+rO>8l=d?a z)rC}_-nMj4&E`;X&yO37GNl5;$W84F!z;iEuF7#g9IJ92+Y0Z048uzLw-8)&brgbTwHQ z0ZF0i%oG%~?89Cen~i?L2}kfNWtDog(2$;N!-E~ii(Oyr#!Z-!uO=lB%1;NR?QbX` z_{+A3Grvdg&+}>#hAVo%`v8H>!e~9_H&p72yb@o%f*9s{jBmbTwYiC*`F0Q# zRIn^e(=r1<3~$U0=F?4Stga`I%G1|#y^otATu?P<8+CUuLm=yl&wq(v_ant+1jFfN z((fa$@3^Ky*W2s%0fd7qkHLdoYKLLuE&R>avmZOqKCxA4<|eJ0{m1xT+#7fmi}pDK zvF7YLtVkEn_I!3nzr;g++s#kyy0;4HwRsl+=P6wqf+DXP%*zivCajs#LNW~amX2P@ znWVb;HYjk_WWjrnK8$wN`33arCd%q?-7nv}>o9PU1a7=qEY{w3NezUGO?k*#Jq*j) z&k!yN+;xXrWOX5FtQ(^XV#i#y(^h38FKuu<%F}xxT3KSCQ)L<&QZcdrWPpmu7+zB7 zKPkA}o@^vGW~jfT>K9AC^|PkX#=4mOmMFewS8Fg*9niha1#ed#}5=i}(?NI@Vp<`0f3_ z!rW+|mBy_!hKXH~#KU#n6nfw>7(QNiPUi$2gAvJCe_g9!QI3N%t}W~~g%`K3t-^Z!^4%P7|}c8hGK{W`iXngriJ` zN3q(*T%d!N`GK(W`&}b;y!;pdSM=x4$e>Tj?J2r&!m^3P-M90(CS&_O$~V-Tvfdt| z93SE%qaQoHIp}&Y^AN^)PnZG15NW67(fIEIg59nFSOh6bDJWGYya2r6_cI>rfpjpAsojrkYuW*`>DUVlX) ziogY3CP=Pf`^1nl*rxKJP zY+i+_hlDiaDhNXhuEZ)qarr9oL~D1u$#x`05YM2K_GV7h6D7mS^@f-Tr;mw1f9ffuptK zQlezB<*{9M5{yDdrF`Ha0@BNY*xWzlT~=0bPc z;~16w3sFk{0WTL(;`b*jeU~|6U!NNXkQ=DwyT|}THjmk@>&D_|(;hmI+Oup$s<9Cr zy0~u0sw(WnDkF88mm3m`KIN(KnL}8q)CIGJTFAQ|eTmo-TZ!<@g>i90xZ0LiCO>qc zCx&0-)IQry@u?pvdBljvjtT14T;&}$0J@{*y;Euhr0e$I~>O z*g~Ig7z^s+;$ZRw^HLnQ9`U4>@nReQRiph#O1=?asSDgiCrgtv>OoE`Aeyvk!5M7m+pFGfCsxRP!NIx z4VMiNDTlzGy+}oYJ%QFL$p>xQj}Y1RUH1kG`_X(`N#nDGPS3PN&`ZI^HQeH*#xTLP zSg|p;Kz}HUK41OJ#`H|i?D}Eg9;K)Kt`(QKA{`qC^+>3*Wt~yR>PqA1FOPN%+TyMJ zGFUf+95f0Hf~Sh-bpy4!7+>IZIpIhLPUk*vn90MoUmEI0zEl$8$h4!^>iVvd$WKp9 zl4qQ3^tUbYk?_PPeiUT?X4C&de?ph9Ohd9$t%-yu8h88qU64c(Ya{lfIR{Zj3`WY? z`l)%_<0$VFzI)!yu!dj-xO#>xXM)~Pa27cMmwXmw2oP)Hcah>E+7#J+XspY?BTgkS zF|HWftI=PnELDMyVRJB3ps$^NF}*q&Qw)AZrvBTdwZ?Nd!3ui zJwZTitlOyC(7Cr9F`iq5DX~{L%mAfZHxSj?G1fm^tz)sZ-1$7}zU+cbXkfA)LLWk{ zt}ZEIZ2sgKd|Q|=CBEq@KGCI*eXRi4mr}CUXz#fnrn(L8nMH=sGWwTaGGe0? 
zbZhvxYg&hS?D6$PF)$7L!*J4^F{!74qW2DST_W}JkLABjqwr2baHjVHfU3Y>!FY)z z0MuWV4|loDqJ)?FBvS6z$39f?AF98l(5pP9%&L}CDDRGvRR=2leqL@&;=2szUWetNf=nX*KL5&t z2)itja!j}p@Fs_n9G-Vz4(OKBN?cXLM>}AAok~_cSfqPHjh56_t07k3>x)Sw7dT`& zCOKm_STChveS{<9ULjh`_CQiAZ-VZ*Hu8t4+iv8VLwxXrOTZZ;_J0t5`vt;BXUhCS zg>U6&t61n@ZDmvNeCbUSZE$P>M>}a%Vnv~Pwr()3o~T7$0i4-hjfD{<;x?IIo22(W zPeqTgElq{sY(C#S|HS}{eN%hlySw~9thZSafHX&T88yc0%`Lic_HHSo#WD)-7aiA z*f)x_iQ(13bCcfi*waDoPbzbl8_r7kZj{cWybm0D@6lJFsciVZm?pBC#EzxZ?F|9p$#6dOu1Y`0!p94)ork-*Ffp zSWhv%pxTGxG#|UGZ!C-exK1r+dH~&8DrJbcQhQV(p4Q1&m;P?tTA2&TvCvw??!KZA zQ3&js(jRp@-1zdy{p7>k?TmVl2N44>avOL0eoo`M zmL;s1v(=t%ZcxeT5g~2dK>xwYO&7W0QSrM+zs%(9l~{p1&M6N6u=hmL$UVJkh||G@ zj&}vE*K4cF=NmV|1>o)h`PqPp?@Hj!y&(K)O19JfoV7+GgEqkEw9{v3&6Niryw71p z>UTN;ui7Dg98wAMF4U>%i5l_P(#f96Xc)AaQt?th?rx1VTm`-*S&CpBC5pHIDMtHVw;@<8t%F6%y@Ch|xRoY(TTR`2)ihWiJ@ z7pG<-8-@r(1_HWj_UP1nTeTMg;LC~c( zGux69xeXmdH&cZ0j@HuJ)%eqg!l#d(78umf{bYvR(ReBsyrvPJ zK0~0+i@Ms8T|zGEqR+K|=G^ZY0j?k_jWmQq3|Tu?q(}CFAI)+Q)LkiAOFho#@G^o`q!d+vAIN ziDItL>DDlb)`w^iuA_o3pVQ+SM^T4^%76f`rH+f_gCmYj z(fbOZ@MX{;U5(KaBG zJs@XNTlfAyj;a^`3zN{Y_&woYMnVNn(=I zaN>cMU}0X&X|hH=5?a_M0iL(x6IstJ^oP*06ExT_6t~Xji|xI8{&mg$xUQ0F&Ci{_ zYlQJwK2~+d(FXcbK`~n<)8F(Wy9%UZxiv94wW?<|CVOyu+1SB7D07|FGNC4<{HG)j zFu-smuTuj21mz@jb_zFWnvy7!6ZvD@9$f=is-KFl*rms1((@hBZ2XdQg9ajh#59hH zbA{ym(A4eeOi%mhuWK$`*#`i)!tonz@(`6~3|r`4SI}I+SR}+un;~3GXCZx)V?LzS z*m$y}*QEiDuw^vLZngt*c{aJXjlEz&EAty(Di?G_cI5{ybJUGpkZM`!c6GKBjCv>4 zyxc-Va`-zXD>$dsZciVeeK`v|sp(r&d%D4WDB2(t=Jx)~qBlDSwPuLs{w`82E`!f> z0*R3O20V&&=nnZGeA@JYIC`e^-5ZnLzg_M-2_UUXP<1b6Fnn5rRh^>ibwsxLj?!VJ zQBdY-{bAqW=wur>zj=;WrG&s*la)j2#$o^ly6KDqp_zg1Rj(g|Q))<)v$8DLETY6_H#bia3h}=M`;7D%5n%4Psr^R#xNCT zZ$C?H8cEV}A7C!tpD}in-l$np>d&;`iaSeWk3E15=0Dd`%hU~e$O^284+uLwc4W5V_9g(JZwT+8uD3s^UqARQ@ z9a+ns#r=oZN@JZKJ3e#Vj5E@I8t$>rvi338&G$F!ofN`?_HTv#G&f+mx7PfOkQPL? 
zeKgupQt+29ko#t~UL;7_cgRyKuhb~idCFt1NN=7X<~>)uKIzm8_W3B5uS?IALL?wZ z8>@yZ$Xi8N&6r5DqPXOLRB`;S=kNl6{%#y+@{9KH z`S*JPeMY~{_wV%6CAj>d%3#&_h-s`xu$Eluk1+k#6X4L8G%hRio}R9IuDcHU%mwQ& zdDbSb{^JvWgVw*W<(@RI0OfY1V&;5pKn-8S$lfQx=J57W%KHG-_`R&(|MIE8CR`Gq z)=D@0dgtTqN6s4dPMYz>e6NTCrvGgjR}5y$oopK?(wdS;FFuaxW!-sKoaubycMA9q zA|sZVRlaTj%hiTWWyd+q9FRE>QFLOi=ipcTsGy#_`7fVmu)vLfH9)&xUPW4Znre(i z=73Kje4~7nOL=MDXZl0iKWdr&{L1YNp8ShrvNSiRE*i8MrVD?{s?+3X-pGnXp@#Uv3YUJ@djIosC7u#H(5aDpGjt)f5azhM3lnP#_EBjPFDra?os7#< zZNqld;KNP)HYKcK%>j{eXP?-6*tNj1% z|K92}qSV8olmq2XxjZcR3hHd`va&^Ah-F^+55zowfiO(?65k1T$+`?{{vUmL2{fR5 z)remR7fc}6oLo4tHE*A(u%DUM71sZdkW)V3fUA2=2log2%Y*&Ho~S{rDchDgx8AwX z;vBrY!-lH==dWG9BzC~c9B|ibHPe#S86|>${c5Jwt+wsiX#ezGfHFf8R&%fne}){K ze=K74iKjH5foPv4H^cuL5r(-N2I{=T<})tSXGta*pBh*uU-j-(-nk~*=|o z(%#$Hr=vzKVW0vLrQ?6Qt~30s7=Q8hjW61t%(K7roNHC`6y0bMet9r5)>9@K-a$e3 zfIUnnjDq}s6$B_+i~muZULJAGw0>0OyT6#_#99;_vVFic>)Pnh7DU;xX{}-M@djyq ziXF;TfG)4yLY;PE%evk;BDDwf>{Z@6Y!c`mSYZ zcJ^oLv=uXw^n9fFXoM|>OHe^?1m`ui$T1pXZU1muO-=rH%=@3ReeFt_MD%+MM_BU7 z$D~cBgg|wyLL5I4l>%jyY|JKknf|LP!HrwN|7Jxe<*>jXi=EjLi`FAW#;$Kdm##dm ztq12glU%z4$(NG)$ASE<6%5HY1T_dOCXJT(HD$AKl4goXw%9I(44Iw%G5@ve!vg95 zV?Hik%3i}}BaGx}xrN@S9$4dHdVkjct?@<+#iNt8^+jy9oD;8?_6pYjCJIkDlD>aw zj}+alOWI^i2qf{P!14P`{Otkwp5v-ul}PZvc)Dv>@_m}`s$@rHM!hY6=gh8rMf56x zYvPoe42Xj0TStN=>7P^o&r%siai0!$4iLSqi`eD*f=!3x7p{2q%AQNR?2UtwaV+oT(*9u2b0W!>bY4^sMR@NfDWx`i9gqA)Bz7xMO&+ryVPDf|R! 
z66$RK1D*eQqX9aNb2hc72i5TbZtn>=!ht4&i=pSF%KoV@(jc|}DeC^?eu!m@u|J!z zN=niz6gR89#u2zmFhCy8M)<}=;q@H?@_*TH<6^O z{m&&3<74?Z>q7fBrLtf5U4h>x#&T*@rP*f*JsVK^Ujg~c*X@==8fP@G_6N5rGJ&d8 z>_wmC=I1{??|q8m#;$QFI|Osn))q2GTmDfA02 zP3vu!^OZUQY&)#x`RtMf^y1Uie_YNV+s4F=#g+*D!~a#{)(Ae;;Vfp}-B7vVbVK8)j5x~QuC-i8>1wYM&P?qWDIei$jMT9$;P z{0Q9Ze(^&4eIZ9vcH@*c!C*GK)F3)BQtN_EJdA2?zqfaBNrZrrL4($#}S=EyxM=;QALDwVf zmGi;Bj5xQm?FwpLm+z6c zJ2d%a6GI1aWmB-# z>!Ue0sGSLAzdRD2>z8_%Cbt0NnHwlfC1>u~hwNn(HhrzdMa#yP1{jM=f<$bN4r2OC z9UsOBxhsczUZr^a4}JC}1?nnfmBueqw+`1mF@4V{`(1V7t(GbMn+>V&&IBEEDx*l& zgAw8$PsEOAA0ldWOXopiMkMRx2BO~24ZXd-I9u)o8B{y4j@Brg|AUWQ)5C%>;cduB zUZ{odd)6c(VmI$u88NWf3qR*@Msxn_PJSJIlHV4N%GL~x&KDI>vM`&N^j9SiL!*dj zHoF6G>bF^)Pm!!?&Q%@@1+_vin#I5;rSZaE@|~JhT-@BxD7#`8*5=1t>P#osxeJ=r zQb^HP|EWW-FE+<`i|leywPcl!hdBh^#wI~Sxx+b3vbI+IWR^KFF_lvf5eG+CclN6&LbBK5e6?$}Hm_=7FR4>$VWF{7s3^T3$=7Yc#I=Dq)&Cy}3O`X*GTqvRA3*}s6giRAG@0!3}He9^6% zs6H0G_DTCYm9QzgN!NCr^9toXu^7x1g~OU()GL z6T^jrtq&(TXlbTvtPwVb4N9~w#j3;kCq2g0flFKwOxHlu@QAtS?lv9I+9g-P#sd zff9#WSw;Vmq=E2PVH`L#J3@xEI* z6!hMEE3A3)kw(nz+2^c*Rt#?(x07^Lje}T+_9FyDYez1&G#g5rGJ-*7A%gR%%ey*A zG6$1omgc{X1>VjnCv1-e1+FO|3=L~Fo)=C8Dz)M-akgV&bkB1MWrZtINy0CVKh0M@ zteK2r@t;&bp$j=qYFn%?w|=)G)CV2(=qb=GSKeD6=Q1C%+^s?{gg=$Sm+cGRuOy5u z4u1DLBP0s}E<0k{{QF1XDtrZR$;NR87ftbIe*bhPje#XuO{|t`LerI;{CAt74B6k$D7ZV*;xV?Kc7%;%Iu(MLEh`V_{ zT{NMtsU35so1MKEYcpJst&y)O)OJ*_(VJ;rc8{u6=ew8EDcn`#fb$S_N|7E%gp&A)CBLHh`x! 
zm3-%S=q)@6fGJD|$-E{PFop99I-An9e7AWWk!}kgE`N>KSp%FR%jd*MMXNheiA~Y0 z3in4`p|9-{PpUlaQ03!zTgTZuCWF1)VhN?5YCZF@+1NoxxzmzF@-qTP9|tHEA&B;k zxU(%Q&u@hEe9N53#d+_E0B4~N=i*Wg#JL0tX14bBQoOb55j-Op%{paGcS}_j zgo>iTlp>PqROfW^tYn~OZoW0`=>$*ZUGrB*_L4VE^9M=` z&D!I*>WpdOVx$&*c?biecQW;Kw@gY6C4LhqCdh^T+@s}s#HmwrIs6>CTnTZy3;`Ipk%k&ayEgz3qhjM!6* z8XW#Yfw3-IC86?GRywSHyq}8RjPmNhPBNUfB5TaB{H%P6|1koMSs%#M6X$|+8lYE& zK_XiL{)yp$BXHB4iIW$0IJ63RQUJx&X44zWa6K9B+7YKXc;f~$n!&(u>Vz@s$DE1# zYe!qa83|oSsUNu3(Z1(u`Z{T)(#&QRqVuOxPogo@{4(0Yz<_IO(z@&8p^M+a_CkX7;5u6fRi{g()Pa&_{Wbb^MrWS5k9Lw?62u70o9*dv|Gdo zLV%ivxevmBEoO5;xY|e4UP6==fA%VbVZ0)yGt4&VelW1u7iC=*1^7tZBY#QM{Ok;i zSM)-0#c-=dXPz&5l{6av^`p?%yNen$6W&xe1N$`8rI658X27fW3C_tTJW6b|GGvZN zC2&sE;B5u&T-zxWW!%qw_ZaZyS<13!_IvXM;^$#PGu`kGgc1oTw>Q#n&qJkClaoKYD1|=P@}JE~YAl3e>4SsZ&GO|F|71JV<6R*mSTF=hD8O?PNODIb#Fw za%HPDtnY*tY~bcaooOej)X)i7g@3bx#J!d7SyLF3HyyO>qcnhyh?fA!BdSm=(WGo^ zG%Nj4bRr4hNymhfXh1qPNA`YYSie0n!*hJWl#tEBk~nLBCMWigMC9LUQhO^}>`a6* z?X7S9eW;^#I@LnD8s>)D%Gr&AMoC59G|xI2x6^3Czj_tW({vymG+8@|SHSl1^o!#K zsj@4&D1S3~DS1+uyLH13#kHxkW5N+HyNos0_^}5a5gum~hD-j4@UjLQhnm{+Q1bK% ze3iEBI;W>Nr6a)W;hSw39}4D0~#5kJ_O^ITY^e5Zhnp z=m9es&__qMUVg`~9x3~nQs%mu+1|6}p@7cCH*4=4v`~h(nIF(iYmF=-I}5vp{Tjf> z1?EnlDfT~`bii8k*+nJss4>0|5%nU&!V1svgvbjJ%!!05{TmonEOh#+iJ5vouPaaO?LCZ zhDrZhuGa*2R@d;!I2ad&7gim|uu?L_J(M4n+%-q5}KP<@w{W(c}@ zpm_Yz<<$L_A~K-jbcB}+Eb#@h!B(J~b>!5CcV(qH6=rAjzz8bdHj(kS-UEqF34mUKy>gWL$A)C>to9!s}e&=L`K1h zwA*I@Fqog;R=~C1UtS4UY0#pbtYVaRjg2Ha?t3W>D>5#G_}9JwICW!;0Sech}%A!TA&P zsjQbJ`FGDO(+%gclcel+W5YXqHHB{iueJ$8 z_>&3y{WEzpk)4pP`gcdFBom4Lb0ZV=YjwB^Q4cNIhk;qrcOL+Deq6uKIXGK3CtD)T z)}Rgl3@&8RlIwN;oDncND#ip{Yke~nvj9o<{5|F^?IOCm;kAU>+t@Yw_M$FZ&#|-(%8T_Wg@s{rn-eY?3(Rh~lO0`?!ImkNCHbt9#l! 
zd44L-%BPJv_MuW#w)xTl?8u)>0!8NhRl!>3HhH>htqxK*bO(>NP9O+jT;{8i(){Pw8xQ*a!_(y12vI1PetJ;m>&`!HE%h%fNk;sbOA&o@H9Bep0&cLa&;Uf@B zxtR2KF73nmc6;|f4VrGup_93K8owF<7#gu5?@q^qt*Yvs2aeK)gRQ;cGg|iH0A3F) zWML#)K7^%d+OC57ZR8MoHVP1FO&fFUL=lha(UWaPmJ2>>NU!?JhKZ0TLD^hFZ>U7} z9mk4#g%Yc6_2buZQ&a#V$R^2DngZ+ypYXPU`D*J`}|?JAT6G*Rj^{?573wAPOHIc3vseDo1HsL-P0USEk@$VDjfG8J$! z$3*%b+pib?5&)A=HyjWy;DR;`uplc5g><$D*bb$jQevS^gYzU*8SW8;{nr0 zNCpx6>&q*97csHyF`tDOg04@BO#+W4LMmttOFR=U;$#&=`4~M{8&3NWIvb7#65Jyn zf{C5iBFsAb7k-0x^-v%kV5zaiII*sKyFzPCkEBpH6}MPtrmFIxV&02y^<&OZzN};A zSdWIv7FM_qZ$Bc)SZd+VXg~CgzC~39GW}GYEzrZIr&*J0$>}O3n-d&BWg+-!k~xAj;23ww#HT{eo7` zb1617AA#u0Dg6?$6Z`yr?oT{~pNnt^K^SAOztHGb#8mh|{pPB8mr^FR&bcM?-<%xwU7{+e2S{v>EMg!@mYU86S8z*GO}`LFp!z<|i&rq9Syc>2Vhc3xj--6c!f=+iaK7_`M+$Me*%BG-e^Ll@I`>ADe<)4L+N=uw0$st20&JF6L>-~%Ec8CmMExUhU~G<%P zJsMH)kD%qck=^wyKv_G3OpWw$h>BJ$;=bpb42AQdcWTqy%m_C@56E(>5KM9WFZ8N_ zxaS0)@XOGV=0qzy@`JEsl+q`9-yZ69UV<)-(o1jM>yI7NAEmIj@HOM;Y+h)vqobS> zdiQhVY5rkMrf7+L#t@8vuviMJ#=4QK(%gRiQ_4H(OmR%~b4r@Pdm=Rm)X&m0!nqsJG>zO&b(_P9J}dCOB*)p(q`X_@Fb8X(K@C`1n1I! zJfjhEYsr71a$yGCR#{B{h6p|qHULIwlMb=I#EkPm&^iBxzJOgwiO;Kxz|o#Wjmr=; zW;;w$CKy_raHrb1OWF9eC0@7QO7{@br~SiO*qE>TK(K5CPN+-;YSpV#3q}t^cXp)! z_c@B~hqq5({+UY^SNK{UPG<(dPFy$e+iiF0No-h;2qa%{d3?`n^GVLcv9sBbL5e$W z>FlzHpnz!B_UQ8)mn*su1s$OLGYFRsewpd~NXn_#-ucC9yRd&T05*&sB^9J0luO+c z=EWr#(1o3wb%hPNb&<%Zw8yBMKcjC6)d-))FGQb`kqkLt%mwY+?yW`ZxmcKS4B%cY z@d#ZQ3(AEbPwbuV2sWORojrC9=uYHUaa^cU;Alnu6fP1p@~sV5Nid-Cy5aQMU~D1u zuHd1+skf^ACUw5hj;-`e;zec6d`lh(}M7*p%{ z=h2|{->~I97jb2<#qD}r#>@1^mL!8ZET~}NZH59l(&#>LLIPy1!l>f$u8nTiedbTq za{#WfwK=c0!lWe)|~fnI`}%=f@?!=KwLfJWC3tO%8mtR-Y%j1lq{ zbUA>P&#tdAlA~HYBfYa4z59kYc%&uLeY4v=M$pVkacHs=(5W|TG?ahT-shrc=~}qi zg?^>6tDeu{f##DVhHc@6w_P zqqjkRYqpJ*-sM2!L-n>s_o0ZYTyGaK=>AT-l2k3HepS%C>hTpn7HC#)ooNq?{CT}s zpeOHU%~p;WRNQ&?OAy2(2orm?r@N~SlNg7ggY>#3Y$Dmo-tY1SpPLjL)Gr@r4>aw? 
z9|+s6{<+9$zr-1TuL&J zoy*9o&^5cOCLA+09t}ZMRzzl_oHX3a#%t@(pdX*@>=8|HGS8@Bf#OPJYNpXAKcUaK zN`PjV2kQmce$yGiMbfREWr59Bml)f+g!%M);k$jIDyBeGF&I85aOGz;2J%%$+q{1N z=$bBok;*1D9Dwwdd$PLth1<&3>C|R@DCD&82)5F_xIteGtJ_({D>iNBw|3usJLP@K zXcU6+UUUL{Xy3QC-*HF@24Wx(iIXf#eR;wCc~g>VS+ZCmw;v8=cMxgYa^OJfnRDR< z+YtU4agqIYedu&l8(FM7ftrTuF+wv-@4GSm4RVeLnAqW>;X`!l_5M0jqQ0h^_TdHR zt?k!`b3(-WzE-ltVcs&S5(Bw>NJn`lSDlR(3Y-ZCuQ7j%8@7e98Y#lz2zN;;nD;Yb z{uNFjO0<;~C9IScl*rH1R)^0ha2v2kVr4bD`aA5+z_X3z;4+D~PEXt;78T>Te#$8E zYbn9lV;GS+=I(siD&ePST4;SQjU8tl=Sw<;_ zrz&2=Gu7swl>j`bDe_+vSMVqzeZPvzJ>W!by^TjXbYA` zWZ6?$ur&44phV}t=1Iy*9S5p8`U3AwuWfVa2n_sm9Nn<;ZU%zUA(i*hfwboQ^1zb| z8O^KeHcz(H+s1-V{c9gg8M=MV_wqFDPN6Y=+;f#Xg24VH~7#I+)cF{kox8KKdFF}eFyR|w73IPaxpqaHXL%Wk9I{&JnGn$GJwj{Z)&IJMvc%XnXCh;gAXE41Jrg0oF z&5^0u>s1jOlZya2?EtK_@8cWP_@@_#2ya0{F&fcMW7-#4Qwou+_xDM;(#|X#q|5D( zB)`WCQ~v~;vcbabHNF7n(dUv}(X>os?21gGw2?fbknn;yURHR7nC4?ruMtUbm3oRgo7E1Cz=*&EGkI(P_ z<^A@)H=hnCXP>>-wXU`H+H03X@Lqkf9q>Xvi1pU&+BfglUo&1d2Lh+a`48nAno|TG zBwt#Q>6!20F*hXHnw@U@xq^=7sbjY-{xZ3jgRKjeueRNhe(&5f_L4yl4 zP|2-%uYMtr1bl5T7;P|oZazIUC+xgMcA_Aw?EXHf^wE0ZF-nwxbCW9Y zNrUnDILN%M5=h`t2>#^^8Tt8$iwD%rI@IV3mpK%yZjceg>)|J8Ni^hzVeG2~H->t` z2}#(uF?4hE@t)igY=!jL8mCDzZDS?=^SIbn#enrNrC!Y+7?YS)vZ zi5;_%o3QEGdtgH2;kD};f#0}-yuYEh5YHf_-imu7_71mRLp_%^_v;qe)4;x)*)#IK zt7Sof?v0UqZq?pp{l7-?kYnjZQ}9k(Q+uDRzx(0j zjEpa|B^w;n_ebVl3JOC!qGr2H3SMYu3vv!fJ&+GQp8l+qdHe)K5MMo}E#x6B@=KkefHH~tkIxpAl#UIaeXT$m-GgeC0B5IZ~1kj(pK~X`M^D?(C`LdkK^oq^*Iqvwgy;hmCs4& z^Ruqrjk8%Ei~}Jc%IV`qreBe}vuqxQ!;nHRoPkLR@C{Y-vnfx4X7ige8R2%LVAtyP zf$q5~8Yqn`EDu5J8aQc6?Hc&Bt;r*CZNbWWLq&!3O((P#gI&HygA|fB2Q?s)NO6mFA`cVS`rw|Pu;WDhF?x;? 
zvy=d+)|Xf{$MO-Sv@X-FCACZ%*G|3OAaw{1#m_@k8m^E6ZtkNI)7x30`Ryg(tzh#b zTU_ zk8l3<`>Cs$00~=-MU_?jN5Stt=<@Qj;4{q@-mz*|{`t}$zdcoH47Qf(8@d18-v6W8 zcZ@)-{{5P~s{hMxtY^+X$K5prI*fm}CB))9lkmEdnu!?tNJf`4aVhwEgPMfb6Pg+&aT>Lb32+)FPF!2CqN+AOtja>b7?x$ubJR$el*j?!x zQc_CbE^ZIDE_A^5HNk~kA}@0P87lmb;Vx%3#!psW7hSbOYZ|U}ooQwg3E1~;O+x(K ziqvy+JnQw|iDx(WIw7tZmF71eN?BEV71e1`D*kM>R=0{kr#A~XC>%G0cl(&_ z)aYS_R4)WxzD2|2db9_%q@zC2VrPCI_4jN4zUaQj>ZJGASpall*Sjqs3o-3*H<;5y z&qVT9XP-lB#%V24w%EQz$fwfPpB>Z}WFxe}e=z~<7FYmm`wE#0wjO!fzK4S|&~Dr@ zGu}56`L`OZD#&E!pO@CM`HoTEr>Amx5{MK>Zt!=d9j~tBL4BGq9ZD2tn zx4$MlHQdD{77#S`!4H!vLy6rj8h6zAsrY3w5rb+xVY0!!FE9$VH9{2zB+DJ(&F-`n zqC(PMn6=HSebLHGKJt)mtL}>Zq7Rir~nH z88w^2EN(44^?oBRd%)K`+f|4cOe-(B%E(VU{p@VI*J!=h1KMk_?C|5Q$!y-1p1&LL z-;1C8`%eu1H<5k@{Qu}qH~pAXO)Fojf*bidRox0Ny^C>UegB>6;a$r{twECf5ZRwJ zY~-dglVm2fXi|LfA*(&-X?Z};On!d-#;j}!<;zQC2%Pa(xY7`ml)#^A^ zP%%kH8@`%4jN{E^4KV;JC=fPPr9HQPU2a`WpIc#;H{`fw-q89o$HS|_zv+Gyu?{6q z;giTI6Nm7IG=$ecm03iFb?v!}gUI%t?XKwwm_!5z+KNP}-B7SPM<%~zbOx!vr`*V) z>V{NPO5~q4D|u|yy06QD1|_O21b^M!DLu!lX(a*_ZoSKEcS62?eQh+`MA0gPJYcRw zXS$$7?;_K$Z?E*MJMjT!E6ww)z0NO4aZ*x=_9bVqgkM>%4Gw(2DVIa+zEbSQuWnF7 zexBdFk%vv4r}FnSDyCmoV3fJU`#_T!KIgm^zv`=5?r_&CDOx4p`nvc_1-g*0y94g< zVj(ddQ8f4YyV}n8g`BuT#Oc4iY)|&dw--=s5yZ7@W@V zEY)V14^%9a|HrhNKZg}&`5wlw3$- zF6?FD-|kSRDUB*O*4VfpbssSBKa~F7S^-$o?5nl>#*cwtzx9onwVvlUv>K@YDqCU( z&!poJei5uKyVO{=JOL=l_(*W1@i_o7_4}Sm9Wn?u>}&A*qML3*+$ANE6++I=qfQ6p z0K()Pu@^2cR{|xG(_qIc69D17-KxEf^LGusvxSB}%KRw$)p+_-Ve)083ZS&dDWy{~-;8Jj#5mmj#L&w--b!wK@T3 zzqNHLME$v^aMi|ooi{Ps!3shEq5H#Kbvx-V00M(h!#-Moe!a{;+-JS~)H8~ShAC}0 z^k#O}fvW5Uh6sd}n0$-|k@Rc))#bAT;0r53G3l3#iwW^-pSnd+`#K8(_7riV-u z4QjsE-2B8M_S^+;)5iKQIHFGN@P#o+gC}miC&(cLpoe`)XA)`Jp6;M5^WWWdjCc8=~6`b$6X=Rxdj{fY#Z-p_M?;92)TN4Cog=|{m5QbJ0 z_pXq}oh>}Np*n!P;dLLh%r7VHonkGEqKY@9B%``oEX03VPsiydP&{vAh=~kw6DZ!h z>Q<1d1)%J0v1DNhGMD6|L3}tc#ZejE(yNX>O zUOq!sb`jvGfmFB4?*Owkm+f4fIRR!XcFH2Sg}YDnL*k 
z0Kq7r;Zq?k0gCJ&{YITA1UQ}mTc_ph#(aR7@4kLLajrC>8+*`LUwrCSYXP4nK@N_@ut7;sEeE_c#w2CVD9eAysdhG9+P7t!YQUCqbrnHG$L0(&%E|{ZWO+`VxxN zoD2WMOLfA<=i?ID~@T{1faC-qWL?(@AvQm z)fzAISKS6QERmUAQFh8Jt@d(FhNS)PFbBkfDgp%n&+gP8$y{=3M^_%wt6EY0Za}_G z7hTm;lNH^vt%5dw-g?1Xhl5g8@Z}ghs_Vp+|0qQ&q3TwM;Hi1NXB>JTPDUH0hBMSD z;o{nSYfDHE8$~mBU#~y5lC7~i_{NTCQ!(VE7W8@gbs_OTH?&^y)n; zeZZ0Ay{WSJM{HIIWHG)mv#9%TRo?Hkj5>Z2rP+;wvvlL%G)kZPAMDI z`bTi7PA6iGDs@;<9`w0zry_k`#*!b|7z~%DEGg# z{CRBf|8H}RGX?pr|FCd=gV~f@8YTHNKsvz3vYNeJLP{vB$*4*c?sY1BWTpCD!Wj6+9Jp9Kr zjIwo}i)xT0y$!vZ)r*{}$UyO2?Mitx`q`%Rdyl8Ed5-P6z}S;~dduk1BGztvX^nzk zmXp4p`C|(U@q27PxcWsPgzkg1R8m#XfpQ#aBnPP1>7QNaGeTT^p?~| z$6QlkKfZOZtN(+&xobo6u4eAq@}V4$f>j9_?aR4s8heKlGBd`tn!e0M$|-#;(RQS! zU$opMIdHY{cL!;9k!Ga8_r&Nh*&qudPyMy}JLPwgXPURdi%E>THR_tF8k}D$8zDB& zNX#Pj1tWERGQVK2{eghYx{WL@eR7&Su?_?beYWH2LM_+!-FfPrIlq*(Iey8^j|0J9 zJrHEV?^Dax^(Zd!yh{esX(3YQ?4=~oHEQ8#uGb9^ZgIMUPf5585ZY0qwgwmt&q~@* zgC!PC`VgK$Wpo@09Opd05oxXB1F-HD+$en9<6jBM}rY(ZJ!EX<`+*&>G48-el^RtAS-X+3n@+Dk+vp zJhW3+{_VKl=~}T;UP@;bkgF)I=oQ^$D3ceMd%~&KfKNIb38(ik`$O@aONc3h!9iUI zDKqSA>NEYaK+!vv%pT1pFDK`AYiwb8LF3uT!ilcK|BSyH?~{*sG`fR?N!Ok&KymDB zF;I|&%m-f9>u+DE1Qa@ckR?;UT7B>RI#fAm+R9omfi%7IlJJr8lQsl*CY$#^=R-m| z$ueuP+(I?iyihlj30`!`(hi4zv}$q-$11EiwKDcRN`CfJXjpsh5|h@<)tRehr~>;+ zl@SfmmyM<5h5EPdlCDMnG#im*41|%Q$Yb*s-NTRpass&l$G24a{6XIT6zi7|@hji1 zKkt>sM%Q@;F z83_8Hz;m2Mn&Sb<2{{8gAQ-+;fYX(ED>Y2kjTSxK zy-{1pCTUowt;Fcf3&Am?ee`Bzk@jA=oYhcH1}UvC!(JXWhTPgKCJmZ}9@k!{1Di zSe9_4WXAJBaItl{LGf|mjHP6T10TJwMgEDrt$0VQvi{+2N5G@jKf1X#RiId`&-TVE z*d4yAb{5|iwdCwV0>wIxp1Lz|yM~LP(uQwWJ`QLRP}$B)BN{%gzdd&jq<<7Jd-MVN^(^$5O!)g^D!-q3vvxrVDPMm5-M$_9;F2j?!0|`h$^2G*`tVP6p z|IR1ht?-O%bZJ$vnXYuk&XC;PIQEP3oO6@cJVh+PcIA7ZUIp>FDP?)9&HjRY!ZQ8|$h zj(++xMe8OX4z=sw`vjd_MGJT@3>Ep#sd3W7T?1x-X^%+Iljz;_anJ)bX>saXg;C+^+360~6a$Yb7K6ga_z%;f`G?7lM(9FWpC3r`8aTwZ$K`r8 zmO%}npn3AVyNp8fW3n@$dFxeMNZ)Cl5dBL`K_dLQKC3-4A7>vyIXc2yk&@Tu*ZdT$ zSqW{GuEJiZ6yzu@y93QI!sQPk%WlhEJ1pod53m){IBJa@xC47#q;6_!<1mgg;?Mkn 
zw6a|;fOQA>)?Dbb75(nGZQ`A-q??PZH#6@bmPqPxI-WRuu=PyINyxc_kMrk1ne$#6 zxf<gDUbMYeyIg>I@6IF6q(`ENgUX)FV zr43PsEy^da^$a~<(|0mC!5n`Rp{(R~;TzNf%XRhCrX+vV-&BD>QuYr>)3;k7J5w`fJLWM|-^4c8i#kmPKVU()TBi7;A z$BRW8c62o1g`2dpA!O2bt269tK)7#@WHszVERxIJ@sZt3eXk@#bGfDL?f67W0`d{k zw{!dOj#Uz)34#9jO;b5R`_tL$h=ijpFUIN(Je8fMm&i3GZ|-SHM_tKZL|rfo$1p}L zk=pb?UBr@xxwkY8X=lM^2lRug(!%z+ym9-<|9A^`g!++fQQN(x*O} zaP^!#v5zE_PwojB=GO{34Y&8^n|5XucyF7uJ}<+xRgE>iO=`i12~l44xKwO+J#2AClRiW95zTDl-o zG(;U@s3@*{tp5XeOul~=7?FFmF%Vj1hiba!JR3{p53Q0(C?_z}c3w6qkJvY8UreZ4Qe z`&d&kwtN-0_d4hCjEj6{x!6fhva-)2YPaJ8uKUDW_i#Ar{o~i6eY7lmR&NV!^GwFO zS+0^46+7+3!279Fa4^f(v#{f0Pj9~!LX~YjLP zdF)ZL&ixUfpNoqmLHsCSe)3nyV)b}Juh{4#&EtubZ1q#Bd7`1MOJO;4%O(CJ*!fZZ zJo!`0O{p7?2kp)8mi!s_=lI*^w?G_cGn|g=zDhv~K z&=j^MW9s>E8O^ymb9E_uR7Lr>JjMGs;&sq|6w{1;{mnGY4bCI1IcUB+qJfy1^h9^{vycDNN1nbt zgcx7>nW(R8=@Aa|?TD#9^-t>90z>7SFK|ro9=v;HUflMSQB_@pC5pCNN-T}P8Dd1t zWlz6C&s4<{m>T-B;fjZXk`g}2|0oRnGN;yGu$zz2_Hi>?a9sscq#-LMYcl{_SQJ5r za&2qsWYuwQGjG6^G^FbzrP&$rZfeqE%*c*F$7I}+f5`?s?6*yX$^wCd9pC&rR{4=R zI(ly~y-ikA*Dwk(IOZJwT;M1<_oX=k@BKY?**Ga2TR1xExBdJPtJ!`5=`JWQHc{DO z>$PWZcqp*OxvK#5fVK|VU6PaG8d4laCWcGc>#e$uejInXF5Z=}Ne@FQpfp?bxBz{g zfaX_T#^@Mk5858Ov@_=VrZQsLU*3w3w_NN#Jnat!avu%5@kr#X%7(g|QmyqO*Uif| zzWZlQAy*9JZsB#74*SI&D$d*+BwieRC!j4x~&O~ph0Gp%F==S{~9d+_#_Mm%IJ=g zM~A#_=3Lbst(-Zp$V^+x+lCBt5&Ul*AGXQ0uq9Q51Er}~I__BFC;W_&ifCUI*Y zcgw{CNk;vPpp54kXzk<=oRsaT?4h`Lb~ZD&`!Ms#=+&9!g)Nc?-GtYC60t8#XK(;N z&mC7$7|U3qEk5l^l)LOEer7u-DQ&br6WHS4@o^|~-d+zOOHt{eDc)I#TQNtrl}*R_ zH>gXfD2l7Mzvw@9XuE*)5$sDdYALBvKQWj=YOKU5TtEAqfDun;7eO{hfl0a=4Hl6a zv1gmQZ3)MZN_iI%Q2~0Q$=lSM&p`8#YxP0*H2C5rP5QnOBIi2af|?iG+2M|NGupK- zFbShp)6Wk<)MCRYQG=B$#5bYeAkg}S9bG>o4JJ(0^CF=rhkbqlqZpEUOd?d`Qeu(c z`e5+a-3$TCLI*V7W?}}UsoR$rTBXnZcK^$(``dyw1IIEp6QZ1+!o?06r4) z2|KqQ35)JEU#oJC9XcCgN6oZ^=tZ3m^l8CzJ(qD>*=kA37Sxo!yud>-XrV~D&##w@ zZnrg=IT3d0@=KuLAi7L^?!la<%?Xs3@P#D$V4zj4eq*s4tfi31M9L$+5%W&L^kuv6=lBafDJdo%fbmN}b5&uGZ*b0qWV?f!jb@NBR>+fMJiViTv|ePJ2x_3zfkz7hWr^;!8qew@kj!%N)mc 
z1U9F;t{sR&54XXs4L9ZzK+QYdTOS>#-|k;;erq2fTbZ?v4EY^c`qgYdiVKBBA>vji z2IKJ~xDTMr6X~jF?TRRg59I|5lu?5-)8Hbf{8G`|n2!sM?SotvpcsrswXls!3qN~A zn@z1QO=Y0ahpv-9^6YOD3nn-E1O@fM)(P`)ns0`~U_3Xq-^g&*Mq%sxDuda)L-<(s zY9#`1U*3&3TY1pT!`PLd*Pa`NfIu9*(s^GO@E^sLEDgxd?1@{Lv%4Jn3@RZ|&v{*B z$|AnwE3+iqz9hJ3TIrCXo7VDig}feAMsjZ9$u@P4t^pUT^esdL(3z>hLgcl(Ft$Cl zskyoq-_*!QRA)~_CG1W46OYvNky`rp3v#H}r11`TptVWtIyn5T=j#mgiiHj#^dhyd z{*4CJy=x+a^IV?AbH!n6D#uoe_4gWX*(3!$K6tz?f|;uG<9C}#+UZ{wvJ9rHqU1x<^A;D3|AD z@rGPUrwKMH3p1l?Zdj%Na$U!K6eYlpn zsPc1q)G$VLu7E11ji2)O0mNi}-!-b7LE~q4E-^7!J>EB%?U8aM?tU)K43G1_y2MW0 zZ~46Na-2Zlh}KBSn-rbO%4d5^5b+i(Z%50p|nQX(QzzeZVmiKgHU^~_3q?U_Vcy%F5BBg*=_e>S3>9l|JCQKiuYlF>Pn18dq3*1`V9B!86^24sfo@Y$WofF6 z@V}NBLY?)O6pVxCb)a4BCOdXeI2hEN%NBdMYBncnUi2ks6wHyV0zl@FhNDy#LLNq$5? z@rk2*GjW~)gi+--zIM^NG&K%5X)P>lN^66~!&|Zb44A{l_*3smPPss!b6;DaOaho^ zj>ap&c54YZdn6qaEhU8E!sbQ{RCuRHA;#YTdL~=!Dy{X7d{TRxB-N4) z%6isS?$K^AQ}c*9&S>bFi8*PA6-~t-T3I1$+9Vr@UUoy?2x7zB{u}t)1A{x;TUMQ^OqMR+EeR%OEVm0k%8ZvEe7y zl1rO3i!TMiC*AEV{3v0YafgJn_olVV)(@AEFU~&iwn*H*z;DX2zbm1;5{ot4Cs5@y za&{Rhh}#Q(@ZQ%9n)VxaXh^2-@eC;BEp%-ZrsU7o1Ui(#kvvyoeQMPVb0kbvaoWl% zh<*plSr57qf6g=b+b0nCdWjpXawNOFf_-r`3r{(jr@InWmJ3sdxlBDXH`(W2s`9X4 zM(*LN$4BJFcUHP=f5;?|;U*D|q|uBiObhR>gY3rD?@kVjTV^~t&3$(*r#nzwG8|4V zGxJ?A56V-^GuzLdaIRb9`&O5bY`}cEdl>F?22yQx#xB1!Y+zc_^2jbpZgQ<$Qm-7+$5LA|5TM+07!kIq4W?tKd$lPRD z6zN7zPPy8D!tlBPbYv>p6p86y^xE z`k4bto4KpDb>xQ}#bsGhv)0|(b#FE(u{kvu=a}Da z5cYQ~V|0)b_?(X|h~>_|MHx(T{49%*CbT>~sO4h2B7(NE-artdS&9&xU%0 zOJJO%ize3=FYatr*O2%*AT=yq6EtrwY+jC`t)Y;iIOti07ZO6Iv-fPaC z-{FiBsi_0baQu=zzvQ>4P-gHWC9_ebSz=t}1q9=!ux?!A)w9o8m;bn(`%qehRZi(z zc+&2PPKO8xZ&3_)1jXx6gwRkexV>J0>lYr@q1YpiNBfNT*^|8p`a%}Wtv*DzEQd`DG*j85PE9Ij|9M#M z{8aG`w`d+XT@1Usg-B1zwgD;i+dmDdT|HH zi)hyEISzkkPR*udlRYcC74gMwv7nMm?-G;GM=oZA`MJ*llqx|MuPb*c#4t>CRSy}* z$G_#}`tu7iO~9K|Zdi5yK}Ela&MWMp5>NZO>qFMSg&ITiwf(>c_1 z3{=F~(_D(>ASxAsf>d_8fY@>1`72Vbq!Xb2u< z(+^7A z=5=k)bcFacH^Xt0G2=#uCp+&D7g`g~HkfwB?)Y&_EN9wv549F!kWe&J)^S~VZroW^ 
zX3-VHUdt{%Y6V=_IW@+6nWcxxm=|~hS0||){dmvUd_teu&BZ;SGwdz%s7WWyt7hid(cG0*+zh@4brFb5JLd;`j{EqHB+V)D2`+X4c?&l4%u-^?C7r_nFNvA^BVPreM>Wa)%s zI`IA3Qm>D~`XjETVOb4fiMINF@W(-Aj-Q#AASWnDoiI9`;#S~qdmoY#j=t4$S}^Fu z7t*~oR!*pRxl(*T!|Szh5xc!6?I+`6ADdvrxxUEvVAI*2?t6({0xWB!ivNNN}3LauEt)tJ{UCzO}oX%EONWX2p?`?q!0K z+U8#Dd=;Ymy~cW-#Fb}cUcz7BgykYr7zt%lH$BUYHU{p$tx1dJI()Ou&hcpcqwz%V zn;>3>Twc-5_1f0oLTc&^dlO-s*WbD&WNUxAeKhKLalOk&Cf~+2{bq&?&68Z%`XOxX znKD>I%H4(H)uyuzrUO6RxBNi-1W5WDw;MDMtUan5U2!EEsX+r=MjqsO(Q!lJiHUGq)4s)7f=VH4lB+Y`3+x9Q0@smdu9+#Lta5q_1v!;FDJ^AU|ee*d&{> z^00HTsNV1HYb36QTq`USZ&T09CT-XmZtmNk&hi+yaXE(ZoxTei1%8<%A;-gTD}H}C zNy6_%Yq=c)E=88DRH7f~V-q@zGc=OuuEqbcST(|53+*la&KuhNPfpX<`;+b0lOL7B z{d#|Y(NgZF@@Ut1sqV%c1t?Z`#@N?S@l%R5u`~AK>X#iBrk?N=>uD8s{QcuIdLz5& zi*|N34dyxqK>`8ZsTt^^Fo$-1e!awasBq9-oAQ)?>}H_l2W76PTJ&$X`!w=qeOyDjh;G5`Q)A+3Ik*k@ zyu{xC+(cTPYaPhTZ^Is=U`Q;Dc5568~s&!gIx3U?=~Ye!k>@7`Dy zN9sk&!=JubMJ=CgDs~{4hGw|1!HJbg@`n|*Up@7sN&feZ(FG>I&2#~7(FjPHj-RGX zGmf`#E(d;_$q+PhN*2-^iz{}WkFIg+OIF*{PJcMN*|xLg7g0}qq=A+U&GaTEy$%KP zo^fgEX1pIW2*wy))5_LzSuJr@l4Z{$=BFLnw`uck{aQ7N=TuumZ>Q16Fv`XboV9%} zq%;&S?(j>w)1*sf&CYJSsLh1D)PvBW3cLZa((F^ZJmX-YZ|>zp0N4KL31xPe?K9NWe6Qu@n}YvL=e$7U!4frd6lzXyfGh*dFc?H=hU;| z5dErMHT0SBo(FZ8$*A$t^bfP0oj*w{(oX&YGM9E0>|QGWy?2mj%-hBD2fzgx>JkKt zkE_+MVKoh!2&AtdTc!1ktykiq%NTbwDX7^mqQC$ zC2qr{OR|#i;KR-K9(1Yw3kW{68`)KuZ zSieLB7+thGEze0w-!_QjoKHQSHmD~?Zaq`BzzC(eA&LVM;NFVEK0$~MxP8mVJH%T{ zZ{v`a)AoU0zqkx}?htrvC)7EK+KNGqJEkE0(;06cPG}c}6V`M`_c9QQc0w||mo!Y9*#UtAe?+YeoDw^m-Z@RaDa$GL#}Lzv z)x5M5Z!_$11Mdk(NXT&mt{{;BOyAN@3U8h!s(}wuv`0~;T;AowU8co7oMsHh9c7m=~v7Q1h`{yX=1SYvk=8@IwIjpP$GQegCBZHY2^!ANX@K zR!Pl5JX)%cK3I})`H`kyJz?1CJqiiJy|xvQP-I7Vx@^6si6EspPbSMZ#{pWH^RYrp zH>XzQ(}{Ev2uNUYiNFk3=X{R#a*XK^DOeeh)Gv?Xk?u3nvb+Y+#1X1IUW<-yT<*e~ zwGPN|rc%vFK}Mtlp=sd>mm0%cQe9X>w6AiDkfN;goC63>zeNZ`+T~@X(svJK3T~+I zC_c-ZxRbX3ENPRS%Yxm*qS;Ql`>tTZ_>kD9d__KPDQ@|7zdW-%_|rWV$k*z(!)6J+ zvZIfv4@^=Q_ev_WM#-C9tPnmFHVQ~7)?ugn3Q%kK27Xkbm+-l%bDRC 
zx3du_FA>TcFS-S`x4;=wrjj`i`UYBY5>+?{+3hH_uE`>dkMa)qvW{an7D$X5#3s@_2Z(kq*tp1m`E6=qAE(aapS1wMoP2cHfkEi!?Xi76kalE;vm!>wDy7u(oKX1v`A77=9PNQk__{>*ZXP>WtaYjfb`y| ze^bcaF8x*M1}Fx~m9U3t=XVcuq2HY{(ixGVG(1iF?xJ89jRx&T9nzU`9{|x+*1i%R zhx`!wq@qy5T#iv_bqrMq1hBd?!ax8^DQ(@yixNm^@NYpZ@$+E#7kLPT&M~JY{_Z*Q z^FvfAtg&9RbE8~Y*TQNzm%t*N`vg2!;dPz(Lusodm(HSVSOuWZZe>~7v>C5eUNE;E z{1`w-Yz!04HqNn`5mKH(tH|E|fIf-QtmWryxSvewuTVt}{_)npV+Q$TXyuytZddsD z{*Z={W>*LT&aWA9Y=)J)ILL@IG17Dzhci3*-B)xyD$5e8G0;I`CwyKN#s$?65~!JbAbF6qoJ)4WlLjJ17KjlvI~A7pBX5KIeaggIM` z0&~ruvU+~~_Rfe`C@z;1^BeRgD-(TZ`1F=v5ZCse`Td)CvZGy)Wh9g3la<}Qq8(i5$uxkS7GzG?$HLi5}K8!{;-`w z=_rE=f770$=V1k)@ZZ3!&rmX)n-sYw7+;Hx>!d5dVcz~KJ#UbxX*LFGX(?do>sDoS zeP2^m)~5oz0gYnxh{q10Mg(`L$!8{l(c}`^AMU@Sl8!~L4cswX8>w1%IBLB}{w4XDZ(31l{`V$GW{WjSp zHwo1>p4Sa;y`#g9P8R{bn~vcrpyJ%_?H886j`kX| zkHY%BoE0sY7G^ctsJZItJscEg`ge%HK>f2~79n)s=%dL|i;)kZR8Tg@b#|oLDf&#o7lgUqLGCK`4FM zAep&0(_1UP0Z$ZN!HAUYF`w)(_Ayq;@b=>_f118@_N|04Jm<%i(}8usr+E-AzWhfV`R8yeWUap_oVpXV+w*S=aPC z_U!R@ZIR42TCx)<`g`%~mqMU`PhCOeUj+=vB*RAn?$``iedQm-&@XOO8JIBtbxy@E5QCIy~WTOaAZ0Uq<`y1@-@h?*D#nEEhDAXbqeMM_-+n=l%uw OQ&rMZEWB^^`~L^6dLMoO literal 0 HcmV?d00001 diff --git a/YOCO/requirements.txt b/YOCO/requirements.txt new file mode 100644 index 000000000..2e1336239 --- /dev/null +++ b/YOCO/requirements.txt @@ -0,0 +1,12 @@ +torch>=2.2.0 +triton>=2.2.0 +numpy==1.23.0 +fairscale +tiktoken +sentencepiece +ninja +boto3 +iopath +git+https://github.com/sunyt32/fairseq.git@moe3#egg=fairseq +git+https://github.com/shumingma/infinibatch.git#egg=infinibatch +git+https://github.com/microsoft/torchscale.git#egg=torchscale \ No newline at end of file diff --git a/YOCO/scripts/eval_needle.sh b/YOCO/scripts/eval_needle.sh new file mode 100644 index 000000000..a6277901f --- /dev/null +++ b/YOCO/scripts/eval_needle.sh @@ -0,0 +1,11 @@ +cd yoco/ +torchrun --master-port=29504 --nproc_per_node=1 validate.py \ + --task pseudo \ + --criterion multi_needle --needle-num 4 \ + --batch-size 1 \ + --max-epoch 1 \ + --no-save \ + 
--tiktoken-model cl100k_base \ + --bf16 \ + --arch yoco_3b_new --tiktoken-model cl100k_base --load-ckpt /data/yutao/ckpt_opensource/YOCO-3B-1M/checkpoint.pth --yoco-model /data/yutao/ckpt_opensource/YOCO-3B-1M --tokens-per-sample 1048576 --interval 1048576 + diff --git a/YOCO/scripts/eval_task.sh b/YOCO/scripts/eval_task.sh new file mode 100644 index 000000000..07b70593e --- /dev/null +++ b/YOCO/scripts/eval_task.sh @@ -0,0 +1,17 @@ +TASK='harness_boolq' +# TASK='hendrycksTest-abstract_algebra' + +cd yoco/ +torchrun --master-port=29505 --nproc_per_node=1 validate.py \ + --data-dir ../harness_data/ \ + --criterion harness_eval \ + --task harness_eval \ + --batch-size 4 \ + --eval-data ${TASK} \ + --log-format simple --log-interval 10 \ + --bf16 \ + --tokenizer-pad-to-multiple 8 \ + --arch yoco_3b_new --tiktoken-model cl100k_base --load-ckpt /data/yutao/ckpt_opensource/YOCO-3B-1M/checkpoint.pth --yoco-model /data/yutao/ckpt_opensource/YOCO-3B-1M --tokens-per-sample 4096 + # --arch llama_from_ckpt --llama-model /data/yutao/llama/llama-2-7b --load-ckpt /data/yutao/llama/llama-2-7b/consolidated.00.pth --tokens-per-sample 4096 + + diff --git a/YOCO/scripts/train.sh b/YOCO/scripts/train.sh new file mode 100644 index 000000000..28c13f7bf --- /dev/null +++ b/YOCO/scripts/train.sh @@ -0,0 +1,27 @@ +cd yoco/ +torchrun --master-port=29501 --nproc-per-node=1 train.py /mnt/nlcredstone/shaohanh/data/redstone_v4_21_config \ + --save-interval-updates 5000 \ + --no-epoch-checkpoints \ + --arch yoco_base \ + --criterion cross_entropy \ + --task gpt \ + --tokens-per-sample 2048 \ + --tokenizer-pad-to-multiple 8 \ + --pad-to-max-len \ + --optimizer adam --adam-betas "(0.9, 0.95)" \ + --adam-eps 1e-06 \ + --clip-norm 2.0 \ + --lr 0.00015 \ + --lr-scheduler polynomial_decay \ + --warmup-updates 50 \ + --weight-decay 0.05 \ + --batch-size 1 \ + --model-parallel-size 1 \ + --update-freq 1 \ + --batch-read-ahead 1000 \ + --total-num-update 300000 \ + --log-format simple --log-interval 10 
--disable-validation \ + --tiktoken-model cl100k_base \ + --no-save \ + --bf16 \ + diff --git a/YOCO/yoco/__init__.py b/YOCO/yoco/__init__.py new file mode 100644 index 000000000..3ae31e250 --- /dev/null +++ b/YOCO/yoco/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2022 Microsoft +# Licensed under The MIT License [see LICENSE for details] diff --git a/YOCO/yoco/criterions/__init__.py b/YOCO/yoco/criterions/__init__.py new file mode 100644 index 000000000..9901f2753 --- /dev/null +++ b/YOCO/yoco/criterions/__init__.py @@ -0,0 +1,8 @@ +import importlib +import os + +# automatically import any Python files in the criterions/ directory +for file in sorted(os.listdir(os.path.dirname(__file__))): + if file.endswith(".py") and not file.startswith("_"): + file_name = file[: file.find(".py")] + importlib.import_module("criterions." + file_name) \ No newline at end of file diff --git a/YOCO/yoco/criterions/harness_eval.py b/YOCO/yoco/criterions/harness_eval.py new file mode 100644 index 000000000..8aed18e36 --- /dev/null +++ b/YOCO/yoco/criterions/harness_eval.py @@ -0,0 +1,86 @@ +import torch +import torch.nn.functional as F + +from fairseq import metrics +from fairseq.criterions import FairseqCriterion, register_criterion +from fairseq.dataclass import FairseqDataclass + + +@register_criterion("harness_eval", dataclass=FairseqDataclass) +class HarnessEvalCriterion(FairseqCriterion): + def __init__(self, cfg, task): + super().__init__(task) + + def forward(self, model, sample, reduce=True): + """Compute the loss for the given sample. 
+ + Returns a tuple with three elements: + 1) the loss + 2) the sample size, which is used as the denominator for the gradient + 3) logging outputs to display while training + """ + model.eval() + net_output, _ = model(sample["net_input"]["src_tokens"]) + net_output = net_output[:, :-1, :] + targets = sample["net_input"]["src_tokens"][:, 1:] + loss_mask = sample["net_input"]["gpt_loss_mask"][:, 1:] + label_length = sample["net_input"]["label_length"] + loss = F.cross_entropy( + net_output.float().reshape(-1, net_output.size(-1)), + targets.reshape(-1), + reduction="none", + ignore_index=self.padding_idx, + ).reshape(targets.size(0), -1) + loss = loss * loss_mask.int() + loss_norm = loss.sum(-1) / label_length.float() + loss = loss.sum(-1) + + option_num = self.task.harness_task.class_num + labels = sample["targets"].view(-1) + + assert sample["targets"].size(0) % option_num == 0 + sample_size = sample["ntokens"] + + pred_label = torch.argmin(loss.view(-1, option_num), dim=1) + pred_norm_label = torch.argmin(loss_norm.view(-1, option_num), dim=1) + target_label = labels.view(-1, option_num)[:, 0] + + logging_output = {} + + logging_output.update( + { + "loss": 0, + "nsentences": pred_label.size(0), + "sample_size": pred_label.size(0), + "ncorrect": (pred_label == target_label).sum().item(), + "ncorrect_norm": (pred_norm_label == target_label).sum().item(), + } + ) + + return loss, sample_size, logging_output + + @staticmethod + def reduce_metrics(logging_outputs) -> None: + """Aggregate logging outputs from data parallel training.""" + loss = sum(log.get("loss", 0) for log in logging_outputs) + nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) + ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs) + ncorrect_norm = sum(log.get("ncorrect_norm", 0) for log in logging_outputs) + metrics.log_scalar( + "loss", loss / nsentences, nsentences, round=3 + ) + metrics.log_scalar( + "accuracy", 100.0 * ncorrect / nsentences, nsentences, round=2 
+ ) + metrics.log_scalar( + "accuracy_norm", 100.0 * ncorrect_norm / nsentences, nsentences, round=2 + ) + + @staticmethod + def logging_outputs_can_be_summed() -> bool: + """ + Whether the logging outputs returned by `forward` can be summed + across workers prior to calling `reduce_metrics`. Setting this + to True will improves distributed training speed. + """ + return True \ No newline at end of file diff --git a/YOCO/yoco/criterions/multi_needle.py b/YOCO/yoco/criterions/multi_needle.py new file mode 100644 index 000000000..f1b564ec7 --- /dev/null +++ b/YOCO/yoco/criterions/multi_needle.py @@ -0,0 +1,181 @@ +import os +import random +import math +from dataclasses import dataclass, field + +import torch +import torch.nn.functional as F + +from fairseq import metrics +from fairseq.criterions import FairseqCriterion, register_criterion +from fairseq.dataclass import FairseqDataclass + +OURS_TEMPLATE = "There is a special magic number inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the magic number there. 
{context} " +RANDOM_NEEDLE_CITIES = [ + 'Chicago', 'Yangon', 'Antananarivo', 'Colombo', 'Almaty', 'Sydney', 'Chicago', 'Mexico City', + 'Seattle', 'Lagos', 'Amsterdam', 'Belgrade', 'Cairo', 'Baghdad', 'Damascus', 'Kigali', 'Dakar', + 'Dakar', 'Sofia', 'Kigali', 'Victoria', 'Tashkent', 'Mumbai', 'Barcelona', 'Almaty', 'Amman', + 'Toronto', 'Bratislava', 'Johannesburg', 'Thimphu', 'Bangkok', 'Santiago', 'Cairo', 'San Francisco', + 'Lagos', 'Amsterdam', 'Paris', 'Rabat', 'Santiago', 'Copenhagen', 'Madrid', 'Kigali', + 'Ho Chi Minh City', 'Sarajevo', 'Delhi', 'Istanbul', 'Ho Chi Minh City', 'Khartoum', 'Helsinki', + 'Doha', 'Istanbul', 'Kuala Lumpur', 'Budapest', 'Shanghai', 'Moscow', 'Los Angeles', 'Oslo', + 'Johannesburg', 'Berlin', 'Bangalore', 'Tokyo', 'Melbourne', 'Barcelona', 'Chicago', 'Port Louis', + 'Lisbon', 'Nairobi', 'Kampala', 'Lima', 'Maputo', 'Vancouver', 'Dubai', 'Khartoum', 'Jakarta', + 'Madrid', 'Yerevan', 'Beirut', 'Athens', 'Chicago', 'Paris', 'Bucharest', 'Copenhagen', 'Brussels', + 'Damascus', 'Seattle', 'Los Angeles', 'Yerevan', 'Victoria', 'Tunis', 'Astana', 'Seoul', + 'Buenos Aires', 'Bangkok', 'Colombo', 'Brussels', 'Khartoum', 'Doha', 'San Francisco', 'Vienna', 'Jakarta' +] +QUESTION_TEMPLATE = "What is the special magic {city} number? 
The special magic {city} number is " +NEEDLE_TEMPLATE = "The special magic {city} number is: {rnd_number}" +@dataclass +class NeedleEvalConfig(FairseqDataclass): + needle_num: int = field( + default=4, + metadata={"help":"needle number"} + ) + tokens_per_sample: int = field( + default=16384, + ) + interval: int = field( + default=1024, + ) + needle_file_path: str = field( + default="/mnt/msranlp/yutao/data/PaulGrahamEssays", + ) + +def random_partition(total, n): + cuts = random.sample(range(1, total), n - 1) + cuts.sort() + cuts = [0] + cuts + [total] + parts = [cuts[i+1] - cuts[i] for i in range(n)] + return parts + +@register_criterion("multi_needle", dataclass=NeedleEvalConfig) +class NeedleEvalCriterion(FairseqCriterion): + def __init__(self, cfg: NeedleEvalConfig, task): + super().__init__(task) + self.cfg = cfg + self.essay_list = os.listdir(cfg.needle_file_path) * 5000 + + def generate_garbage(self, length): + current_text = "" + current_length = 0 + while True: + essay = random.choice(self.essay_list) + essay = open(os.path.join(self.cfg.needle_file_path, essay)).read().splitlines() + for line in essay: + tokens = self.task.tokenizer.encode(line + " ") + if current_length + len(tokens) > length: + return current_text + current_text += line + " " + current_length += len(tokens) + + def generate_prompt_landmark(self, first_length_list, second_length_list, final_length): + """Generates a text file and inserts an passkey at a random position.""" + lines = [] + citys = random.sample(RANDOM_NEEDLE_CITIES, self.cfg.needle_num) + for length in first_length_list: + lines.append(self.generate_garbage(length)) + city = citys.pop() + magic_number = random.randint(1, 50000) + information_line = NEEDLE_TEMPLATE.format(city=city, rnd_number=magic_number) + lines.append(information_line) + + final_question, answer = QUESTION_TEMPLATE.format(city=city), magic_number + + for length in second_length_list: + lines.append(self.generate_garbage(length)) + city = citys.pop() + 
magic_number = random.randint(1, 50000) + information_line = NEEDLE_TEMPLATE.format(city=city, rnd_number=magic_number) + lines.append(information_line) + + + lines.append(self.generate_garbage(final_length)) + lines.append(final_question) + context = "\n".join(lines) + return OURS_TEMPLATE.format(context=context), str(answer) + + def forward(self, model, sample, reduce=True): + """Compute the loss for the given sample. + + Returns a tuple with three elements: + 1) the loss + 2) the sample size, which is used as the denominator for the gradient + 3) logging outputs to display while training + """ + model.eval() + all_retrieval_result = {} + random.seed(42) + for context_length in range(self.cfg.interval, self.cfg.tokens_per_sample + 1, self.cfg.interval): + all_length = (context_length - 150) + local_retrieval_result = [] + for depth_ratio in range(1, 11): + prefix_length = int(all_length * depth_ratio / 11) + suffix_length = all_length - prefix_length + n_correct = 0 + for _ in range(5): + if self.cfg.needle_num > 1: + first_needle_num = random.randint(1, self.cfg.needle_num - 1) + second_needle_num = self.cfg.needle_num + 1 - first_needle_num + first_length_list = random_partition(prefix_length, first_needle_num) + second_length_list = random_partition(suffix_length, second_needle_num) + final_length = second_length_list.pop() + else: + first_length_list = [prefix_length] + second_length_list = [] + final_length = suffix_length + prompt, pass_key = self.generate_prompt_landmark(first_length_list, second_length_list, final_length) + prompt_tokens = self.task.tokenizer.encode(prompt, bos=True) + prompt_tokens = torch.tensor([prompt_tokens], device="cuda") + print(prompt_tokens.shape) + output = self.generate(model, prompt_tokens) + pred = self.task.tokenizer.decode(output[0, prompt_tokens.shape[1]:]) + print("Answer: ", pass_key) + print("Pred: ", pred) + if pass_key in pred: + n_correct += 1 + local_retrieval_result.append(n_correct / 5) + 
all_retrieval_result[context_length] = local_retrieval_result + + print(all_retrieval_result) + return 0, 1, {"loss": 0} + + def generate(self, model, net_input, generate_tokens=20, chunk_length = 32768): + output_tokens = torch.cat((net_input, torch.full((net_input.shape[0], generate_tokens), self.task.tokenizer.pad_id).long().cuda()), dim=1) + begin_pad_index = torch.where(output_tokens == self.task.tokenizer.pad_id)[1].min().item() + incremental_state = {} + eos_reached = torch.tensor([False] * net_input.shape[0], device="cuda") + # prefilling + for begin_index in range(0, begin_pad_index - 1, chunk_length): + end_index = min(begin_index + chunk_length, begin_pad_index - 1) + _, _ = model(output_tokens[:, begin_index : end_index], incremental_state=incremental_state, start_pos=begin_index, skip_cross_decoder=True, is_prefilling=True) + # generation + for index in range(begin_pad_index, output_tokens.shape[1]): + generation_net_output, _ = model(output_tokens[:, index - 1].unsqueeze(-1), incremental_state=incremental_state, start_pos=index - 1, skip_cross_decoder=False, is_prefilling=False) + generation_net_output[:, :, self.task.tokenizer.bos_id] = -math.inf + generation_net_output[:, :, self.task.tokenizer.pad_id] = -math.inf + next_tokens = torch.argmax(generation_net_output[:, -1, :], dim=-1) + pad_tokens = output_tokens[:, index] + next_tokens = torch.where((pad_tokens == self.task.tokenizer.pad_id) & ~eos_reached, next_tokens, pad_tokens) + output_tokens[:, index] = next_tokens + eos_reached |= ( + next_tokens == self.task.tokenizer.eos_id + ) + if all(eos_reached): + break + + return output_tokens + + @staticmethod + def reduce_metrics(logging_outputs) -> None: + pass + + @staticmethod + def logging_outputs_can_be_summed() -> bool: + """ + Whether the logging outputs returned by `forward` can be summed + across workers prior to calling `reduce_metrics`. Setting this + to True will improves distributed training speed. 
+ """ + return True \ No newline at end of file diff --git a/YOCO/yoco/criterions/needle_haystack.py b/YOCO/yoco/criterions/needle_haystack.py new file mode 100644 index 000000000..5cc9f231e --- /dev/null +++ b/YOCO/yoco/criterions/needle_haystack.py @@ -0,0 +1,169 @@ +import os +import random +import math +from dataclasses import dataclass, field + +import torch +import torch.nn.functional as F + +from fairseq import metrics +from fairseq.criterions import FairseqCriterion, register_criterion +from fairseq.dataclass import FairseqDataclass + +OURS_TEMPLATE = "There is a special magic number inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the magic number there. {context} " +RANDOM_NEEDLE_CITIES = [ + 'Chicago', 'Yangon', 'Antananarivo', 'Colombo', 'Almaty', 'Sydney', 'Chicago', 'Mexico City', + 'Seattle', 'Lagos', 'Amsterdam', 'Belgrade', 'Cairo', 'Baghdad', 'Damascus', 'Kigali', 'Dakar', + 'Dakar', 'Sofia', 'Kigali', 'Victoria', 'Tashkent', 'Mumbai', 'Barcelona', 'Almaty', 'Amman', + 'Toronto', 'Bratislava', 'Johannesburg', 'Thimphu', 'Bangkok', 'Santiago', 'Cairo', 'San Francisco', + 'Lagos', 'Amsterdam', 'Paris', 'Rabat', 'Santiago', 'Copenhagen', 'Madrid', 'Kigali', + 'Ho Chi Minh City', 'Sarajevo', 'Delhi', 'Istanbul', 'Ho Chi Minh City', 'Khartoum', 'Helsinki', + 'Doha', 'Istanbul', 'Kuala Lumpur', 'Budapest', 'Shanghai', 'Moscow', 'Los Angeles', 'Oslo', + 'Johannesburg', 'Berlin', 'Bangalore', 'Tokyo', 'Melbourne', 'Barcelona', 'Chicago', 'Port Louis', + 'Lisbon', 'Nairobi', 'Kampala', 'Lima', 'Maputo', 'Vancouver', 'Dubai', 'Khartoum', 'Jakarta', + 'Madrid', 'Yerevan', 'Beirut', 'Athens', 'Chicago', 'Paris', 'Bucharest', 'Copenhagen', 'Brussels', + 'Damascus', 'Seattle', 'Los Angeles', 'Yerevan', 'Victoria', 'Tunis', 'Astana', 'Seoul', + 'Buenos Aires', 'Bangkok', 'Colombo', 'Brussels', 'Khartoum', 'Doha', 'San Francisco', 'Vienna', 'Jakarta' +] +QUESTION_TEMPLATE = "What is the special magic {city} number? 
The special magic {city} number is " +# NEEDLE_TEMPLATE = "The special magic {city} number is {rnd_number} . Remember it. The special magic {city} number is {rnd_number} . " +NEEDLE_TEMPLATE = "The special magic {city} number is {rnd_number} . " +@dataclass +class NeedleHaystackEvalConfig(FairseqDataclass): + max_len_b: int = field( + default=5, + metadata={"help":"max_len_b"} + ) + tokens_per_sample: int = field( + default=16384, + ) + interval: int = field( + default=1024, + ) + needle_file_path: str = field( + default="/mnt/msranlp/yutao/data/PaulGrahamEssays", + ) + +@register_criterion("needle_haystack", dataclass=NeedleHaystackEvalConfig) +class NeedleHaystackEvalCriterion(FairseqCriterion): + def __init__(self, cfg: NeedleHaystackEvalConfig, task): + super().__init__(task) + self.cfg = cfg + self.essay_list = os.listdir(cfg.needle_file_path) * 5000 + + def generate_garbage(self, length): + current_text = "" + current_length = 0 + while True: + essay = random.choice(self.essay_list) + essay = open(os.path.join(self.cfg.needle_file_path, essay)).read().splitlines() + for line in essay: + tokens = self.task.tokenizer.encode(line + " ") + if current_length + len(tokens) > length: + return current_text + current_text += line + " " + current_length += len(tokens) + + def generate_prompt_landmark(self, prefix_length, suffix_length): + """Generates a text file and inserts an passkey at a random position.""" + city = random.choice(RANDOM_NEEDLE_CITIES) + magic_number = random.randint(1, 50000) + garbage_prefix = self.generate_garbage(prefix_length) + garbage_suffix = self.generate_garbage(suffix_length) + information_line = NEEDLE_TEMPLATE.format(city=city, rnd_number=magic_number) + final_question = QUESTION_TEMPLATE.format(city=city) + lines = [ + garbage_prefix, + information_line, + garbage_suffix, + final_question, + ] + context = "\n".join(lines) + return OURS_TEMPLATE.format(context=context), str(magic_number) + + def forward(self, model, sample, reduce=True): 
+ """Compute the loss for the given sample. + + Returns a tuple with three elements: + 1) the loss + 2) the sample size, which is used as the denominator for the gradient + 3) logging outputs to display while training + """ + model.eval() + all_retrieval_result = {} + random.seed(0) + for context_length in range(self.cfg.interval, self.cfg.tokens_per_sample + 1, self.cfg.interval): + all_length = (context_length - 150) + local_retrieval_result = [] + depth_number = 10 + for depth_ratio in range(0, depth_number + 1): + prefix_length = int(all_length * depth_ratio / depth_number) + suffix_length = all_length - prefix_length + n_correct = 0 + times = 10 + for _ in range(times): + prompt, pass_key = self.generate_prompt_landmark(prefix_length, suffix_length) + prompt_tokens = self.task.tokenizer.encode(prompt, bos=True) + prompt_tokens = torch.tensor([prompt_tokens], device="cuda") + print(prompt_tokens.shape) + output = self.generate(model, prompt_tokens) + pred = self.task.tokenizer.decode(output[0, prompt_tokens.shape[1]:]) + print("Answer: ", pass_key) + print("Pred: ", pred) + if pass_key in pred: + n_correct += 1 + local_retrieval_result.append(n_correct / times) + all_retrieval_result[context_length] = local_retrieval_result + + print(all_retrieval_result) + return 0, 1, {"loss": 0} + + def generate(self, model, net_input, generate_tokens=20, chunk_length = 32768): + output_tokens = torch.cat((net_input, torch.full((net_input.shape[0], generate_tokens), self.task.tokenizer.pad_id).long().cuda()), dim=1) + begin_pad_index = torch.where(output_tokens == self.task.tokenizer.pad_id)[1].min().item() + incremental_state = {} + eos_reached = torch.tensor([False] * net_input.shape[0], device="cuda") + # prefilling + for begin_index in range(0, begin_pad_index - 1, chunk_length): + end_index = min(begin_index + chunk_length, begin_pad_index - 1) + _, _ = model(output_tokens[:, begin_index : end_index], incremental_state=incremental_state, start_pos=begin_index, 
skip_cross_decoder=True, is_prefilling=True) + # generation + for index in range(begin_pad_index, output_tokens.shape[1]): + generation_net_output, _ = model(output_tokens[:, index - 1].unsqueeze(-1), incremental_state=incremental_state, start_pos=index - 1, skip_cross_decoder=False, is_prefilling=False) + generation_net_output[:, :, self.task.tokenizer.bos_id] = -math.inf + generation_net_output[:, :, self.task.tokenizer.pad_id] = -math.inf + next_tokens = torch.argmax(generation_net_output[:, -1, :], dim=-1) + pad_tokens = output_tokens[:, index] + next_tokens = torch.where((pad_tokens == self.task.tokenizer.pad_id) & ~eos_reached, next_tokens, pad_tokens) + output_tokens[:, index] = next_tokens + eos_reached |= ( + next_tokens == self.task.tokenizer.eos_id + ) + if all(eos_reached): + break + + return output_tokens + + @staticmethod + def reduce_metrics(logging_outputs) -> None: + """Aggregate logging outputs from data parallel training.""" + loss_sum = sum(log.get("loss", 0) for log in logging_outputs) + metric_sum = sum(log.get("metric", 0) for log in logging_outputs) + nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) + ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) + metrics.log_scalar( + "loss", loss_sum / ntokens, ntokens, round=3 + ) + metrics.log_scalar( + "metric", metric_sum / nsentences, nsentences, round=3 + ) + + + @staticmethod + def logging_outputs_can_be_summed() -> bool: + """ + Whether the logging outputs returned by `forward` can be summed + across workers prior to calling `reduce_metrics`. Setting this + to True will improves distributed training speed. 
+ """ + return True \ No newline at end of file diff --git a/YOCO/yoco/models/__init__.py b/YOCO/yoco/models/__init__.py new file mode 100644 index 000000000..1ff184f30 --- /dev/null +++ b/YOCO/yoco/models/__init__.py @@ -0,0 +1,41 @@ +import argparse +import importlib +import os + +try: + from torch._six import inf +except: + import sys + import torch + sys.modules["torch._six"] = torch + torch.string_classes = str + +MODEL_REGISTRY = {} +MODEL_DATACLASS_REGISTRY = {} +ARCH_MODEL_REGISTRY = {} +ARCH_MODEL_NAME_REGISTRY = {} +ARCH_MODEL_INV_REGISTRY = {} +ARCH_CONFIG_REGISTRY = {} + +# automatically import any Python files in the models/ directory +models_dir = os.path.dirname(__file__) +for file in os.listdir(models_dir): + path = os.path.join(models_dir, file) + if ( + not file.startswith("_") + and not file.startswith(".") + and (file.endswith(".py") or os.path.isdir(path)) + ): + model_name = file[: file.find(".py")] if file.endswith(".py") else file + module = importlib.import_module("models." 
+ model_name) + + # extra `model_parser` for sphinx + if model_name in MODEL_REGISTRY: + parser = argparse.ArgumentParser(add_help=False) + group_archs = parser.add_argument_group("Named architectures") + group_archs.add_argument( + "--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name] + ) + group_args = parser.add_argument_group("Additional command-line arguments") + MODEL_REGISTRY[model_name].add_args(group_args) + globals()[model_name + "_parser"] = parser \ No newline at end of file diff --git a/YOCO/yoco/models/decoder/__init__.py b/YOCO/yoco/models/decoder/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/YOCO/yoco/models/decoder/cross_attention.py b/YOCO/yoco/models/decoder/cross_attention.py new file mode 100644 index 000000000..09c31a893 --- /dev/null +++ b/YOCO/yoco/models/decoder/cross_attention.py @@ -0,0 +1,46 @@ +import torch +import torch.nn.functional as F +from torch import nn + +from fairseq.model_parallel.megatron.mpu import ( + ColumnParallelLinear, + RowParallelLinear, +) + +from .model_parallel_init import init_method +from .kernel.rotary import apply_rotary_emb +from flash_attn import flash_attn_func + +class CrossAttention(nn.Module): + def __init__( + self, + args, + ): + super().__init__() + self.args = args + self.embed_dim = args.dim + self.num_heads = args.n_attn_heads // args.model_parallel_size + self.num_kv_heads = args.n_attn_kv_heads // args.model_parallel_size + + self.head_dim = args.dim // args.n_attn_heads + self.q_proj = ColumnParallelLinear(args.dim, args.dim, bias=False, gather_output=False, init_method=init_method) + self.out_proj = RowParallelLinear(args.dim, args.dim, bias=False, input_is_parallel=True, init_method=init_method) + + def forward( + self, + x, + key, + value, + rel_pos + ): + bsz, tgt_len, _ = x.size() + + q = self.q_proj(x) + q = q.view(bsz, tgt_len, self.num_heads, self.head_dim) + q = apply_rotary_emb(q, *rel_pos, interleaved=True) + + attn = flash_attn_func(q, key, value, 
causal=True) + attn = attn.view(bsz, tgt_len, self.head_dim * self.num_heads) + + attn = self.out_proj(attn) + return attn \ No newline at end of file diff --git a/YOCO/yoco/models/decoder/feedforward_network.py b/YOCO/yoco/models/decoder/feedforward_network.py new file mode 100644 index 000000000..3972068fe --- /dev/null +++ b/YOCO/yoco/models/decoder/feedforward_network.py @@ -0,0 +1,33 @@ +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from fairseq.model_parallel.megatron.mpu import ( + ColumnParallelLinear, + RowParallelLinear, +) + +from .kernel.swiglu import swiglu +from .model_parallel_init import init_method + +class FeedForwardNetwork(nn.Module): + def __init__( + self, + embed_dim, + ffn_dim, + load_checkpoint=False, + ): + super().__init__() + self.embed_dim = embed_dim + self.fc1 = ColumnParallelLinear(self.embed_dim, ffn_dim, bias=False, gather_output=False, init_method=init_method) + self.gate = ColumnParallelLinear(self.embed_dim, ffn_dim, bias=False, gather_output=False, init_method=init_method) + self.fc2 = RowParallelLinear(ffn_dim, self.embed_dim, bias=False, input_is_parallel=True, init_method=init_method) + + def forward(self, x): + x_shape = x.shape + x = x.reshape(-1, x.size(-1)) + x = self.fc2(swiglu(self.fc1(x), self.gate(x))) + output = x.view(x_shape) + return output \ No newline at end of file diff --git a/YOCO/yoco/models/decoder/gate_retention.py b/YOCO/yoco/models/decoder/gate_retention.py new file mode 100644 index 000000000..089164c77 --- /dev/null +++ b/YOCO/yoco/models/decoder/gate_retention.py @@ -0,0 +1,87 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from fairseq.model_parallel.megatron.mpu import ( + ColumnParallelLinear, + RowParallelLinear, +) + +from .rms_norm import RMSNorm + +from .kernel.gate_recurrent import chunk_gate_retention, recurrent_gate_retention +from .kernel.rotary import apply_rotary_emb +from .kernel.swiglu import swiglu + +from 
.model_parallel_init import qkvg_init_method, out_init_method + +class GateRetention(nn.Module): + + def __init__( + self, + args, + gate_logit_normalizer: int = 16, + ): + super().__init__() + self.args = args + self.embed_dim = args.dim + self.num_heads = args.n_self_heads // args.model_parallel_size + self.head_dim = args.dim // args.n_self_heads + + self.q_proj = ColumnParallelLinear(args.dim, args.dim, bias=False, gather_output=False, init_method=qkvg_init_method) + self.k_proj = ColumnParallelLinear(args.dim, args.dim, bias=False, gather_output=False, init_method=qkvg_init_method) + self.v_proj = ColumnParallelLinear(args.dim, args.dim, bias=False, gather_output=False, init_method=qkvg_init_method) + self.g_proj = ColumnParallelLinear(args.dim, args.dim, bias=False, gather_output=False, init_method=qkvg_init_method) + self.gt_proj = ColumnParallelLinear(args.dim, args.n_self_heads, bias=False, gather_output=False, init_method=qkvg_init_method) + + self.out_proj = RowParallelLinear(args.dim, args.dim, bias=False, input_is_parallel=True, init_method=out_init_method) + + self.subln = RMSNorm(self.head_dim, elementwise_affine=False, eps=args.norm_eps) + + self.gate_logit_normalizer = gate_logit_normalizer + + def forward( + self, + x, + rel_pos, + incremental_state=None, + is_prefilling=False, + ): + bsz, tgt_len, _ = x.size() + + q = self.q_proj(x) + k = self.k_proj(x) + v = self.v_proj(x) + g = self.g_proj(x) + gt = self.gt_proj(x) + + qr = q.view(bsz, tgt_len, self.num_heads, self.head_dim) + kr = k.view(bsz, tgt_len, self.num_heads, self.head_dim) + v = v.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2) + gt = gt.view(bsz, tgt_len, self.num_heads).transpose(1, 2) + + qr = apply_rotary_emb(qr, *rel_pos, interleaved=True).transpose(1, 2) + kr = apply_rotary_emb(kr, *rel_pos, interleaved=True).transpose(1, 2) + gt = (F.logsigmoid(gt) / self.gate_logit_normalizer) + + if incremental_state is not None and not is_prefilling: + o = 
recurrent_gate_retention(qr, kr, v, gt, incremental_state) + else: + if incremental_state is not None: + index_mask = incremental_state["index_mask"] + gt_sum = gt.float().masked_fill(index_mask, 0).sum(dim=-1, keepdim=True) + gt_mask = (gt_sum - gt.float().cumsum(dim=-1)).exp().masked_fill(index_mask, 0) + next_hidden_state = (kr.transpose(-1, -2) * (self.head_dim ** -0.5)) @ (v * gt_mask.to(v.dtype).unsqueeze(-1)) + if "last_hidden_state" in incremental_state: + last_hidden_state = incremental_state["last_hidden_state"] + next_hidden_state += last_hidden_state * gt_sum.exp().unsqueeze(-1).to(v.dtype) if last_hidden_state is not None else 0 + else: + last_hidden_state = None + incremental_state["last_hidden_state"] = next_hidden_state + o = chunk_gate_retention(qr, kr, v, gt, chunk_size=256, last_hidden_state=last_hidden_state) + else: + o = chunk_gate_retention(qr, kr, v, gt, chunk_size=256) + + o = self.subln(o).transpose(1, 2).reshape(bsz, tgt_len, self.num_heads * self.head_dim) + o = swiglu(g, o) + o = self.out_proj(o) + return o diff --git a/YOCO/yoco/models/decoder/kernel/gate_recurrent.py b/YOCO/yoco/models/decoder/kernel/gate_recurrent.py new file mode 100644 index 000000000..304131ccc --- /dev/null +++ b/YOCO/yoco/models/decoder/kernel/gate_recurrent.py @@ -0,0 +1,302 @@ +import time +from typing import Optional + +import torch +import triton +import triton.language as tl + +torch.backends.cudnn.allow_tf32 = True + +@triton.jit +def _fwd_recurrence( + S, d, + O, + NUM_HEAD, NUM_BLOCK, + D_MODEL_K: tl.constexpr, D_MODEL_V: tl.constexpr, + BLOCK_MODEL_K: tl.constexpr, BLOCK_MODEL_V: tl.constexpr, + last_kv: Optional[tl.tensor] + ): + offset_bh = tl.program_id(0) + offset_d = tl.program_id(1) + offset_s = tl.program_id(2) + + S = S + offset_bh * NUM_BLOCK * D_MODEL_K * D_MODEL_V + offset_d * D_MODEL_V * BLOCK_MODEL_K + tl.arange(0, BLOCK_MODEL_K)[:, None] * D_MODEL_V + offset_s * BLOCK_MODEL_V + tl.arange(0, BLOCK_MODEL_V)[None, :] + + O = O + offset_bh * 
NUM_BLOCK * D_MODEL_K * D_MODEL_V + offset_d * D_MODEL_V * BLOCK_MODEL_K + tl.arange(0, BLOCK_MODEL_K)[:, None] * D_MODEL_V + offset_s * BLOCK_MODEL_V + tl.arange(0, BLOCK_MODEL_V)[None, :] + + if last_kv is not None: + last_kv = last_kv + offset_bh * D_MODEL_K * D_MODEL_V + offset_d * D_MODEL_V * BLOCK_MODEL_K + tl.arange(0, BLOCK_MODEL_K)[:, None] * D_MODEL_V + offset_s * BLOCK_MODEL_V + tl.arange(0, BLOCK_MODEL_V)[None, :] + acc = tl.load(last_kv).to(tl.float32) + else: + acc = tl.zeros([BLOCK_MODEL_K, BLOCK_MODEL_V], dtype=tl.float32) + + tl.store(O, acc.to(O.dtype.element_ty)) + O += D_MODEL_K * D_MODEL_V + d = d + offset_bh * NUM_BLOCK + for i in range(NUM_BLOCK-1): + d_i = tl.load(d) + S_i = tl.load(S) + acc = acc * d_i + S_i + tl.store(O, acc.to(O.dtype.element_ty)) + d += 1 + S += D_MODEL_K * D_MODEL_V + O += D_MODEL_K * D_MODEL_V + + +## NUM_SPLIT_K/V. K/V dimension split into NUM_SPLIT_K/V parts with equal size BLOCK_MODEL +@triton.jit +def _bwd_recurrence( + S, d, + DI, DG, DL, DS, + NUM_HEAD, NUM_BLOCK, + D_MODEL_K: tl.constexpr, D_MODEL_V: tl.constexpr, + BLOCK_MODEL_K: tl.constexpr, BLOCK_MODEL_V: tl.constexpr, + + ): + offset_bh = tl.program_id(0) + offset_d = tl.program_id(1) + offset_s = tl.program_id(2) + + # offset_h = offset_bh % NUM_HEAD + NUM_K = D_MODEL_K // BLOCK_MODEL_K + NUM_V = D_MODEL_V // BLOCK_MODEL_V + # skip the last chunk because it is never used + S = S + offset_bh * NUM_BLOCK * D_MODEL_K * D_MODEL_V + offset_d * D_MODEL_V * BLOCK_MODEL_K + tl.arange(0, BLOCK_MODEL_K)[:, None] * D_MODEL_V + offset_s * BLOCK_MODEL_V + tl.arange(0, BLOCK_MODEL_V)[None, :] + (NUM_BLOCK - 2) * D_MODEL_K * D_MODEL_V + + DI = DI + offset_bh * NUM_BLOCK * D_MODEL_K * D_MODEL_V + offset_d * D_MODEL_V * BLOCK_MODEL_K + tl.arange(0, BLOCK_MODEL_K)[:, None] * D_MODEL_V + offset_s * BLOCK_MODEL_V + tl.arange(0, BLOCK_MODEL_V)[None, :] + (NUM_BLOCK - 2) * D_MODEL_K * D_MODEL_V + + # start from the last chunk + DS = DS + offset_bh * NUM_BLOCK * D_MODEL_K * 
D_MODEL_V + offset_d * D_MODEL_V * BLOCK_MODEL_K + tl.arange(0, BLOCK_MODEL_K)[:, None] * D_MODEL_V + offset_s * BLOCK_MODEL_V + tl.arange(0, BLOCK_MODEL_V)[None, :] + (NUM_BLOCK - 1) * D_MODEL_K * D_MODEL_V + + DG = DG + offset_bh * NUM_BLOCK * NUM_K * NUM_V + offset_d * NUM_V + offset_s + (NUM_BLOCK - 2) * NUM_K * NUM_V + + d = d + offset_bh * NUM_BLOCK + (NUM_BLOCK - 1) + + Dacc = tl.zeros([BLOCK_MODEL_K, BLOCK_MODEL_V], dtype=tl.float32) + + # ignore the first chunk + for i in range(NUM_BLOCK - 1): + S_i = tl.load(S) + DS_i = tl.load(DS) + d_i = tl.load(d) + Dacc = Dacc * d_i + DS_i + DG_i = tl.sum(Dacc * S_i.to(tl.float32)) + + tl.store(DG, DG_i.to(DG.dtype.element_ty)) + tl.store(DI, Dacc.to(DI.dtype.element_ty)) + + S -= D_MODEL_K * D_MODEL_V + DI -= D_MODEL_K * D_MODEL_V + DS -= D_MODEL_K * D_MODEL_V + DG -= NUM_K * NUM_V + d -= 1 + + DL = DL + offset_bh * D_MODEL_K * D_MODEL_V + offset_d * D_MODEL_V * BLOCK_MODEL_K + tl.arange(0, BLOCK_MODEL_K)[:, None] * D_MODEL_V + offset_s * BLOCK_MODEL_V + tl.arange(0, BLOCK_MODEL_V)[None, :] + DS_i = tl.load(DS) + d_i = tl.load(d) + Dacc = Dacc * d_i + DS_i + tl.store(DL, Dacc.to(DL.dtype.element_ty)) + +class ChunkGateRecurrent(torch.autograd.Function): + @staticmethod + def forward(ctx, kv, cross_decay, last_kv=None): + cross_decay = cross_decay.contiguous() + kv = kv.contiguous() + + B, H, N, D_k, D_v = kv.shape + output = torch.empty_like(kv) + BLOCK_MODEL_K = 64 + BLOCK_MODEL_V = 16 + + assert D_k % BLOCK_MODEL_K == 0 + assert D_v % BLOCK_MODEL_V == 0 + + grid = (B*H, D_k//BLOCK_MODEL_K, D_v//BLOCK_MODEL_V) + ctx.grid = grid + ctx.have_last_kv = last_kv is not None + ctx.BLOCK_MODEL_K = BLOCK_MODEL_K + ctx.BLOCK_MODEL_V = BLOCK_MODEL_V + + _fwd_recurrence[grid]( + kv, + cross_decay, + output, + D_MODEL_K=D_k, D_MODEL_V=D_v, + NUM_BLOCK=N, NUM_HEAD=H, + BLOCK_MODEL_K=BLOCK_MODEL_K, + BLOCK_MODEL_V=BLOCK_MODEL_V, + last_kv=last_kv + ) + + ctx.save_for_backward(output, cross_decay) + return output + + @staticmethod 
+ def backward(ctx, DO): + DO = DO.contiguous() + + output, cross_decay = ctx.saved_tensors + + B, H, N, D_k, D_v = output.shape + + BLOCK_MODEL_K = 64 + BLOCK_MODEL_V = 16 + + grid = (B*H, D_k//BLOCK_MODEL_K, D_v//BLOCK_MODEL_V) + + DI = torch.empty_like(DO) + DG = torch.empty(B*H, N, D_k//BLOCK_MODEL_K, D_v//BLOCK_MODEL_V, device=cross_decay.device, dtype=cross_decay.dtype) + DL = torch.empty(B, H, D_k, D_v, device=output.device, dtype=output.dtype) + _bwd_recurrence[grid]( + output, cross_decay, + DI, DG, DL, DO, + NUM_HEAD=H, NUM_BLOCK = N, + D_MODEL_K = D_k, + D_MODEL_V = D_v, + BLOCK_MODEL_K=BLOCK_MODEL_K, + BLOCK_MODEL_V=BLOCK_MODEL_V, + ) + + DI[:, :, -1] = 0 + DG[:, -1] = 0 + DG = DG.view(B, H, N, -1).sum(dim=-1) + return DI, DG, DL if ctx.have_last_kv else None + +def cross_chunk(q, k, v, g, last_hidden_state=None): + kv = k.transpose(-1, -2) @ (v * (-g + g[:, :, :, -1, None]).exp()[..., None].to(v.dtype)) + cross_decay = g[:, :, :, -1].exp().to(kv.dtype) + S = chunk_gate_recurrent(kv, cross_decay, last_hidden_state) + cross = (q * g[..., None].exp().to(q.dtype)) @ S + return cross + +@torch.compile +def inner_chunk(q, k, v, g): + attn = q @ k.transpose(-1, -2) + causal_mask = torch.full([q.shape[-2], q.shape[-2]], float("-inf"), device=q.device).triu(1).type_as(q) + attn = attn * (g[..., None] - g[..., None, :] + causal_mask).exp().to(attn.dtype) + inner = attn @ v + return inner + +def chunk_gate_retention(q, k, v, g, chunk_size=64, last_hidden_state=None): + bsz, num_head, tgt_len, key_dim = q.shape + head_dim = v.shape[-1] + num_chunk = tgt_len // chunk_size + q = q.view(bsz, num_head, num_chunk, chunk_size, key_dim) + k = k.view(bsz, num_head, num_chunk, chunk_size, key_dim) * (key_dim ** -0.5) + v = v.view(bsz, num_head, num_chunk, chunk_size, head_dim) + g = g.view(bsz, num_head, num_chunk, chunk_size) + g = g.float().cumsum(-1) + cross = cross_chunk(q, k, v, g, last_hidden_state=last_hidden_state) + inner = inner_chunk(q, k, v, g) + o = cross + 
inner + return o.view(bsz, num_head, tgt_len, head_dim) + +# for long sequence parallelism +def hier_chunk_gate_retention(q, k, v, g, chunk_size=64, hier_chunk_size=16384): + bsz, num_head, tgt_len, key_dim = q.shape + head_dim = v.shape[-1] + num_hier_chunk = tgt_len // hier_chunk_size + assert tgt_len == num_hier_chunk * hier_chunk_size + + q = q.view(bsz, num_head, num_hier_chunk, hier_chunk_size, key_dim) + k = k.view(bsz, num_head, num_hier_chunk, hier_chunk_size, key_dim) + v = v.view(bsz, num_head, num_hier_chunk, hier_chunk_size, head_dim) + g = g.view(bsz, num_head, num_hier_chunk, hier_chunk_size) + hier_cross = cross_chunk(q, k * (key_dim ** -0.5), v, g.float().cumsum(-1)).view(bsz, num_head, tgt_len, head_dim) + + qi = q.transpose(1, 2).reshape(bsz * num_hier_chunk, num_head, hier_chunk_size, key_dim) + ki = k.transpose(1, 2).reshape(bsz * num_hier_chunk, num_head, hier_chunk_size, key_dim) + vi = v.transpose(1, 2).reshape(bsz * num_hier_chunk, num_head, hier_chunk_size, head_dim) + gi = g.transpose(1, 2).reshape(bsz * num_hier_chunk, num_head, hier_chunk_size) + inner_cross = chunk_gate_retention(qi, ki, vi, gi, chunk_size) + + inner_cross = inner_cross.view(bsz, num_hier_chunk, num_head, hier_chunk_size, head_dim).transpose(1, 2).reshape(bsz, num_head, tgt_len, head_dim) + o = hier_cross + inner_cross + return o + +def recurrent_gate_retention(q, k, v, g, incremental_state): + bsz, num_head, _, key_dim = q.shape + k *= key_dim ** -0.5 + g = g.view(bsz, num_head, 1, 1).float().exp() + kv = k.transpose(-1, -2) * v + if "last_hidden_state" in incremental_state: + prev_kv = incremental_state["last_hidden_state"] + kv += prev_kv * g.to(prev_kv.dtype) + + incremental_state["last_hidden_state"] = kv + o = q @ kv + return o + +def parallel_gate_retention(q, k, v, g): + k = k * (q.shape[-1] ** -0.5) + causal_mask = torch.full([q.shape[-2], q.shape[-2]], float("-inf"), device=q.device).triu(1).type_as(q) + g = g.float().cumsum(-1) + mask = g[..., None] - g[..., 
None, :] + causal_mask + mask = mask.exp() + + attn = q @ k.transpose(-1, -2) + attn = attn * mask.to(attn.dtype) + o = attn @ v + return o + +def naive_kv_recurrent(kv, cross_decay, last_kv=None): + BSZ, NUM_HEAD, NUM_BLOCK, D_MODEL_K, D_MODEL_V = kv.shape + kv_recurrent = [] + kv_state = torch.zeros(BSZ, NUM_HEAD, D_MODEL_K, D_MODEL_V, dtype=kv.dtype, device="cuda") if last_kv is None else last_kv + # accumulate kv by loop + for i in range(NUM_BLOCK): + kv_recurrent.append(kv_state) + kv_state = kv_state * cross_decay[:, :, i, None, None] + kv[:, :, i] + + kv_recurrent = torch.stack(kv_recurrent, dim=2) + return kv_recurrent + +chunk_gate_recurrent = ChunkGateRecurrent.apply + +def main(): + BSZ = 4 + NUM_HEAD = 4 + NUM_BLOCK = 16 + D_MODEL_K = 256 + D_MODEL_V = 432 + dtype = torch.float16 + kv = torch.randn(BSZ, NUM_HEAD, NUM_BLOCK, D_MODEL_K, D_MODEL_V, dtype=dtype, device="cuda") + last_kv = torch.randn(BSZ, NUM_HEAD, D_MODEL_K, D_MODEL_V, dtype=dtype, device="cuda") + kv_triton = kv.clone().detach() + last_kv_triton = last_kv.clone().detach() + cross_decay = torch.randn(BSZ, NUM_HEAD, NUM_BLOCK, dtype=dtype, device="cuda") + cross_decay = torch.sigmoid(cross_decay) + cross_decay_triton = cross_decay.clone().detach() + grad_weight = torch.randn(BSZ, NUM_HEAD, NUM_BLOCK, D_MODEL_K, D_MODEL_V, dtype=dtype, device="cuda") + kv.requires_grad = True + kv_triton.requires_grad = True + last_kv.requires_grad = True + last_kv_triton.requires_grad = True + cross_decay.requires_grad = True + cross_decay_triton.requires_grad = True + + start = time.time() + kv_recurrent = naive_kv_recurrent(kv, cross_decay, last_kv) + kv_recurrent.mul(grad_weight).sum().backward() + print("naive time:", time.time() - start) + + start = time.time() + kv_recurrent_triton = chunk_gate_recurrent(kv_triton, cross_decay_triton, last_kv_triton) + kv_recurrent_triton.mul(grad_weight).sum().backward() + print("triton time:", time.time() - start) + + print(torch.allclose(kv_recurrent, 
kv_recurrent_triton, atol=1e-3)) + print((kv_recurrent - kv_recurrent_triton).abs().max(), (kv_recurrent - kv_recurrent_triton).abs().mean()) + + print(torch.allclose(kv.grad, kv_triton.grad, atol=1e-3)) + print((kv.grad - kv_triton.grad).abs().max(), (kv.grad - kv_triton.grad).abs().mean()) + + print(torch.allclose(last_kv.grad, last_kv_triton.grad, atol=1e-3)) + print((last_kv.grad - last_kv_triton.grad).abs().max(), (last_kv.grad - last_kv_triton.grad).abs().mean()) + + print(torch.allclose(cross_decay.grad, cross_decay_triton.grad, atol=1e-3)) + print((cross_decay.grad - cross_decay_triton.grad).abs().max(), (cross_decay.grad - cross_decay_triton.grad).abs().mean()) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/YOCO/yoco/models/decoder/kernel/rotary.py b/YOCO/yoco/models/decoder/kernel/rotary.py new file mode 100644 index 000000000..8ee2cb938 --- /dev/null +++ b/YOCO/yoco/models/decoder/kernel/rotary.py @@ -0,0 +1,332 @@ +# Copyright (c) 2023, Tri Dao. 
+ +from typing import Optional, Union + +import torch + +import triton +import triton.language as tl + + +# @triton.autotune( +# configs=[ +# triton.Config({"BLOCK_M": 2}), +# triton.Config({"BLOCK_M": 4}), +# triton.Config({"BLOCK_M": 8}), +# triton.Config({"BLOCK_M": 16}), +# ], +# key=["CACHE_KEY_SEQLEN", "BLOCK_K", "INTERLEAVED"], +# ) +@triton.jit +def rotary_kernel( + OUT, # Pointers to matrices + X, + COS, + SIN, + CU_SEQLENS, + SEQLEN_OFFSETS, # this could be int or a pointer + # Matrix dimensions + seqlen, + nheads, + rotary_dim, + seqlen_ro, + CACHE_KEY_SEQLEN, + # strides + stride_out_batch, + stride_out_seqlen, + stride_out_nheads, + stride_out_headdim, + stride_x_batch, + stride_x_seqlen, + stride_x_nheads, + stride_x_headdim, + # Meta-parameters + BLOCK_K: tl.constexpr, + IS_SEQLEN_OFFSETS_TENSOR: tl.constexpr, + IS_VARLEN: tl.constexpr, + INTERLEAVED: tl.constexpr, + CONJUGATE: tl.constexpr, + BLOCK_M: tl.constexpr, +): + pid_m = tl.program_id(axis=0) + pid_batch = tl.program_id(axis=1) + pid_head = tl.program_id(axis=2) + rotary_dim_half = rotary_dim // 2 + + if not IS_VARLEN: + X = X + pid_batch * stride_x_batch + pid_head * stride_x_nheads + OUT = OUT + pid_batch * stride_out_batch + pid_head * stride_out_nheads + else: + start_idx = tl.load(CU_SEQLENS + pid_batch) + seqlen = tl.load(CU_SEQLENS + pid_batch + 1) - start_idx + X = X + start_idx * stride_x_seqlen + pid_head * stride_x_nheads + OUT = OUT + start_idx * stride_out_seqlen + pid_head * stride_out_nheads + + if pid_m * BLOCK_M >= seqlen: + return + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + if not IS_SEQLEN_OFFSETS_TENSOR: + rm_cs = rm + SEQLEN_OFFSETS + else: + rm_cs = rm + tl.load(SEQLEN_OFFSETS + pid_batch) + rk = tl.arange(0, BLOCK_K) + rk_half = tl.arange(0, BLOCK_K // 2) + + if not INTERLEAVED: + # Load the 1st and 2nd halves of X, do calculation, then store to 1st and 2nd halves of OUT + X = X + (rm[:, None] * stride_x_seqlen + rk_half[None, :] * stride_x_headdim) + COS = COS + 
(rm_cs[:, None] * rotary_dim_half + rk_half[None, :]) + SIN = SIN + (rm_cs[:, None] * rotary_dim_half + rk_half[None, :]) + cos = tl.load( + COS, mask=(rm_cs[:, None] < seqlen_ro) & (rk_half[None, :] < rotary_dim_half), other=1.0 + ).to(tl.float32) + sin = tl.load( + SIN, mask=(rm_cs[:, None] < seqlen_ro) & (rk_half[None, :] < rotary_dim_half), other=0.0 + ).to(tl.float32) + x0 = tl.load( + X, mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half), other=0.0 + ).to(tl.float32) + x1 = tl.load( + X + rotary_dim_half * stride_x_headdim, + mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half), + other=0.0, + ).to(tl.float32) + if CONJUGATE: + sin = -sin + o0 = x0 * cos - x1 * sin + o1 = x0 * sin + x1 * cos + # write back result + OUT = OUT + (rm[:, None] * stride_out_seqlen + rk_half[None, :] * stride_out_headdim) + tl.store(OUT, o0, mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half)) + tl.store( + OUT + rotary_dim_half * stride_out_headdim, + o1, + mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half), + ) + else: + # We don't want to load X[0, 2, 4, ...] and X[1, 3, 5, ...] separately since both are slow. + # Instead, we load x0 = X[0, 1, 2, 3, ...] and x1 = X[1, 0, 3, 2, ...]. + # Loading x0 will be fast but x1 will be slow. + # Then we load cos = COS[0, 0, 1, 1, ...] and sin = SIN[0, 0, 1, 1, ...]. + # Then we do the calculation and use tl.where to pick put the right outputs for the even + # and for the odd indices. + rk_swap = rk + ((rk + 1) % 2) * 2 - 1 # 1, 0, 3, 2, 5, 4, ... 
+ rk_repeat = tl.arange(0, BLOCK_K) // 2 + X0 = X + (rm[:, None] * stride_x_seqlen + rk[None, :] * stride_x_headdim) + X1 = X + (rm[:, None] * stride_x_seqlen + rk_swap[None, :] * stride_x_headdim) + COS = COS + (rm_cs[:, None] * rotary_dim_half + rk_repeat[None, :]) + SIN = SIN + (rm_cs[:, None] * rotary_dim_half + rk_repeat[None, :]) + cos = tl.load( + COS, + mask=(rm_cs[:, None] < seqlen_ro) & (rk_repeat[None, :] < rotary_dim_half), + other=1.0, + ).to(tl.float32) + sin = tl.load( + SIN, + mask=(rm_cs[:, None] < seqlen_ro) & (rk_repeat[None, :] < rotary_dim_half), + other=0.0, + ).to(tl.float32) + x0 = tl.load(X0, mask=(rm[:, None] < seqlen) & (rk[None, :] < rotary_dim), other=0.0).to( + tl.float32 + ) + x1 = tl.load( + X1, mask=(rm[:, None] < seqlen) & (rk_swap[None, :] < rotary_dim), other=0.0 + ).to(tl.float32) + if CONJUGATE: + sin = -sin + x0_cos = x0 * cos + x1_sin = x1 * sin + out = tl.where(rk[None, :] % 2 == 0, x0_cos - x1_sin, x0_cos + x1_sin) + OUT = OUT + (rm[:, None] * stride_out_seqlen + rk[None, :] * stride_out_headdim) + tl.store(OUT, out, mask=(rm[:, None] < seqlen) & (rk[None, :] < rotary_dim)) + + +def apply_rotary( + x: torch.Tensor, + cos: torch.Tensor, + sin: torch.Tensor, + seqlen_offsets: Union[int, torch.Tensor] = 0, + cu_seqlens: Optional[torch.Tensor] = None, + max_seqlen: Optional[int] = None, + interleaved=False, + inplace=False, + conjugate=False, +) -> torch.Tensor: + """ + Arguments: + x: (batch, seqlen, nheads, headdim) if cu_seqlens is None + else (total_seqlen, nheads, headdim). 
+ cos: (seqlen_ro, rotary_dim / 2) + sin: (seqlen_ro, rotary_dim / 2) + seqlen_offsets: integer or integer tensor of size (batch,) + cu_seqlens: (batch + 1,) or None + max_seqlen: int + Returns: + y: (batch, seqlen, nheads, headdim) + """ + is_varlen = cu_seqlens is not None + if not is_varlen: + batch, seqlen, nheads, headdim = x.shape + else: + assert max_seqlen is not None, "If cu_seqlens is passed in, then max_seqlen must be passed" + total_seqlen, nheads, headdim = x.shape + batch_p_1 = cu_seqlens.shape[0] + batch = batch_p_1 - 1 + seqlen = max_seqlen + seqlen_ro, rotary_dim = cos.shape + assert sin.shape == cos.shape + rotary_dim *= 2 + assert rotary_dim <= headdim, "rotary_dim must be <= headdim" + assert headdim <= 256, "Only support headdim <= 256" + assert seqlen_ro >= seqlen, "seqlen_ro must be >= seqlen" + + assert ( + cos.dtype == sin.dtype + ), f"cos and sin must have the same dtype, got {cos.dtype} and {sin.dtype}" + assert ( + x.dtype == cos.dtype + ), f"Input and cos/sin must have the same dtype, got {x.dtype} and {cos.dtype}" + + cos, sin = cos.contiguous(), sin.contiguous() + if isinstance(seqlen_offsets, torch.Tensor): + assert seqlen_offsets.shape == (batch,) + assert seqlen_offsets.dtype in [torch.int32, torch.int64] + seqlen_offsets = seqlen_offsets.contiguous() + else: + assert seqlen_offsets + seqlen <= seqlen_ro + + output = torch.empty_like(x) if not inplace else x + if rotary_dim < headdim and not inplace: + output[..., rotary_dim:].copy_(x[..., rotary_dim:]) + + BLOCK_K = ( + 32 + if rotary_dim <= 32 + else (64 if rotary_dim <= 64 else (128 if rotary_dim <= 128 else 256)) + ) + grid = lambda META: (triton.cdiv(seqlen, META["BLOCK_M"]), batch, nheads) # noqa + BLOCK_M = 4 if interleaved else (8 if rotary_dim <= 64 else 4) + + # Need this, otherwise Triton tries to launch from cuda:0 and we get + # ValueError: Pointer argument (at 0) cannot be accessed from Triton (cpu tensor?) 
+ with torch.cuda.device(x.device.index): + rotary_kernel[grid]( + output, # data ptrs + x, + cos, + sin, + cu_seqlens, + seqlen_offsets, + seqlen, # shapes + nheads, + rotary_dim, + seqlen_ro, + seqlen // 128, # key for triton cache (limit number of compilations) + output.stride(0) if not is_varlen else 0, # batch_strides if not varlen else 0 + output.stride(-3), # seqlen_stride or total_seqlen_stride + output.stride(-2), # nheads_stride + output.stride(-1), # headdim_stride + x.stride(0) if not is_varlen else 0, # batch_strides if not varlen else 0 + x.stride(-3), # seqlen stride or total_seqlen_stride + x.stride(-2), # nheads stride + x.stride(-1), # headdim stride + BLOCK_K, + isinstance(seqlen_offsets, torch.Tensor), + is_varlen, + interleaved, + conjugate, + BLOCK_M, + ) + return output + +class ApplyRotaryEmb(torch.autograd.Function): + @staticmethod + def forward( + ctx, + x, + cos, + sin, + interleaved=False, + inplace=False, + seqlen_offsets: Union[int, torch.Tensor] = 0, + cu_seqlens: Optional[torch.Tensor] = None, + max_seqlen: Optional[int] = None, + ): + out = apply_rotary( + x, + cos, + sin, + seqlen_offsets=seqlen_offsets, + cu_seqlens=cu_seqlens, + max_seqlen=max_seqlen, + interleaved=interleaved, + inplace=inplace, + ) + if isinstance(seqlen_offsets, int): + # Can't save int with save_for_backward + ctx.save_for_backward(cos, sin, cu_seqlens) + ctx.seqlen_offsets = seqlen_offsets + else: + ctx.save_for_backward(cos, sin, cu_seqlens, seqlen_offsets) + ctx.seqlen_offsets = None + ctx.interleaved = interleaved + ctx.inplace = inplace + ctx.max_seqlen = max_seqlen + return out if not inplace else x + + @staticmethod + def backward(ctx, do): + seqlen_offsets = ctx.seqlen_offsets + if seqlen_offsets is None: + cos, sin, cu_seqlens, seqlen_offsets = ctx.saved_tensors + else: + cos, sin, cu_seqlens = ctx.saved_tensors + # TD [2023-09-02]: For some reason Triton (2.0.0.post1) errors with + # "[CUDA]: invalid device context", and cloning makes it work. 
Idk why. Triton 2.1.0 works. + if not ctx.interleaved and not ctx.inplace: + do = do.clone() + dx = apply_rotary( + do, + cos, + sin, + seqlen_offsets=seqlen_offsets, + cu_seqlens=cu_seqlens, + max_seqlen=ctx.max_seqlen, + interleaved=ctx.interleaved, + inplace=ctx.inplace, + conjugate=True, + ) + return dx, None, None, None, None, None, None, None + + +def apply_rotary_emb( + x, + cos, + sin, + interleaved=False, + inplace=False, + seqlen_offsets: Union[int, torch.Tensor] = 0, + cu_seqlens: Optional[torch.Tensor] = None, + max_seqlen: Optional[int] = None, +): + """ + Arguments: + x: (batch_size, seqlen, nheads, headdim) if cu_seqlens is None + else (total_seqlen, nheads, headdim) + cos, sin: (seqlen_rotary, rotary_dim / 2) + interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead + of 1st half and 2nd half (GPT-NeoX style). + inplace: if True, apply rotary embedding in-place. + seqlen_offsets: (batch_size,) or int. Each sequence in x is shifted by this amount. + Most commonly used in inference when we have KV cache. + cu_seqlens: (batch + 1,) or None + max_seqlen: int + Return: + out: (batch_size, seqlen, nheads, headdim) if cu_seqlens is None + else (total_seqlen, nheads, headdim) + rotary_dim must be <= headdim + Apply rotary embedding to the first rotary_dim of x. 
+ """ + return ApplyRotaryEmb.apply( + x, cos, sin, interleaved, inplace, seqlen_offsets, cu_seqlens, max_seqlen + ) diff --git a/YOCO/yoco/models/decoder/kernel/swiglu.py b/YOCO/yoco/models/decoder/kernel/swiglu.py new file mode 100644 index 000000000..d57589d21 --- /dev/null +++ b/YOCO/yoco/models/decoder/kernel/swiglu.py @@ -0,0 +1,32 @@ +import torch + + +swiglu_fwd_codestring = """ +template T swiglu_fwd(T x, T y) { + return float(x) * float(y) / (1.0f + ::exp(-float(x))); +} +""" +swiglu_bwd_codestring = """ +template T swiglu_bwd(T x, T y, T g, T& dx, T& dy) { + float x_sigmoid = 1.0f / (1.0f + ::exp(-float(x))); + dx = x_sigmoid * (1 + float(x) * (1.0f - x_sigmoid)) * float(g) * float(y); + dy = float(x) * x_sigmoid * float(g); +} +""" +swiglu_fwd = torch.cuda.jiterator._create_jit_fn(swiglu_fwd_codestring) +swiglu_bwd = torch.cuda.jiterator._create_multi_output_jit_fn(swiglu_bwd_codestring, num_outputs=2) + + +class SwiGLUFunction(torch.autograd.Function): + + @staticmethod + def forward(ctx, x, y): + ctx.save_for_backward(x, y) + return swiglu_fwd(x, y) + + @staticmethod + def backward(ctx, dout): + x, y = ctx.saved_tensors + return swiglu_bwd(x, y, dout) + +swiglu = SwiGLUFunction.apply diff --git a/YOCO/yoco/models/decoder/model_parallel_init.py b/YOCO/yoco/models/decoder/model_parallel_init.py new file mode 100644 index 000000000..3eb50a854 --- /dev/null +++ b/YOCO/yoco/models/decoder/model_parallel_init.py @@ -0,0 +1,16 @@ +import math + +import torch +import torch.nn as nn + +def init_method(tensor, **kwargs): + nn.init.kaiming_uniform_(tensor, a=math.sqrt(5)) + +def qkvg_init_method(tensor, **kwargs): + nn.init.xavier_uniform_(tensor, gain = 2 ** -2.5) + +def out_init_method(tensor, **kwargs): + nn.init.xavier_uniform_(tensor, gain = 2 ** -1) + +def vocab_init_method(tensor, **kwargs): + torch.nn.init.normal_(tensor, mean=0, std=tensor.shape[1] ** -0.5) diff --git a/YOCO/yoco/models/decoder/rms_norm.py b/YOCO/yoco/models/decoder/rms_norm.py new file 
# --- file: YOCO/yoco/models/decoder/rms_norm.py ---
import torch
import torch.nn as nn


class RMSNorm(nn.Module):
    """Root-mean-square LayerNorm: no mean subtraction, no bias.

    Normalization runs in float32 for numerical stability and the result is
    cast back to the input dtype before the (optional) affine scale.
    """

    def __init__(self, dim: int, eps: float = 1e-6, elementwise_affine=True):
        super().__init__()
        self.dim = dim
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            self.weight = nn.Parameter(torch.ones(dim))
        else:
            self.register_parameter('weight', None)

    def _norm(self, x):
        # x / sqrt(mean(x^2) + eps), reduced over the last dimension.
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        output = self._norm(x.float()).type_as(x)
        if self.weight is not None:
            output = output * self.weight
        return output

    def extra_repr(self) -> str:
        return f'dim={self.dim}, eps={self.eps}, elementwise_affine={self.elementwise_affine}'


# --- file: YOCO/yoco/models/decoder/sliding_window_attention.py ---
import torch
import torch.nn.functional as F
from torch import nn

from fairseq.model_parallel.megatron.mpu import (
    ColumnParallelLinear,
    RowParallelLinear,
)

from .model_parallel_init import init_method
from .kernel.rotary import apply_rotary_emb

from flash_attn import flash_attn_func


class SlidingWindowAttention(nn.Module):
    """Self-attention limited to a fixed sliding window, via flash-attn.

    Keeps a rolling KV cache of the most recent `window_size` positions for
    incremental decoding.
    """

    def __init__(self, args):
        super().__init__()
        self.args = args
        self.embed_dim = args.dim
        self.num_heads = args.n_self_heads // args.model_parallel_size
        self.window_size = args.sliding_window - 1  # compatible with flash attention

        self.head_dim = args.dim // args.n_self_heads

        self.q_proj = ColumnParallelLinear(args.dim, args.dim, bias=False, gather_output=False, init_method=init_method)
        self.k_proj = ColumnParallelLinear(args.dim, args.dim, bias=False, gather_output=False, init_method=init_method)
        self.v_proj = ColumnParallelLinear(args.dim, args.dim, bias=False, gather_output=False, init_method=init_method)
        self.out_proj = RowParallelLinear(args.dim, args.dim, bias=False, input_is_parallel=True, init_method=init_method)

    def forward(
        self,
        x,
        rel_pos,
        start_pos=0,
        incremental_state=None,
    ):
        bsz, tgt_len, embed_dim = x.size()
        src_len = tgt_len

        q = self.q_proj(x)
        k = self.k_proj(x)
        v = self.v_proj(x)

        q = q.view(bsz, tgt_len, self.num_heads, self.head_dim)
        k = k.view(bsz, src_len, self.num_heads, self.head_dim)
        v = v.view(bsz, src_len, self.num_heads, self.head_dim)

        q = apply_rotary_emb(q, *rel_pos, interleaved=True)
        k = apply_rotary_emb(k, *rel_pos, interleaved=True)
        if incremental_state is not None:
            if "prev_key" not in incremental_state:
                # Lazily allocate the rolling cache at max batch size.
                incremental_state["prev_key"] = torch.empty(self.args.max_batch_size, self.window_size, self.num_heads, self.head_dim, device=x.device, dtype=x.dtype)
                incremental_state["prev_value"] = torch.empty(self.args.max_batch_size, self.window_size, self.num_heads, self.head_dim, device=x.device, dtype=x.dtype)

            key = torch.cat([incremental_state["prev_key"][:bsz, :start_pos], k], dim=1)
            value = torch.cat([incremental_state["prev_value"][:bsz, :start_pos], v], dim=1)
            if key.shape[1] > self.window_size:
                # Cache full: keep only the most recent window_size positions.
                incremental_state["prev_key"][:bsz] = key[:, -self.window_size:]
                incremental_state["prev_value"][:bsz] = value[:, -self.window_size:]
            else:
                incremental_state["prev_key"][:bsz, start_pos : start_pos + tgt_len] = k
                incremental_state["prev_value"][:bsz, start_pos : start_pos + tgt_len] = v

            # BUG FIX: the original computed `key`/`value` (cached prefix +
            # current step) but then attended over the current-step k/v only,
            # so a single-token decode step could attend to nothing but
            # itself. Attend over the cached tensors instead; for prefill with
            # start_pos == 0 these equal k/v, so that path is unchanged.
            # flash-attn aligns causal masking bottom-right when kv is longer
            # than q, which matches this cache layout.
            k, v = key, value

        # NOTE(review): window_size is already sliding_window - 1 and flash
        # attention receives (window_size - 1, 0) lookback — i.e. an effective
        # context of sliding_window - 1 tokens including the current one.
        # Confirm this double offset is intended.
        attn = flash_attn_func(q, k, v, causal=True, window_size=(self.window_size - 1, 0))
        attn = attn.reshape(bsz, tgt_len, self.head_dim * self.num_heads)

        attn = self.out_proj(attn)
        return attn
# ==== YOCO/yoco/models/decoder/transformer.py ====
import json
import math
import os
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Tuple

import torch
from torch import nn

# Guarded: heavy GPU / model-parallel dependencies.  The guard lets the
# module import for config-only use (ModelArgs, precompute_freqs_cis) in
# environments without flash-attn / fairseq installed.
try:
    from flash_attn import flash_attn_func
    from fairseq.model_parallel.megatron.mpu import (
        ColumnParallelLinear,
        RowParallelLinear,
        copy_to_model_parallel_region,
        VocabParallelEmbedding,
    )
    from fairscale.nn import checkpoint_wrapper
    from .rms_norm import RMSNorm
    from .kernel.rotary import apply_rotary_emb
    from .model_parallel_init import init_method, vocab_init_method
except ImportError:
    pass


def precompute_freqs_cis(dim: int, end: int, theta: float) -> torch.Tensor:
    """Return the (end, dim // 2) table of rotary angles pos * freq.

    Row p holds p * theta**(-2i/dim) for i in [0, dim/2); cos/sin of a row
    give the rotary embedding for absolute position p.
    """
    freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
    t = torch.arange(end, device=freqs.device)  # type: ignore
    freqs = torch.outer(t, freqs).float()  # type: ignore
    return freqs


@dataclass
class ModelArgs:
    """Configuration for the reference (LLaMA-style) transformer decoder."""
    dim: int
    n_layers: int
    head_dim: int
    hidden_dim: int
    n_heads: int
    n_kv_heads: int
    norm_eps: float
    vocab_size: int

    max_batch_size: int = 0
    max_seq_len: int = -1
    model_parallel_size: int = 1
    load_checkpoint: bool = False
    rope_theta: float = 10000.0
    # When set, attention is restricted to a window of this many tokens.
    sliding_window: Optional[int] = None


class Attention(nn.Module):
    """Grouped-query attention with rotary embeddings, an optional sliding
    window, and a preallocated KV cache for incremental decoding."""

    def __init__(self, args: ModelArgs):
        super().__init__()
        self.args = args

        self.dim = args.dim
        self.head_dim = args.head_dim
        self.hidden_dim = args.n_heads * args.head_dim
        self.key_value_dim = args.n_kv_heads * args.head_dim
        # Heads are split across model-parallel ranks.
        self.n_heads = args.n_heads // args.model_parallel_size
        self.n_kv_heads = args.n_kv_heads // args.model_parallel_size
        self.activate_sliding_window = args.sliding_window is not None
        # Cache holds the window (minus the current token) or the full sequence.
        self.cache_len = args.sliding_window - 1 if self.activate_sliding_window else args.max_seq_len

        self.repeats = self.n_heads // self.n_kv_heads

        self.scale = self.args.head_dim**-0.5

        self.wq = ColumnParallelLinear(self.dim, self.hidden_dim, bias=False, gather_output=False, init_method=init_method)
        self.wk = ColumnParallelLinear(self.dim, self.key_value_dim, bias=False, gather_output=False, init_method=init_method)
        self.wv = ColumnParallelLinear(self.dim, self.key_value_dim, bias=False, gather_output=False, init_method=init_method)
        self.wo = RowParallelLinear(self.hidden_dim, self.dim, bias=False, input_is_parallel=True, init_method=init_method)

    def forward(
        self,
        x: torch.Tensor,
        rel_pos: Tuple[torch.Tensor, torch.Tensor],
        start_pos: int,
        incremental_state = None,
    ) -> torch.Tensor:
        bsz, seqlen, _ = x.shape

        xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
        xq = xq.view(bsz, seqlen, self.n_heads, self.head_dim)
        xk = xk.view(bsz, seqlen, self.n_kv_heads, self.head_dim)
        xv = xv.view(bsz, seqlen, self.n_kv_heads, self.head_dim)
        xq = apply_rotary_emb(xq, *rel_pos)
        xk = apply_rotary_emb(xk, *rel_pos)
        if incremental_state is not None:
            if "cache_k" not in incremental_state:
                incremental_state["cache_k"] = torch.zeros(
                    (
                        self.args.max_batch_size,
                        self.cache_len,
                        self.n_kv_heads,
                        self.head_dim,
                    )
                ).to(xk)
                incremental_state["cache_v"] = torch.zeros(
                    (
                        self.args.max_batch_size,
                        self.cache_len,
                        self.n_kv_heads,
                        self.head_dim,
                    )
                ).to(xv)
            # BUGFIX: slice the cache to the live batch ([:bsz, ...]).  The
            # cache is allocated for max_batch_size, so concatenating the
            # full buffer breaks whenever bsz < max_batch_size (the sliding
            # window module already did this correctly).
            key = torch.cat([incremental_state["cache_k"][:bsz, :start_pos], xk], dim=1)
            value = torch.cat([incremental_state["cache_v"][:bsz, :start_pos], xv], dim=1)
            if key.shape[1] > self.cache_len:
                # Roll the window: keep only the newest cache_len positions.
                incremental_state["cache_k"][:bsz] = key[:, -self.cache_len:]
                incremental_state["cache_v"][:bsz] = value[:, -self.cache_len:]
            else:
                incremental_state["cache_k"][:bsz, start_pos : start_pos + seqlen] = xk
                incremental_state["cache_v"][:bsz, start_pos : start_pos + seqlen] = xv
        else:
            key, value = xk, xv

        # flash-attn handles GQA (n_heads > n_kv_heads) natively.
        output = flash_attn_func(xq, key, value, causal=True, window_size=(self.args.sliding_window - 1, 0) if self.activate_sliding_window else (-1, -1))

        return self.wo(output.view(bsz, seqlen, self.n_heads * self.head_dim))


class FeedForward(nn.Module):
    """SwiGLU feed-forward: w2(silu(w1(x)) * w3(x))."""

    def __init__(self, args: ModelArgs):
        super().__init__()
        self.w1 = ColumnParallelLinear(args.dim, args.hidden_dim, bias=False, gather_output=False, init_method=init_method)
        self.w2 = RowParallelLinear(args.hidden_dim, args.dim, bias=False, input_is_parallel=True, init_method=init_method)
        self.w3 = ColumnParallelLinear(args.dim, args.hidden_dim, bias=False, gather_output=False, init_method=init_method)

    def forward(self, x) -> torch.Tensor:
        return self.w2(nn.functional.silu(self.w1(x)) * self.w3(x))


class TransformerBlock(nn.Module):
    """Pre-norm block: attention + residual, then feed-forward + residual."""

    def __init__(self, args: ModelArgs):
        super().__init__()
        self.n_heads = args.n_heads
        self.dim = args.dim
        self.attention = Attention(args)
        self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)
        self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)
        self.args = args

        self.feed_forward: nn.Module
        self.feed_forward = FeedForward(args=args)

    def forward(
        self, x: torch.Tensor, rel_pos: Tuple[torch.Tensor, torch.Tensor], start_pos: int, incremental_state = None
    ) -> torch.Tensor:
        r = self.attention.forward(self.attention_norm(x), rel_pos, start_pos, incremental_state)
        h = x + r
        r = self.feed_forward.forward(self.ffn_norm(h))
        out = h + r
        return out


class Transformer(nn.Module):
    """LLaMA-style decoder-only transformer (model-parallel aware)."""

    def __init__(
        self,
        args: ModelArgs,
        mp_rank: int = 0,
        checkpoint_activations: bool = False
    ):
        super().__init__()
        self.args = args
        self.vocab_size = args.vocab_size
        self.n_layers = args.n_layers
        # Rotary tables are built lazily on first forward.
        self._precomputed_freqs_cis: Optional[torch.Tensor] = None
        self._window_precomputed_freqs_cis: Optional[torch.Tensor] = None
        self._global_precomputed_freqs_cis: Optional[torch.Tensor] = None
        assert self.vocab_size > 0
        self.mp_rank = mp_rank
        self.checkpoint_activations = checkpoint_activations
        self.tok_embeddings = VocabParallelEmbedding(
            args.vocab_size, args.dim, -1, init_method=vocab_init_method
        )
        self.norm = RMSNorm(args.dim, eps=args.norm_eps)
        # Output projection is vocab-sharded across model-parallel ranks.
        self.output = nn.Linear(args.dim, args.vocab_size // args.model_parallel_size, bias=False)
        layers = [TransformerBlock(args=args) for idx in range(args.n_layers)]
        if checkpoint_activations:
            layers = [checkpoint_wrapper(layer) for layer in layers]
        self.layers = nn.ModuleList(layers)
        self.n_local_layers = len(self.layers)

    @property
    def dtype(self) -> torch.dtype:
        return next(self.parameters()).dtype

    @property
    def device(self) -> torch.device:
        return next(self.parameters()).device

    def build_rel_pos(self, x, start_pos):
        """Return (cos, sin) rotary tables for positions [start_pos, start_pos + len)."""
        if self._precomputed_freqs_cis is None:
            theta = self.args.rope_theta
            self._precomputed_freqs_cis = precompute_freqs_cis(
                self.args.head_dim, self.args.max_seq_len, theta
            )
        if self._precomputed_freqs_cis.device != self.device:
            self._precomputed_freqs_cis = self._precomputed_freqs_cis.to(
                device=self.device
            )
        cos = torch.cos(self._precomputed_freqs_cis[start_pos:start_pos+x.size(1)])
        sin = torch.sin(self._precomputed_freqs_cis[start_pos:start_pos+x.size(1)])
        rel_pos = (cos.to(x.dtype), sin.to(x.dtype))
        return rel_pos

    def forward_partial(
        self,
        input_ids: torch.Tensor,
        start_pos: Optional[int] = 0,
        incremental_state = None,
    ) -> torch.Tensor:
        """Run embedding + all layers + final norm (no output projection)."""
        h = self.tok_embeddings(input_ids)
        rel_pos = self.build_rel_pos(h, start_pos)
        for local_layer_id, layer in enumerate(self.layers):
            if incremental_state is not None:
                if local_layer_id not in incremental_state:
                    incremental_state[local_layer_id] = {}
            h = layer(h, rel_pos, start_pos, incremental_state=incremental_state[local_layer_id] if incremental_state is not None else None)

        return self.norm(h)

    def forward(
        self,
        input_ids: torch.Tensor,
        start_pos: Optional[int] = 0,
        incremental_state = None,
    ) -> torch.Tensor:
        """Return (logits in fp32, None)."""
        h = self.forward_partial(input_ids, start_pos, incremental_state)
        if self.args.model_parallel_size > 1:
            h = copy_to_model_parallel_region(h)
        outs = self.output(h)
        return outs.float(), None

    def load_state_dict(self, state_dict, strict=False, assign=False):
        """Load a full (unsharded) checkpoint, slicing each tensor down to
        this model-parallel rank before delegating to nn.Module."""
        state_to_load = {}
        for k, v in state_dict.items():
            if k.startswith("tok_embeddings") or k.startswith("output"):
                # Vocab-sharded tensors: take this rank's vocab slice.
                state_to_load[k] = v.view(self.args.model_parallel_size, self.vocab_size // self.args.model_parallel_size, self.args.dim)[self.mp_rank]
            elif "wq" in k or "wk" in k or "wv" in k or "w1" in k or "w3" in k:
                # Column-parallel weights: shard along the output dimension.
                state_to_load[k] = v.view(self.args.model_parallel_size, -1, v.shape[1])[self.mp_rank]
            elif "wo" in k or "w2" in k:
                # Row-parallel weights: shard along the input dimension.
                state_to_load[k] = v.view(v.shape[0], self.args.model_parallel_size, -1)[:, self.mp_rank]
            else:
                state_to_load[k] = v
        # NOTE(review): `strict` is accepted but deliberately not forwarded;
        # sharded checkpoints never match the full module key set.
        super().load_state_dict(state_to_load, strict=False, assign=assign)
        print("Loaded state dict from checkpoint.")
# ==== YOCO/yoco/models/decoder/yoco.py (YOCOArgs .. YOCO) ====
import math
from dataclasses import dataclass
from typing import Optional

import torch
import torch.nn as nn
import torch.nn.functional as F


@dataclass
class YOCOArgs:
    """Configuration for the YOCO decoder stack."""
    dim: int
    n_layers: int
    hidden_dim: int
    n_self_heads: int
    n_attn_heads: int
    n_attn_kv_heads: int
    vocab_size: int

    max_batch_size: int = 0
    max_seq_len: int = -1
    model_parallel_size: int = 1
    load_checkpoint: bool = False
    rope_theta: float = 10000.0
    norm_eps: float = 1e-5
    sliding_window: Optional[int] = None


class DecoderLayer(nn.Module):
    """One pre-norm block: a token mixer (cross-attention, sliding-window
    attention, or gated retention) followed by a feed-forward network,
    each wrapped in a residual connection."""

    def __init__(
        self,
        args: YOCOArgs,
        is_cross_layer=False
    ):
        super().__init__()
        self.args = args
        self.is_cross_layer = is_cross_layer

        # Choose the token mixer for this layer.
        if is_cross_layer:
            self.mixer = CrossAttention(args)
        elif args.sliding_window is not None:
            self.mixer = SlidingWindowAttention(args)
        else:
            self.mixer = GateRetention(args)

        self.mixer_layer_norm = RMSNorm(args.dim, eps=args.norm_eps)
        self.ffn = FeedForwardNetwork(
            args.dim,
            args.hidden_dim,
            args.load_checkpoint
        )
        self.final_layer_norm = RMSNorm(args.dim, eps=args.norm_eps)

    def forward(
        self,
        x,
        start_pos=0,
        key=None,
        value=None,
        rel_pos=None,
        incremental_state=None,
        is_prefilling=False,
    ):
        residual = x
        h = self.mixer_layer_norm(x)

        # Each mixer variant takes a different subset of the arguments.
        if self.is_cross_layer:
            h = self.mixer(h, key, value, rel_pos=rel_pos)
        elif self.args.sliding_window is not None:
            h = self.mixer(h, rel_pos=rel_pos, start_pos=start_pos,
                           incremental_state=incremental_state)
        else:
            h = self.mixer(h, rel_pos=rel_pos,
                           incremental_state=incremental_state,
                           is_prefilling=is_prefilling,)

        h = h + residual
        residual = h
        out = self.final_layer_norm(h)
        out = self.ffn(out)
        return out + residual


class SelfDecoder(nn.Module):
    """First half of YOCO: n_layers // 2 self layers whose state is a
    constant-size window rather than a full-sequence KV cache."""

    def __init__(
        self,
        args: YOCOArgs,
        checkpoint_activations: bool = False
    ):
        super().__init__()
        self.args = args
        blocks = [DecoderLayer(args, is_cross_layer=False,) for _ in range(args.n_layers // 2)]
        if checkpoint_activations:
            blocks = [checkpoint_wrapper(b) for b in blocks]
        self.layers = nn.ModuleList(blocks)
        self.head_dim = args.dim // args.n_self_heads
        # Chunk size expected by the gate-retention kernel during prefill.
        self.block_size = 256
        self._precomputed_freqs_cis = None

    def build_rel_pos(self, x, start_pos):
        # Lazily build the rotary angle table once, then slice per call.
        if self._precomputed_freqs_cis is None:
            angle = 1.0 / (self.args.rope_theta ** torch.linspace(0, 1, self.head_dim // 2, dtype=torch.float, device=x.device))
            index = torch.arange(self.args.max_seq_len).to(angle)
            self._precomputed_freqs_cis = index[:, None] * angle

        table = self._precomputed_freqs_cis[start_pos:start_pos + x.size(1)]
        return (torch.cos(table).to(x.dtype), torch.sin(table).to(x.dtype))

    def get_index_mask(self, x, length, pad_length):
        # True at padded positions (index >= real length).
        return torch.arange(pad_length, device=x.device) >= length

    def forward(
        self,
        x,
        incremental_state=None,
        is_prefilling=False,
        start_pos=0
    ):
        # Pad prefill chunks to a multiple of block_size for the retention kernel.
        if is_prefilling and x.size(1) % self.block_size != 0 and self.args.sliding_window is None:
            pad = self.block_size - x.size(1) % self.block_size
            x = F.pad(x, (0, 0, 0, pad), value=0)
        else:
            pad = 0

        if incremental_state is not None and is_prefilling:
            index_mask = self.get_index_mask(x, x.size(1) - pad, x.size(1))

        rel_pos = self.build_rel_pos(x, start_pos)
        for layer_idx, layer in enumerate(self.layers):
            if incremental_state is not None:
                if layer_idx not in incremental_state:
                    incremental_state[layer_idx] = {}
                if is_prefilling:
                    incremental_state[layer_idx]["index_mask"] = index_mask
            x = layer(
                x,
                start_pos=start_pos,
                rel_pos=rel_pos,
                incremental_state=incremental_state[layer_idx] if incremental_state is not None else None,
                is_prefilling=is_prefilling,)

        # Strip any prefill padding before handing off to the cross decoder.
        return x[:, :x.size(1) - pad, :]


class CrossDecoder(nn.Module):
    """Second half of YOCO: layers that all cross-attend to one shared KV
    projection of the self-decoder output ("you only cache once")."""

    def __init__(
        self,
        args: YOCOArgs,
        checkpoint_activations: bool = False
    ):
        super().__init__()
        self.args = args
        self.num_heads = args.n_attn_kv_heads
        self.head_dim = args.dim // args.n_attn_heads
        self.k_proj = ColumnParallelLinear(args.dim, self.head_dim * args.n_attn_kv_heads, bias=False, gather_output=False, init_method=init_method)
        self.v_proj = ColumnParallelLinear(args.dim, self.head_dim * args.n_attn_kv_heads, bias=False, gather_output=False, init_method=init_method)
        self.kv_layer_norm = RMSNorm(args.dim, eps=args.norm_eps)
        blocks = [DecoderLayer(args, is_cross_layer=True) for _ in range(args.n_layers // 2)]
        if checkpoint_activations:
            blocks = [checkpoint_wrapper(b) for b in blocks]
        self.layers = nn.ModuleList(blocks)
        self._precomputed_freqs_cis = None

    def build_rel_pos(self, x, start_pos):
        # Same lazy rotary table as SelfDecoder, sized by this module's head_dim.
        if self._precomputed_freqs_cis is None:
            angle = 1.0 / (self.args.rope_theta ** torch.linspace(0, 1, self.head_dim // 2, dtype=torch.float, device=x.device))
            index = torch.arange(self.args.max_seq_len).to(angle)
            self._precomputed_freqs_cis = index[:, None] * angle

        table = self._precomputed_freqs_cis[start_pos:start_pos + x.size(1)]
        return (torch.cos(table).to(x.dtype), torch.sin(table).to(x.dtype))

    def forward(
        self,
        x,
        incremental_state=None,
        start_pos=0,
        skip_cross_decoder=False,
    ):
        bsz, seqlen, embed_dim = x.size()
        x_norm = self.kv_layer_norm(x)
        key, value = self.k_proj(x_norm), self.v_proj(x_norm)
        key = key.view(bsz, seqlen, self.num_heads, self.head_dim)
        value = value.view(bsz, seqlen, self.num_heads, self.head_dim)
        rel_pos = self.build_rel_pos(x, start_pos)
        key = apply_rotary_emb(key, *rel_pos, interleaved=True)
        if incremental_state is not None:
            # The shared KV cache is written even when the decoder body is
            # skipped, so later decoding steps can still attend to it.
            if "prev_key" not in incremental_state:
                incremental_state["prev_key"] = torch.empty(bsz, self.args.max_seq_len, self.num_heads, self.head_dim, device=x.device, dtype=x.dtype)
                incremental_state["prev_value"] = torch.empty(bsz, self.args.max_seq_len, self.num_heads, self.head_dim, device=x.device, dtype=x.dtype)
            incremental_state["prev_key"][:, start_pos : start_pos + seqlen] = key
            incremental_state["prev_value"][:, start_pos : start_pos + seqlen] = value
            key = incremental_state["prev_key"][:, : start_pos + seqlen]
            value = incremental_state["prev_value"][:, : start_pos + seqlen]

        if skip_cross_decoder:
            # Prompt prefill for all but the last token: only the cache matters.
            return torch.zeros(bsz, 1, embed_dim, device=x.device, dtype=x.dtype)
        for layer in self.layers:
            x = layer(
                x,
                key=key,
                value=value,
                rel_pos=rel_pos)

        return x


class YOCO(nn.Module):
    """YOCO decoder-only language model: a self-decoder that builds one
    shared KV cache, followed by a cross-decoder that reuses it."""

    def __init__(
        self,
        args: YOCOArgs,
        checkpoint_activations: bool = False,
        share_input_output_embed: bool = False,
    ):
        super().__init__()
        self.args = args
        self.embed_scale = math.sqrt(args.dim)
        self.embed_tokens = VocabParallelEmbedding(
            args.vocab_size, args.dim, -1, init_method=vocab_init_method
        )
        self.output_projection = nn.Linear(args.dim, args.vocab_size, bias=False)
        if share_input_output_embed:
            # Tie softmax weights to the input embedding matrix.
            self.output_projection.weight = self.embed_tokens.weight

        self.self_decoder = SelfDecoder(args, checkpoint_activations)
        self.cross_decoder = CrossDecoder(args, checkpoint_activations)
        self.layer_norm = RMSNorm(args.dim, eps=args.norm_eps)

    def forward(
        self,
        x,
        start_pos=0,
        incremental_state=None,
        is_prefilling=True,
        skip_cross_decoder=False
    ):
        h = self.embed_scale * self.embed_tokens(x)

        h = self.self_decoder(
            h,
            incremental_state=incremental_state,
            is_prefilling=is_prefilling,
            start_pos=start_pos,
        )

        h = self.cross_decoder(
            h,
            start_pos=start_pos,
            incremental_state=incremental_state,
            skip_cross_decoder=skip_cross_decoder,
        )

        h = self.layer_norm(h)
        return self.output_layer(h), None

    def output_layer(self, features):
        # Gather activations across model-parallel ranks before projecting.
        if self.args.model_parallel_size > 1:
            features = copy_to_model_parallel_region(features)
        return self.output_projection(features)
get_model_parallel_rank +) + +from fairseq.dataclass import FairseqDataclass +from fairseq.models import ( + FairseqIncrementalDecoder, + FairseqLanguageModel, + register_model, + register_model_architecture, +) + +from omegaconf import II + +from .decoder.transformer import ModelArgs, Transformer + +DEFAULT_MAX_TARGET_POSITIONS = 4096 + +@dataclass +class LanguageConfig(FairseqDataclass): + llama_model: Optional[str] = field( + default=None, + metadata={"help": "path to load tokenizer and config"}, + ) + load_ckpt: Optional[str] = field( + default=None, + metadata={"help": "path to load checkpoint from"}, + ) + init_from_config: bool = field( + default=False, + ) + dim: int = field( + default=1024, + ) + n_layers: int = field( + default=8, + ) + n_heads: int = field( + default=8, + ) + n_kv_heads: int = field( + default=2, + ) + batch_size: int = field( + default=1, + ) + rope_theta: Optional[float] = field( + default=10000.0, + ) + checkpoint_activations: bool = field( + default=False, metadata={"help": "checkpoint activations at each layer"} + ) + tokens_per_sample: int = II("task.tokens_per_sample") + model_parallel_size: int = II("common.model_parallel_size") + +@register_model("llama", dataclass=LanguageConfig) +class LanguageModel(FairseqLanguageModel): + def __init__(self, args, decoder, tokenizer): + self.args = args + self.tokenizer = tokenizer + super().__init__(decoder) + + @classmethod + def build_model(cls, args, task): + if not model_parallel_is_initialized(): + initialize_model_parallel(args.model_parallel_size) + + if not args.init_from_config: + params = { + "dim": args.dim, + "n_layers": args.n_layers, + "n_heads": args.n_heads, + "head_dim": args.dim // args.n_heads, + "n_kv_heads": args.n_kv_heads, + "hidden_dim": int(args.dim * 8 / 3), + "vocab_size": task.tokenizer.n_words, + "max_batch_size": args.batch_size, + "max_seq_len": args.tokens_per_sample, + "model_parallel_size": args.model_parallel_size, + "load_checkpoint": args.load_ckpt is not 
None, + "rope_theta": args.rope_theta, + } + model_args: ModelArgs = ModelArgs( + **params, + ) + else: + with open(os.path.join(args.llama_model, "params.json"), "r") as f: + params = json.load(f) + model_args = ModelArgs(**params) + model_args.max_batch_size = args.batch_size + model_args.max_seq_len = args.tokens_per_sample + model_args.model_parallel_size = args.model_parallel_size + model_args.load_checkpoint = args.load_ckpt is not None + model = Transformer( + model_args, + mp_rank=get_model_parallel_rank(), + checkpoint_activations=args.checkpoint_activations, + ) + if args.load_ckpt is not None: + loaded = torch.load(args.load_ckpt, mmap=True) + model.load_state_dict(loaded, assign=True) + + model = LLaMA(model) + return cls(args, model, task.tokenizer) + +class LLaMA(FairseqIncrementalDecoder): + def __init__(self, model): + super().__init__(None) + self.model = model + + def forward(self, src_tokens, start_pos = 0, **kwargs): + padding = src_tokens < 0 + src_tokens = torch.where(padding, torch.zeros_like(src_tokens), src_tokens) + return self.model.forward(src_tokens, start_pos, **kwargs) + + def max_positions(self): + return self.model.args.max_seq_len + +@register_model_architecture("llama", "llama_from_scratch") +def llama_from_scratch(args): + args.init_from_config = getattr(args, "init_from_config", False) + args.dim = getattr(args, "dim", 1024) + args.n_layers = getattr(args, "n_layers", 8) + args.n_heads = getattr(args, "n_heads", 8) + args.n_kv_heads = getattr(args, "n_kv_heads", 2) + +@register_model_architecture("llama", "llama_from_ckpt") +def llama_from_ckpt(args): + args.init_from_config = getattr(args, "init_from_config", True) + + + \ No newline at end of file diff --git a/YOCO/yoco/models/yoco.py b/YOCO/yoco/models/yoco.py new file mode 100644 index 000000000..580d15bd2 --- /dev/null +++ b/YOCO/yoco/models/yoco.py @@ -0,0 +1,158 @@ +import os +import json +import logging +from dataclasses import dataclass, field +from typing import 
Optional + +import torch +from fairseq import distributed_utils, utils +from fairseq.dataclass import FairseqDataclass +from fairseq.models import ( + FairseqIncrementalDecoder, + FairseqLanguageModel, + register_model, + register_model_architecture, +) + +from omegaconf import II + +from fairseq.model_parallel.megatron.mpu import ( + initialize_model_parallel, + model_parallel_is_initialized +) +from .decoder.yoco import YOCO, YOCOArgs + +DEFAULT_MAX_TARGET_POSITIONS = 4096 +logger = logging.getLogger(__name__) + + +@dataclass +class LanguageConfig(FairseqDataclass): + yoco_model: Optional[str] = field( + default=None, + metadata={"help": "path to load params from"}, + ) + load_ckpt: Optional[str] = field( + default=None, + metadata={"help": "path to load checkpoint from"}, + ) + dim: int = field( + default=1024, + ) + hidden_dim: int = field( + default=3072, + ) + n_layers: int = field( + default=24, + ) + n_self_heads: int = field( + default=4, + ) + n_attn_heads: int = field( + default=8, + ) + n_attn_kv_heads: Optional[int] = field( + default=None, + ) + batch_size: int = field( + default=1, + ) + share_input_output_embed: bool = field( + default=False, metadata={"help": "share decoder input and output embeddings"} + ) + sliding_window: Optional[bool] = field( + default=None, + ) + rope_theta: Optional[float] = field( + default=10000.0, + ) + checkpoint_activations: bool = field( + default=False, metadata={"help": "checkpoint activations at each layer"} + ) + tokens_per_sample: int = II("task.tokens_per_sample") + model_parallel_size: int = II("common.model_parallel_size") + + +@register_model("yoco", dataclass=LanguageConfig) +class LanguageModel(FairseqLanguageModel): + def __init__(self, args, decoder, tokenizer): + self.args = args + self.tokenizer = tokenizer + super().__init__(decoder) + + @classmethod + def build_model(cls, args, task): + if not model_parallel_is_initialized(): + initialize_model_parallel(args.model_parallel_size) + + if args.yoco_model 
is None: + params = { + "dim": args.dim, + "n_layers": args.n_layers, + "n_self_heads": args.n_self_heads, + "n_attn_heads": args.n_attn_heads, + "n_attn_kv_heads": args.n_attn_kv_heads, + "hidden_dim": args.hidden_dim, + "vocab_size": task.tokenizer.n_words, + "max_batch_size": args.batch_size, + "max_seq_len": args.tokens_per_sample, + "model_parallel_size": args.model_parallel_size, + "load_checkpoint": args.load_ckpt is not None, + "rope_theta": args.rope_theta, + } + model_args: YOCOArgs = YOCOArgs( + **params, + ) + else: + with open(os.path.join(args.yoco_model, "params.json"), "r") as f: + params = json.load(f) + model_args = YOCOArgs(**params) + model_args.max_batch_size = args.batch_size + model_args.max_seq_len = args.tokens_per_sample + model_args.model_parallel_size = args.model_parallel_size + model_args.load_checkpoint = args.load_ckpt is not None + + model = YOCO( + model_args, + checkpoint_activations=args.checkpoint_activations, + ) + if args.load_ckpt is not None: + loaded = torch.load(args.load_ckpt, mmap=True) + model.load_state_dict(loaded, assign=True) + model = YOCOModel(model) + return cls(args, model, task.tokenizer) + +class YOCOModel(FairseqIncrementalDecoder): + def __init__(self, model): + super().__init__(None) + self.model = model + + def forward(self, src_tokens, **kwargs): + return self.model.forward(src_tokens, **kwargs) + + def max_positions(self): + return self.model.args.max_seq_len + +def default(args): + args.n_attn_kv_heads = getattr(args, "n_attn_kv_heads", args.n_attn_heads) + args.sliding_window = getattr(args, "sliding_window", False) + args.rope_theta = getattr(args, "rope_theta", 10000.0) + args.share_input_output_embed = getattr( + args, "share_input_output_embed", False + ) + args.checkpoint_activations = getattr(args, "checkpoint_activations", False) + + +@register_model_architecture("yoco", "yoco_3b") +def yoco_3b(args): + args.dim = getattr(args, "dim", 3072) + args.hidden_dim = getattr(args, "hidden_dim", 8192) + 
args.n_layers = getattr(args, "n_layers", 26) + args.n_self_heads = getattr(args, "n_self_heads", 24) + args.n_attn_heads = getattr(args, "n_attn_heads", 24) + args.n_attn_kv_heads = getattr(args, "n_attn_kv_heads", 8) + default(args) + + + + diff --git a/YOCO/yoco/tasks/__init__.py b/YOCO/yoco/tasks/__init__.py new file mode 100644 index 000000000..1da9d1238 --- /dev/null +++ b/YOCO/yoco/tasks/__init__.py @@ -0,0 +1,32 @@ +import argparse +import importlib +import os + +# register dataclass +TASK_DATACLASS_REGISTRY = {} +TASK_REGISTRY = {} +TASK_CLASS_NAMES = set() + +# automatically import any Python files in the tasks/ directory +tasks_dir = os.path.dirname(__file__) +for file in os.listdir(tasks_dir): + path = os.path.join(tasks_dir, file) + if ( + not file.startswith("_") + and not file.startswith(".") + and (file.endswith(".py") or os.path.isdir(path)) + ): + task_name = file[: file.find(".py")] if file.endswith(".py") else file + module = importlib.import_module("tasks." + task_name) + + # expose `task_parser` for sphinx + if task_name in TASK_REGISTRY: + parser = argparse.ArgumentParser(add_help=False) + group_task = parser.add_argument_group("Task name") + # fmt: off + group_task.add_argument('--task', metavar=task_name, + help='Enable this task with: ``--task=' + task_name + '``') + # fmt: on + group_args = parser.add_argument_group("Additional command-line arguments") + TASK_REGISTRY[task_name].add_args(group_args) + globals()[task_name + "_parser"] = parser diff --git a/YOCO/yoco/tasks/data/__init__.py b/YOCO/yoco/tasks/data/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/YOCO/yoco/tasks/data/basic_loader.py b/YOCO/yoco/tasks/data/basic_loader.py new file mode 100644 index 000000000..d6f06f2ac --- /dev/null +++ b/YOCO/yoco/tasks/data/basic_loader.py @@ -0,0 +1,75 @@ +import torch +from infinibatch.iterators import CheckpointableIterator + +from . 
import utils + + +class BaseBatchGen(CheckpointableIterator): + """ + This is a base class for batch generators that use infinibatch + """ + + def __init__(self): + self._iter = None + self.epoch = 1 + self.next_epoch_idx = 1 + self.sharded_checkpoint = True + self.should_close_after_finished = True + + def _build_iter(self): + """ + Build infinibatch iterator and assign to self._iter + """ + raise NotImplementedError() + + def _move_to_tensor(self, batch): + def to_tensor(x): + return torch.tensor(x) + + return utils.apply_to_sample(to_tensor, batch) + + @property + def iterator(self): + if self._iter is None: + raise NotImplementedError("_build_iter() must called first") + return self._iter + + def __iter__(self): + if self._iter is None: + raise NotImplementedError("_build_iter() must called first") + return self._iter + + def __next__(self): + return next(self._iter) + + def setstate(self, value): + self._iter.setstate(value) + + def getstate(self): + return self._iter.getstate() + + def close(self): + self._iter.close() + + def __len__(self) -> int: + return 819200000 + + def next_epoch_itr( + self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True + ): + return self + + def end_of_epoch(self) -> bool: + return False + + def state_dict(self): + """Returns a dictionary containing a whole state of the iterator.""" + return self.getstate() + + def load_state_dict(self, state_dict): + """Copies the state of the iterator from the given *state_dict*.""" + self.setstate(state_dict) + + @property + def first_batch(self): + return "DUMMY" diff --git a/YOCO/yoco/tasks/data/llama_tokenizer.py b/YOCO/yoco/tasks/data/llama_tokenizer.py new file mode 100644 index 000000000..fad3d206b --- /dev/null +++ b/YOCO/yoco/tasks/data/llama_tokenizer.py @@ -0,0 +1,38 @@ +from pathlib import Path +from sentencepiece import SentencePieceProcessor +from typing import List + + +class LLaMATokenizer: + def __init__(self, model_path: str): + assert Path(model_path).exists(), 
model_path + self._model = SentencePieceProcessor(model_file=model_path) + assert self._model.vocab_size() == self._model.get_piece_size() + + @property + def n_words(self) -> int: + return self._model.vocab_size() + + @property + def bos_id(self) -> int: + return self._model.bos_id() + + @property + def eos_id(self) -> int: + return self._model.eos_id() + + @property + def pad_id(self) -> int: + return self._model.pad_id() + + def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: + assert isinstance(s, str) + t = self._model.encode(s) + if bos: + t = [self.bos_id, *t] + if eos: + t = [*t, self.eos_id] + return t + + def decode(self, t: List[int]) -> str: + return self._model.decode(t) \ No newline at end of file diff --git a/YOCO/yoco/tasks/data/lm_loader.py b/YOCO/yoco/tasks/data/lm_loader.py new file mode 100644 index 000000000..825a82239 --- /dev/null +++ b/YOCO/yoco/tasks/data/lm_loader.py @@ -0,0 +1,303 @@ +import os +import random +import math +import numpy as np +import json + +from infinibatch import iterators +from .utils import FixedBlockwiseShuffleIterator, NativeCheckpointableIterator, WeightNoRandomStateIterator +from .basic_loader import BaseBatchGen + + +class LMLoader(BaseBatchGen): + def __init__( + self, + args, + dataset, + tokenizer, + max_tokens=None, + max_sentences=None, + max_positions=None, + ignore_invalid_inputs=False, + required_batch_size_multiple=1, + seed=1, + epoch=1, + num_shards=1, + shard_id=0, + reject_sampling=1, + ): + super().__init__() + self.args = args + self.data = dataset.data + self.data_dir = dataset.data_dir + self.shuffle = dataset.shuffle + self.tokenizer = tokenizer + + self.max_tokens = max_tokens + self.max_sentences = max_sentences + self.max_positions = max_positions + self.tokens_per_sample = args.tokens_per_sample + self.mlm_cut_length = getattr(args, "mlm_cut_length", 0) + self.mlm_tokens_proportion = getattr(args, "mlm_tokens_proportion", 0) + self.pad_to_max_len = getattr(args, 
"pad_to_max_len", False) + self.ignore_invalid_inputs = ignore_invalid_inputs + self.required_batch_size_multiple = required_batch_size_multiple + self.seed = str(seed) + self.epoch = epoch + self.num_shards = num_shards + self.shard_id = shard_id + + self.batch_read_ahead = args.batch_read_ahead + self.sharded_checkpoint = True + + self._build_iter() + + def _build_iter(self): + tokenized_lines = self._tokenize() + self.padded_batches = self._batchify(tokenized_lines) + + prefetch_batches = iterators.PrefetchIterator( + self.padded_batches, + buffer_size=10, + buffer_in_main_process=True, + log_empty_buffer_warning=True and self.shard_id == 0, + ) + + prefetch_batches = iterators.MapIterator( + prefetch_batches, self._move_to_tensor + ) + + self._iter = prefetch_batches + + def _tokenize(self): + ''' + data: + { + 'source': list[Path], + } + ''' + dataset = list(zip(self.data['source'])) + + if self.shuffle: + chunk_files = \ + iterators.InfinitePermutationSourceIterator( + dataset, + seed=self.seed, + shuffle=self.shuffle, + num_instances=self.num_shards, + instance_rank=self.shard_id, + ) + else: + chunk_files = \ + iterators.ChunkedSourceIterator( + dataset, + num_instances=self.num_shards, + instance_rank=self.shard_id, + ) + + tokenized_lines = iterators.SelectManyIterator(chunk_files, lambda files: self._read_from_files(*files)) + tokenized_lines = iterators.SamplingRandomMapIterator(tokenized_lines, self._prepare, self.seed) + + return tokenized_lines + + def getstate(self): + state = super().getstate() + state["epoch"] = self.epoch + state["iterations_in_epoch"] = None + return state + + def _batchify(self, lines): + + if self.max_sentences is not None: + if self.batch_read_ahead > 0: + lines = FixedBlockwiseShuffleIterator(lines, self.batch_read_ahead, self.seed) + batches = iterators.FixedBatchIterator(lines, self.max_sentences) + else: + # - + def dynamic_batch_size(sample): + lengths = [len(x) for x in sample] + batch_size = self.max_tokens // 
max(lengths) // self.required_batch_size_multiple * self.required_batch_size_multiple + return max(1, batch_size) + + batches = iterators.BucketedReadaheadBatchIterator( + lines, + read_ahead=self.batch_read_ahead, + key=(lambda x: max(len(x[0]), len(x[1]))) if self.shuffle else None, + batch_size=dynamic_batch_size, + shuffle=self.shuffle, + seed=self.seed, + ) + + def collate(batch): + batch_size = len(batch) + gpt_max_length = max([len(x[0]) for x in batch]) + if self.pad_to_max_len: + gpt_max_length = self.tokens_per_sample + 1 + + gpt_source_ids = np.full(shape=(batch_size, gpt_max_length-1), dtype=np.int32, + fill_value=self.tokenizer.pad_id) + gpt_target_ids = np.full(shape=(batch_size, gpt_max_length-1), dtype=np.int32, + fill_value=self.tokenizer.pad_id) + gpt_input_mask_all = np.full(shape=(batch_size, gpt_max_length-1), dtype=np.int32, fill_value=0) + gpt_loss_mask_all = np.full(shape=(batch_size, gpt_max_length-1), dtype=np.int32, fill_value=1) + + for i, (gpt_ids, gpt_input_mask, gpt_loss_mask) in enumerate(batch): + gpt_source_ids[i, :len(gpt_ids)-1] = gpt_ids[:-1] + gpt_target_ids[i, :len(gpt_ids)-1] = gpt_ids[1:] + gpt_input_mask_all[i, :len(gpt_ids)-1] = gpt_input_mask[:-1] + gpt_loss_mask_all[i, :len(gpt_ids)-1] = gpt_loss_mask[1:] + + ret_batch = { + 'net_input': { + 'src_tokens': gpt_source_ids.astype(np.int64), + }, + 'target': gpt_target_ids.astype(np.int64), + 'nsentences': batch_size, + 'ntokens': sum([len(x[0]) for x in batch]), + } + + return ret_batch + + padded_batches = iterators.MapIterator( + batches, collate + ) + + return padded_batches + + def _prepare(self, doc): + gpt_input_mask = [0] * len(doc) + gpt_loss_mask = [1] * len(doc) + full_tokens = doc + return full_tokens, gpt_input_mask, gpt_loss_mask + + def _tokenize(self): + multilingual_iters = [] + weights = [] + + for data in self.data: + multilingual_iters.append( + self._tokenize_foreach_lang(data) + ) + if 'weight' in data: + weights.append(float(data['weight'])) + else: + 
weights.append(int(data['count'])) + + if len(multilingual_iters) == 1: + return multilingual_iters[0] + + sampling_iterator = WeightNoRandomStateIterator(weights, self.seed) + control_iterator = NativeCheckpointableIterator(sampling_iterator) + tokenized_lines = iterators.MultiplexIterator(control_iterator, multilingual_iters) + + return tokenized_lines + + def _tokenize_foreach_lang(self, data): + # if 'epoch' in data: + _random = random.Random(self.seed) + if 'source' not in data or len(data['source']) == 0: + # load source from single file, format: self.data_dir/json/{name}.json + file_path = os.path.join(self.data_dir, 'json', f"{data['name']}.json") + if not os.path.exists(file_path): + raise FileNotFoundError(f"file {file_path} not exists") + with open(file_path, 'r', encoding='utf8') as f: + data_source = json.load(f) + data['source'] = data_source + data_source = data['source'] + epoch_num = 50 + temp_list = math.ceil(epoch_num) * data_source + _random.shuffle(temp_list) + dataset = list(zip(temp_list)) + # print('data name: ', data['name'], 'len(dataset): ', len(dataset)) + chunk_files = iterators.ChunkedSourceIterator( + dataset, + num_instances=self.num_shards, + instance_rank=self.shard_id,) + + tokenized_lines = iterators.SelectManyIterator(chunk_files, lambda files: self._read_from_files(*files)) + tokenized_lines = iterators.MapIterator(tokenized_lines, self._prepare) + + return tokenized_lines + + @staticmethod + def _doc_to_ids(text, tokenizer=None): + tokenized_ids = [] # list of list of ids + lines = text.split('\n\n') + for line_idx, line in enumerate(lines): + suffix = '\n\n' if line_idx != len(lines) - 1 else '' + if len(line) == 0: + continue + + sublines = line.split('\n') + for idx, subline in enumerate(sublines): + if len(subline) > 200000: + continue + if len(subline) == 0: + continue + if idx == len(sublines) - 1: + tokenized_ids.append(tokenizer.encode(subline + suffix)) + else: + tokenized_ids.append(tokenizer.encode(subline + '\n')) 
+ + tokenized_ids[-1].append(tokenizer.eos_id) + return tokenized_ids + + def _read_lines(self, file_path): + try: + with open(file_path, 'r', encoding='utf8') as f: + lines = f.read().strip().split('\n') + except: + return iter([]) # skip bad file + return lines + + def _read_from_files(self, source_file): + data = [] + if self.args.absolute_path: + file_path = source_file + else: + file_path = os.path.join(self.data_dir, source_file) + + if not os.path.exists(file_path): + print('| file {} not exists'.format(file_path), flush=True) + return iter([]) # skip bad file + + lines = self._read_lines(file_path) + + tokenized_ids = [] + for doc_jsonstr in lines: + try: + json_obj = json.loads(doc_jsonstr) + + if 'text' in json_obj: + text = json_obj['text'] + elif 'content' in json_obj: + text = json_obj['content'] + elif 'raw_content_lines' in json_obj: + text = "\n".join(json_obj['raw_content_lines']) + else: + print('no text in json_obj') + + if len(text) == 0: + continue + ret = LMLoader._doc_to_ids(text, self.tokenizer) + tokenized_ids.extend(ret) + except Exception as e: + print(source_file, flush=True) + print(e, flush=True) + + # ################################################### + + doc = [self.tokenizer.bos_id] + for ids in tokenized_ids: + if len(doc) + len(ids) > self.tokens_per_sample + 1: + doc.extend(ids) + doc = doc[:self.tokens_per_sample + 1] + data.append(doc) + doc = [self.tokenizer.bos_id] + else: + doc.extend(ids) + + # if len(doc) > 1 and len(doc) <= self.tokens_per_sample + 1: + # data.append(doc) + return data + diff --git a/YOCO/yoco/tasks/data/tiktoken_tokenizer.py b/YOCO/yoco/tasks/data/tiktoken_tokenizer.py new file mode 100644 index 000000000..3a041cc09 --- /dev/null +++ b/YOCO/yoco/tasks/data/tiktoken_tokenizer.py @@ -0,0 +1,81 @@ +import tiktoken +from typing import List + + +class TiktokenTokenizer: + def __init__(self, + tiktoken_model: str, + tokenizer_pad_to_multiple: int = 8, + bos="", + pad="", + eos="", + unk="", + ): + 
self.symbols = [bos, pad, eos, unk] + self.indices = {s: i for i, s in enumerate(self.symbols)} + self.tokenizer_pad_to_multiple = tokenizer_pad_to_multiple + cl100k_base = tiktoken.get_encoding(tiktoken_model) + self._model = tiktoken.Encoding( + # If you're changing the set of special tokens, make sure to use a different name + # It should be clear from the name what behaviour to expect. + name="cl100k_im", + pat_str=cl100k_base._pat_str, + mergeable_ranks=cl100k_base._mergeable_ranks, + special_tokens={ + **cl100k_base._special_tokens, + "": 100264, + "": 100265, + "": 100266, + "": 100267, + "": 100268, + "": 100269, + "": 100270, + "": 100271, + "": 100272, + "": 100273, + "": 100274, + "": 100275, + "": 100276, + "": 100277, + "": 100278, + "": 100279, + "": 100280, + "": 100281, + } + ) + + @property + def n_words(self) -> int: + n_words = self._model.n_vocab + len(self.symbols) + n_words = (n_words + self.tokenizer_pad_to_multiple - 1) // self.tokenizer_pad_to_multiple * self.tokenizer_pad_to_multiple + return n_words + + @property + def bos_id(self) -> int: + return self.indices[""] + + @property + def eos_id(self) -> int: + return self.indices[""] + + @property + def pad_id(self) -> int: + return self.indices[""] + + @property + def unk_id(self) -> int: + return self.indices[""] + + def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: + assert isinstance(s, str) + t = self._model.encode(s, allowed_special="all") + t = [i + len(self.symbols) for i in t] + if bos: + t = [self.bos_id, *t] + if eos: + t = [*t, self.eos_id] + return t + + def decode(self, t: List[int]) -> str: + t = [i - len(self.symbols) for i in t if i >= len(self.symbols)] + return self._model.decode(t) \ No newline at end of file diff --git a/YOCO/yoco/tasks/data/utils.py b/YOCO/yoco/tasks/data/utils.py new file mode 100644 index 000000000..fd850d73f --- /dev/null +++ b/YOCO/yoco/tasks/data/utils.py @@ -0,0 +1,267 @@ +import collections +from random import Random 
+from typing import Dict, Iterable, Optional + +import torch +import numpy as np +from infinibatch import iterators +from infinibatch.iterators import CheckpointableIterator, FixedBatchIterator, SelectManyIterator, MapIterator + +from fairseq.data import BaseWrapperDataset, FairseqDataset, data_utils + +def apply_to_sample(f, sample): + if hasattr(sample, "__len__") and len(sample) == 0: + return {} + + def _apply(x): + if isinstance(x, np.ndarray): + return f(x) + elif isinstance(x, collections.OrderedDict): + # OrderedDict has attributes that needs to be preserved + od = collections.OrderedDict( + (key, _apply(value)) for key, value in x.items() + ) + od.__dict__ = x.__dict__ + return od + elif isinstance(x, dict): + return {key: _apply(value) for key, value in x.items()} + elif isinstance(x, list): + return [_apply(x) for x in x] + elif isinstance(x, tuple): + return tuple(_apply(x) for x in x) + elif isinstance(x, set): + return {_apply(x) for x in x} + else: + return x + + return _apply(sample) + + +class NativeCheckpointableIterator(iterators.CheckpointableIterator): + def __init__(self, iterable: Iterable): + self._input_iterable = iterable + self.setstate(None) + + def getstate(self) -> Dict: + return {"num_items_yielded": self._num_items_yielded} + + def setstate(self, checkpoint: Optional[Dict]): + self._iterator = iter(self._input_iterable) + self._num_items_yielded = ( + iterators._advance_iterator(self._iterator, checkpoint["num_items_yielded"]) + if checkpoint is not None + else 0 + ) + + def __next__(self): + item = next(self._iterator) + self._num_items_yielded += 1 + return item + + def close(self): + pass + + +class WeightIterator(object): + def __init__(self, weights, seed): + self.weights = weights + self.seed = seed + self.control_index = list(range(len(weights))) + self.setstate(None) + + def __iter__(self): + return self + + def getstate(self): + return {"random_state": self._random_state} + + def setstate(self, checkpoint): + 
self._random_state = checkpoint["random_state"] if checkpoint else None + self._random = ( + None # this will trigger the lazy initialization in self.__next__ + ) + + def __next__(self): + if self._random is None: + self._random = Random(self.seed) + if self._random_state is not None: + self._random.setstate(self._random_state) + idx = self._random.choices(self.control_index, self.weights)[0] + self._random_state = self._random.getstate() + return idx + + def close(self): + pass + + +def FixedBlockwiseShuffleIterator(source_iterator: CheckpointableIterator, block_size: int, seed: int=0): + """ + Shuffles a sequence of items by grouping consecutive items in blocks of fixed size, shuffling + each block, and yielding the shuffled items of all blocks as a flat sequence. + + E.g. [1, 2, 3, 4, 5, 6, 7, 8] with block_size = 3 may yield [3, 1, 2, 4, 6, 5, 8, 7]. + + Args: + source_iterator: checkpointable iterator or restartable iterable over input items to shuffle + block_size: size of the buffer in number of items used for shuffling + seed: random seed used for shuffling (or None) + """ + # This is implemented as a pipeline: + # - group N consecutive items together + # - shuffle them + # - flatten the result + blocks = FixedBatchIterator(source_iterator, batch_size=block_size) + def shuffle_block_fn(block): + _random = Random(seed) + _random.shuffle(block) + return block + shuffled_blocks = MapIterator(blocks, transform=shuffle_block_fn) + # samples = SelectManyNoSkipIterator(shuffled_blocks, collection_selector=lambda shuffled_block: iter(shuffled_block)) + samples = SelectManyIterator(shuffled_blocks, collection_selector=lambda shuffled_block: iter(shuffled_block)) + return samples + + +class IndexIterator(object): + def __init__(self, num): + self.num = num + self.setstate(None) + + def __iter__(self): + return self + + def getstate(self): + return {'num_items_yielded': self._num_items_yielded} + + def setstate(self, checkpoint): + self._num_items_yielded 
=checkpoint['num_items_yielded'] if checkpoint is not None else 0 + + def __next__(self): + item = self._num_items_yielded % self.num + self._num_items_yielded += 1 + return item + + def close(self): + pass + + +class WeightNoRandomStateIterator(object): + def __init__(self, weights, seed): + self.weights = weights + self.seed = seed + self.control_index = list(range(len(weights))) + self.setstate(None) + + def __iter__(self): + return self + + def getstate(self): + return {'num_items_yielded': self._num_items_yielded} + + def setstate(self, checkpoint): + self._num_items_yielded =checkpoint['num_items_yielded'] if checkpoint is not None else 0 + + def __next__(self): + self._random = Random(int(self.seed) + self._num_items_yielded) + idx = self._random.choices(self.control_index, self.weights)[0] + self._num_items_yielded += 1 + return idx + + def close(self): + pass + + +class SelectManyNoSkipIterator(CheckpointableIterator): + """ + Projects each element of a source sequence to a sequence and flattens the resulting sequences into one sequence. + """ + def __init__(self, source_iterator: CheckpointableIterator, collection_selector=None): + """ + Args: + source_iterator: iterator over the items to pass to collection_selector() + collection_selector: user callback that maps an item into an Iterable, whose items will be yielded. + The returned Iterator is used only once. Hence, it is also allowed to + return self-iterables, such as iterators and generator expressions. + If None is given, no callback is applied. 
+ """ + if not isinstance(source_iterator, CheckpointableIterator): + raise ValueError('source_iterator has to be a CheckpointableIterator') + self._source_iterator = source_iterator # type: CheckpointableIterator + self._collection_selector = collection_selector + self.setstate(None) + + def getstate(self) -> Dict: + return {'source_state': self._source_state, + 'flattened_items_yielded': self._flattened_items_yielded} + + def setstate(self, checkpoint: Optional[Dict]): + self._source_state = checkpoint['source_state'] if checkpoint else None + self._flattened_items_yielded = 0 + self._source_iterator.setstate(self._source_state) + def _generate(): + skip_to_checkpoint = self._flattened_items_yielded + # main loop over source source_items + for source_item in self._source_iterator: + if self._collection_selector is not None: + data = iter(self._collection_selector(source_item)) + else: + data = iter(source_item) + self._flattened_items_yielded = 0 + # if skip_to_checkpoint: + # #print("Skipping to index", skip_to_checkpoint, file=sys.stderr) + # self._flattened_items_yielded += _advance_iterator(data, skip_to_checkpoint) + # skip_to_checkpoint = 0 + # main loop over lines + for item in data: + self._flattened_items_yielded += 1 + yield item + self._source_state = self._source_iterator.getstate() + self._iterator = _generate() + + def __next__(self): + return next(self._iterator) + + def close(self): + self._source_iterator.close() + + +class RawArrayDataset(FairseqDataset): + + def __init__(self, dataset, datatype="token"): + super().__init__() + self.dataset = dataset + self.datatype = datatype + if hasattr(dataset, 'sizes'): + self._sizes = dataset.sizes + else: + try: + self._sizes = np.array([len(x) for x in self.dataset]) + except: + self._sizes = np.array([1 for x in self.dataset]) + + def __getitem__(self, index): + if type(self.dataset[index][0]) != list: + if self.datatype == "token": + return torch.Tensor(self.dataset[index]).long() + else: + return 
torch.Tensor(self.dataset[index]).bool() + else: + return self.dataset[index] + + def __len__(self): + return len(self.dataset) + + def collater(self, samples): + if hasattr(self.dataset, 'collater'): + return self.dataset.collater(samples) + else: + raise NotImplementedError() + + @property + def sizes(self): + return self._sizes + + def num_tokens(self, index): + return self.dataset.num_tokens(index) + + def size(self, index): + return self.dataset.size(index) diff --git a/YOCO/yoco/tasks/gpt.py b/YOCO/yoco/tasks/gpt.py new file mode 100644 index 000000000..70dd10283 --- /dev/null +++ b/YOCO/yoco/tasks/gpt.py @@ -0,0 +1,176 @@ +import os +from typing import Optional +import json +from argparse import Namespace +import torch + +from fairseq.tasks import register_task, FairseqDataclass, FairseqTask +from dataclasses import dataclass, field +from omegaconf import II + +from .data.lm_loader import LMLoader +from .data.tiktoken_tokenizer import TiktokenTokenizer +from .data.llama_tokenizer import LLaMATokenizer + + +@dataclass +class GPTLanguageModelingConfig(FairseqDataclass): + data: Optional[str] = field( + default=None, metadata={"help": "path to data directory"} + ) + tokens_per_sample: int = field( + default=1024, + metadata={"help": "max number of tokens per sample for LM dataset"}, + ) + max_target_positions: Optional[int] = field( + default=None, metadata={"help": "max number of tokens in the target sequence"} + ) + llama_model: Optional[str] = field( + default=None, + metadata={"help": "path to load tokenizer and config"}, + ) + tiktoken_model: Optional[str] = field( + default=None, + metadata={ + "help": "tiktoken model to tokenize the data" + }, + ) + batch_read_ahead: int = field( + default=10000, + metadata={"help": "batch read ahead size for infinibatch"}, + ) + pad_to_max_len: bool = field( + default=False, + metadata={"help": "pad each sentence to max length"}, + ) + absolute_path: bool = field( + default=False, + metadata={"help": "use absolute path 
in data config"}, + ) + tokenizer_pad_to_multiple: int = field( + default=8, + metadata={"help": "pad to multiple of this value"}, + ) + seed: int = II("common.seed") + batch_size: Optional[int] = II("dataset.batch_size") + + +@register_task('gpt', dataclass=GPTLanguageModelingConfig) +class GPTPretrainingTask(FairseqTask): + def __init__(self, args, tokenizer): + super().__init__(args) + self.cfg = args + self.tokenizer = tokenizer + + @classmethod + def setup_task(cls, cfg, **kwargs): + """Setup the task (e.g., load dictionaries). + + Args: + args (argparse.Namespace): parsed command-line arguments + """ + if cfg.llama_model is not None: + tokenizer = LLaMATokenizer(os.path.join(cfg.llama_model, "tokenizer.model")) + elif cfg.tiktoken_model is not None: + tokenizer = TiktokenTokenizer(cfg.tiktoken_model, cfg.tokenizer_pad_to_multiple) + else: + raise ValueError("No tokenizer model provided") + + return cls(cfg, tokenizer) + + def load_dataset(self, split, epoch=1, combine=False, **kwargs): + self.datasets[split] = { + 'data': json.load(open(f'{self.cfg.data}/json/{split}.json')), + 'data_dir': self.cfg.data, + 'shuffle': True if split == 'train' else False, + } + self.datasets[split] = Namespace(**self.datasets[split]) + + def dataset(self, split): + if split not in self.datasets: + raise KeyError("Dataset not loaded: " + split) + + return self.datasets[split] + + def get_batch_iterator( + self, + dataset, + max_tokens=None, + max_sentences=None, + max_positions=None, + ignore_invalid_inputs=False, + required_batch_size_multiple=1, + seed=1, + num_shards=1, + shard_id=0, + num_workers=0, + epoch=1, + data_buffer_size=0, + disable_iterator_cache=False, + skip_remainder_batch=False, + grouped_shuffling=False, + update_epoch_batch_itr=False + ): + return LMLoader( + self.cfg, + dataset, + self.tokenizer, + max_tokens=max_tokens, + max_sentences=max_sentences, + max_positions=max_positions, + ignore_invalid_inputs=ignore_invalid_inputs, + 
required_batch_size_multiple=required_batch_size_multiple, + seed=seed, + epoch=epoch, + num_shards=num_shards, + shard_id=shard_id, + ) + + def train_step( + self, sample, model, criterion, optimizer, update_num, ignore_grad=False + ): + """ + Do forward and backward, and return the loss as computed by *criterion* + for the given *model* and *sample*. + + Args: + sample (dict): the mini-batch. The format is defined by the + :class:`~fairseq.data.FairseqDataset`. + model (~fairseq.models.BaseFairseqModel): the model + criterion (~fairseq.criterions.FairseqCriterion): the criterion + optimizer (~fairseq.optim.FairseqOptimizer): the optimizer + update_num (int): the current update + ignore_grad (bool): multiply loss by 0 if this is set to True + + Returns: + tuple: + - the loss + - the sample size, which is used as the denominator for the + gradient + - logging outputs to display while training + """ + model.train() + model.set_num_updates(update_num) + with torch.autograd.profiler.record_function("forward"): + loss, sample_size, logging_output = criterion(model, sample) + if ignore_grad: + loss *= 0 + with torch.autograd.profiler.record_function("backward"): + optimizer.backward(loss) + return loss, sample_size, logging_output + + def valid_step(self, sample, model, criterion): + model.eval() + with torch.no_grad(): + loss, sample_size, logging_output = criterion(model, sample) + return loss, sample_size, logging_output + + @property + def target_dictionary(self): + padding_idx = self.tokenizer.pad_id + class Dict: + def pad(self): + return padding_idx + dictionary = Dict() + return dictionary + diff --git a/YOCO/yoco/tasks/harness_eval.py b/YOCO/yoco/tasks/harness_eval.py new file mode 100644 index 000000000..0b0621aea --- /dev/null +++ b/YOCO/yoco/tasks/harness_eval.py @@ -0,0 +1,151 @@ +import os +from typing import Optional +import logging + +from fairseq.data import ( + IdDataset, + NumSamplesDataset, + NumelDataset, + NestedDictionaryDataset, + NumelDataset, + 
RightPadDataset, + RawLabelDataset, +) + +from fairseq.tasks import register_task, FairseqDataclass, LegacyFairseqTask +from dataclasses import dataclass, field + +from .data.tiktoken_tokenizer import TiktokenTokenizer +from .data.llama_tokenizer import LLaMATokenizer +from .data.utils import RawArrayDataset + +from .harness_task import HarnessAnlir1, HarnessAnlir2, HarnessAnlir3, HarnessArc_challenge, HarnessArc_easy, HarnessBoolq, HarnessCopa, HarnessOpenbookqa, HarnessPiqa, HarnessRte, HarnessWic, HarnessWinogrande, HarnessHellaswag, HarnessRecord, HarnessTruthfullqaMC1, HarnessTruthfullqaMC2, HarnessSCIQ +from .harness_task import HarnessArc_challenge25s, HarnessHellaswag10s + + +logger = logging.getLogger(__name__) + +task_map = { + "harness_anli_r1": HarnessAnlir1, + "harness_anli_r2": HarnessAnlir2, + "harness_anli_r3": HarnessAnlir3, + "harness_boolq": HarnessBoolq, + "harness_copa": HarnessCopa, + "harness_openbookqa": HarnessOpenbookqa, + "harness_piqa": HarnessPiqa, + "harness_rte": HarnessRte, + "harness_wic": HarnessWic, + "harness_winogrande": HarnessWinogrande, + "harness_hellaswag": HarnessHellaswag, + "harness_arc_challenge": HarnessArc_challenge, + "harness_arc_easy": HarnessArc_easy, + "harness_record": HarnessRecord, + "harness_truthfullqa_mc1": HarnessTruthfullqaMC1, + "harness_truthfullqa_mc2": HarnessTruthfullqaMC2, + "harness_arc_challenge_25s": HarnessArc_challenge25s, + "harness_hellaswag_10s": HarnessHellaswag10s, + "harness_sciq": HarnessSCIQ, +} + +from .mmlu_task import create_mmlu_tasks +mmlu_tasks = create_mmlu_tasks() +task_map.update(mmlu_tasks) + +@dataclass +class HarnessEvalConfig(FairseqDataclass): + data_dir: str = field( + default="/mnt/msranlp/shaohanh/data/fs_eval/harness/", + metadata={"help": "path to data directory"}, + ) + eval_data: str = field(default="", metadata={"help": "dataset name"}) + tokens_per_sample: int = field( + default=2048, + metadata={"help": "max number of tokens per sample for LM dataset"}, + ) + 
max_target_positions: Optional[int] = field( + default=None, metadata={"help": "max number of tokens in the target sequence"} + ) + llama_model: Optional[str] = field( + default=None, + metadata={"help": "path to load tokenizer and config"}, + ) + tiktoken_model: Optional[str] = field( + default=None, + metadata={ + "help": "tiktoken model to tokenize the data" + }, + ) + tokenizer_pad_to_multiple: int = field( + default=8, + metadata={"help": "pad to multiple of this value"}, + ) + + +@register_task('harness_eval', dataclass=HarnessEvalConfig) +class HarnessEval(LegacyFairseqTask): + + def __init__(self, cfg, tokenizer): + super().__init__(cfg) + self.cfg = cfg + self.tokenizer = tokenizer + self.harness_task = task_map[self.cfg.eval_data](tokenizer=self.tokenizer, data_dir=cfg.data_dir, tokens_per_sample=cfg.tokens_per_sample) + + @classmethod + def setup_task(cls, cfg, **kwargs): + if cfg.llama_model is not None: + tokenizer = LLaMATokenizer(os.path.join(cfg.llama_model, "tokenizer.model")) + elif cfg.tiktoken_model is not None: + tokenizer = TiktokenTokenizer(cfg.tiktoken_model, cfg.tokenizer_pad_to_multiple) + else: + raise ValueError("No tokenizer model provided") + + return cls(cfg, tokenizer) + + def load_dataset(self, split, combine=False, **kwargs): + src_tokens, gpt_loss_mask, label_length, labels = self.harness_task.get_data_for_evaluation() + + src_tokens = RawArrayDataset(src_tokens) + gpt_loss_mask = RawArrayDataset(gpt_loss_mask, datatype="mask") + label_length = RawLabelDataset(label_length) + label_ids = RawLabelDataset(labels) + ''' + Input format: src_tokens + option_tokens + ''' + data_dict = { + 'id': IdDataset(), + 'net_input': { + 'src_tokens': RightPadDataset( + src_tokens, + pad_idx=self.tokenizer.pad_id, + ), + 'gpt_loss_mask': RightPadDataset( + gpt_loss_mask, + pad_idx=False, + ), + 'label_length': label_length, + 'src_lengths': NumelDataset(src_tokens, reduce=False), + }, + 'targets': label_ids, + 'nsentences': NumSamplesDataset(), + 
'ntokens': NumelDataset(src_tokens, reduce=True), + } + dataset = NestedDictionaryDataset( + data_dict, + sizes=[src_tokens.sizes], + ) + + print('| Loaded {} with {} samples'.format(split, len(dataset))) + + self.datasets[split] = dataset + return self.datasets[split] + + @property + def target_dictionary(self): + padding_idx = self.tokenizer.pad_id + class Dict: + def pad(self): + return padding_idx + dictionary = Dict() + return dictionary + + \ No newline at end of file diff --git a/YOCO/yoco/tasks/harness_task.py b/YOCO/yoco/tasks/harness_task.py new file mode 100644 index 000000000..3e87f96a2 --- /dev/null +++ b/YOCO/yoco/tasks/harness_task.py @@ -0,0 +1,289 @@ +import json +import numpy as np + +class HarnessBaseTask: + def __init__(self, tokenizer, data_dir, tokens_per_sample=1024): + self.tokenizer = tokenizer + self.class_num = 1 + self.tokens_per_sample = tokens_per_sample + self.base_dir = data_dir + self.set_dataname() + self.set_class_num() + self.dataset = self.load_data() + + def load_data(self): + import os + datasets = [] + with open(os.path.join(self.base_dir, self.dataname), "r", encoding='utf-8') as fin: + for line in fin: + obj = json.loads(line) + datasets.append( + { + "text": obj["ctx"] if "ctx" in obj else None, + "label": obj["label"] if "label" in obj else None, + "choices": obj["choices"] if "choices" in obj else [], + "gold": obj["gold"] if "gold" in obj else None, + "raw": obj, + } + ) + return datasets + + def set_class_num(self): + raise NotImplementedError + + def set_dataname(self): + raise NotImplementedError + + def preprocess_example(self, example): + raise NotImplementedError + + def get_data_for_evaluation(self): + src_tokens = [] + gpt_loss_mask = [] + label_length = [] + labels = [] + cut_num = 0 + for i, example in enumerate(self.dataset): + input_str, label_str, label = self.preprocess_example(example) + if i < 2: + print(f"input str is {input_str}") + print(f"label str is {label_str}") + + for j in range(len(input_str)): 
+ sub_input_str, sub_label_str = input_str[j], label_str[j] + input_token = self.tokenizer.encode(sub_input_str) + label_token = self.tokenizer.encode(sub_input_str + sub_label_str)[len(input_token):] + if len(input_token) + len(label_token) + 1 >= self.tokens_per_sample: + cut_num += 1 + input_token = input_token[-(self.tokens_per_sample - len(label_token) - 1):] + + src_tokens.append([self.tokenizer.bos_id] + input_token + label_token) + gpt_loss_mask.append([False] * (len(input_token) + 1) + [True] * len(label_token)) + label_length.append(len(sub_label_str.strip())) + labels.append(label) + + if cut_num > 0: + print(f"cut {cut_num} examples") + + return np.array(src_tokens), np.array(gpt_loss_mask), np.array(label_length), np.array(labels) + + +class HarnessAnlir1(HarnessBaseTask): + def set_class_num(self): + self.class_num = 3 + + def set_dataname(self): + self.dataname = "anli_r1" + + def preprocess_example(self, example): + input_str = [example["text"]] * self.class_num + answer_str = [" True", " Neither", " False"] + label = example["label"] + return input_str, answer_str, label + +class HarnessAnlir2(HarnessAnlir1): + def set_dataname(self): + self.dataname = "anli_r2" + +class HarnessAnlir3(HarnessAnlir1): + def set_dataname(self): + self.dataname = "anli_r3" + +class HarnessArc_challenge(HarnessBaseTask): + ''' + using harness to evaluate arc challenge + ''' + def set_class_num(self): + self.class_num = 5 + + def set_dataname(self): + self.dataname = "arc_challenge" + + def preprocess_example(self, example): + input_str = [example["text"]] * len(example["choices"]) + answer_str = [' ' + item for item in example["choices"]] + label = example["gold"] + return input_str, answer_str, label + +class HarnessArc_challenge25s(HarnessBaseTask): + ''' + using harness to evaluate arc challenge + ''' + def set_class_num(self): + self.class_num = 5 + + def set_dataname(self): + self.dataname = "arc_challenge_25s" + + def preprocess_example(self, example): + 
input_str = [example["text"]] * len(example["choices"]) + answer_str = [' ' + item for item in example["choices"]] + label = example["gold"] + return input_str, answer_str, label + +class HarnessArc_easy(HarnessArc_challenge): + def set_class_num(self): + self.class_num = 5 + + def set_dataname(self): + self.dataname = "arc_easy" + +class HarnessBoolq(HarnessBaseTask): + def set_class_num(self): + self.class_num = 2 + + def set_dataname(self): + self.dataname = "boolq" + + def preprocess_example(self, example): + input_str = [example["text"]] * self.class_num + answer_str = [" no", " yes"] + label = example["label"] + return input_str, answer_str, label + +class HarnessCopa(HarnessBaseTask): + def set_class_num(self): + self.class_num = 2 + + def set_dataname(self): + self.dataname = "copa" + + def preprocess_example(self, example): + input_str = [example["text"]] * self.class_num + answer_str = [' ' + example['raw']['choice1'], ' ' + example['raw']['choice2']] + label = example["label"] + return input_str, answer_str, label + +class HarnessOpenbookqa(HarnessArc_challenge): + def set_class_num(self): + self.class_num = 4 + + def set_dataname(self): + self.dataname = "openbookqa" + +class HarnessPiqa(HarnessArc_challenge): + def set_class_num(self): + self.class_num = 2 + + def set_dataname(self): + self.dataname = "piqa" + +class HarnessRte(HarnessBaseTask): + def set_class_num(self): + self.class_num = 2 + + def set_dataname(self): + self.dataname = "rte" + + def preprocess_example(self, example): + input_str = [example["text"]] * self.class_num + answer_str = [' True', ' False'] + label = example["label"] + return input_str, answer_str, label + +class HarnessWic(HarnessRte): + def set_dataname(self): + self.dataname = "wic" + +class HarnessWinogrande(HarnessBaseTask): + def set_class_num(self): + self.class_num = 2 + + def set_dataname(self): + self.dataname = "winogrande" + + def preprocess_example(self, example): + pronoun_loc = 
example['raw']['sentence'].index("_") + input_str = [] + input_str.append(example['raw']['sentence'][:pronoun_loc].strip() + ' ' + example['raw']['option1']) + input_str.append(example['raw']['sentence'][:pronoun_loc].strip() + ' ' + example['raw']['option2']) + answer_str = [" " + example['raw']["sentence"][pronoun_loc + 1:].strip()] * self.class_num + label = int(example['raw']['answer']) - 1 + return input_str, answer_str, label + +class HarnessHellaswag(HarnessBaseTask): + def set_class_num(self): + self.class_num = 4 + + def set_dataname(self): + self.dataname = "hellaswag" + + def preprocess_example(self, example): + input_str = [example["text"]] * self.class_num + answer_str = [' ' + item for item in example["choices"]] + label = example["gold"] + return input_str, answer_str, label + + +class HarnessHellaswag10s(HarnessBaseTask): + def set_class_num(self): + self.class_num = 4 + + def set_dataname(self): + self.dataname = "hellaswag_10s" + + def preprocess_example(self, example): + input_str = [example["text"]] * self.class_num + answer_str = [' ' + item for item in example["choices"]] + label = example["gold"] + return input_str, answer_str, label + + +class HarnessTruthfullqaMC1(HarnessBaseTask): + def set_class_num(self): + self.class_num = 1 + + def set_dataname(self): + self.dataname = "truthfulqa_mc" + + def preprocess_example(self, example): + input_str = [example["text"]] * len(example["raw"]["mc1_targets"]["choices"]) + answer_str = [' ' + item for item in example["raw"]["mc1_targets"]["choices"]] + label = 0 # dummy label + return input_str, answer_str, label + + + +class HarnessTruthfullqaMC2(HarnessBaseTask): + def set_class_num(self): + self.class_num = 1 + + def set_dataname(self): + self.dataname = "truthfulqa_mc" + + def preprocess_example(self, example): + input_str = [example["text"]] * len(example["raw"]["mc2_targets"]["choices"]) + answer_str = [' ' + item for item in example["raw"]["mc2_targets"]["choices"]] + label = 0 # dummy label + 
return input_str, answer_str, label + + +class HarnessRecord(HarnessBaseTask): + def set_class_num(self): + self.class_num = 1 + + def set_dataname(self): + self.dataname = "record" + + def preprocess_example(self, example): + input_str = [example["text"]] * len(example["raw"]["entities"]) + answer_str = [f' - {example["raw"]["query"]}'.replace("@placeholder", item) for item in example["raw"]["entities"]] + label = 0 # dummy label + return input_str, answer_str, label + +class HarnessSCIQ(HarnessBaseTask): + def set_class_num(self): + self.class_num = 4 + + def set_dataname(self): + self.dataname = "sciq" + + def preprocess_example(self, example): + input_str = [example["text"]] * self.class_num + answer_str = [' ' + example["raw"]["distractor1"], + ' ' + example["raw"]["distractor2"], + ' ' + example["raw"]["distractor3"], + ' ' + example["raw"]["correct_answer"] + ] + label = 3 + return input_str, answer_str, label \ No newline at end of file diff --git a/YOCO/yoco/tasks/mmlu_task.py b/YOCO/yoco/tasks/mmlu_task.py new file mode 100644 index 000000000..a93476c72 --- /dev/null +++ b/YOCO/yoco/tasks/mmlu_task.py @@ -0,0 +1,92 @@ +from .harness_task import HarnessBaseTask + + +SUBJECTS = [ + "abstract_algebra", + "anatomy", + "astronomy", + "business_ethics", + "clinical_knowledge", + "college_biology", + "college_chemistry", + "college_computer_science", + "college_mathematics", + "college_medicine", + "college_physics", + "computer_security", + "conceptual_physics", + "econometrics", + "electrical_engineering", + "elementary_mathematics", + "formal_logic", + "global_facts", + "high_school_biology", + "high_school_chemistry", + "high_school_computer_science", + "high_school_european_history", + "high_school_geography", + "high_school_government_and_politics", + "high_school_macroeconomics", + "high_school_mathematics", + "high_school_microeconomics", + "high_school_physics", + "high_school_psychology", + "high_school_statistics", + "high_school_us_history", + 
"high_school_world_history", + "human_aging", + "human_sexuality", + "international_law", + "jurisprudence", + "logical_fallacies", + "machine_learning", + "management", + "marketing", + "medical_genetics", + "miscellaneous", + "moral_disputes", + "moral_scenarios", + "nutrition", + "philosophy", + "prehistory", + "professional_accounting", + "professional_law", + "professional_medicine", + "professional_psychology", + "public_relations", + "security_studies", + "sociology", + "us_foreign_policy", + "virology", + "world_religions", +] + + +def create_mmlu_tasks(): + """Creates a dictionary of tasks from a list of subjects + :return: {task_name: task} + e.g. {hendrycksTest-abstract_algebra: Task, hendrycksTest-anatomy: Task} + """ + return {f"hendrycksTest-{sub}": create_task(f"hendrycksTest-{sub}") for sub in SUBJECTS} + + +def create_task(subject): + class HendrycksTest(GeneralHendrycksTest): + def set_dataname(self): + self.dataname = f"{subject}" + + return HendrycksTest + +class GeneralHendrycksTest(HarnessBaseTask): + def set_class_num(self): + self.class_num = 4 + + def preprocess_example(self, example): + # find the last occurence of "Queston:" in example["text"], and remove everything before it + # this is to remove the context + # last_question = example["text"].rfind("Question:") + # example["text"] = example["text"][last_question:] + input_str = [example["text"]] * self.class_num + answer_str = [' ' + item for item in example["choices"]] + label = example["gold"] + return input_str, answer_str, label diff --git a/YOCO/yoco/tasks/pseudo.py b/YOCO/yoco/tasks/pseudo.py new file mode 100644 index 000000000..87b51a12b --- /dev/null +++ b/YOCO/yoco/tasks/pseudo.py @@ -0,0 +1,202 @@ +import os +from typing import Optional +import torch + +from fairseq.data import FairseqDataset +from fairseq.tasks import register_task, FairseqDataclass, LegacyFairseqTask +from dataclasses import dataclass, field +from omegaconf import II + +from .data.tiktoken_tokenizer import 
TiktokenTokenizer +from .data.llama_tokenizer import LLaMATokenizer + + +class PseudoIterator(FairseqDataset): + def __init__(self, batch_size, length, vocab_size): + super().__init__() + self.batch_size = batch_size + self.length = length + self.vocab_size = vocab_size + + self.epoch = 1 + self.next_epoch_idx = 1 + self.sharded_checkpoint = True + self.should_close_after_finished = True + + def __iter__(self): + while True: + yield self.__next__() + + def __next__(self): + net_input = torch.randint(size=(self.batch_size, self.length), dtype=torch.long, low=0, high=self.vocab_size - 1) + return { + "net_input": {"src_tokens": net_input}, + "target": net_input, + "ntokens": self.batch_size * self.length, + } + + def __len__(self) -> int: + return 819200000 + + def next_epoch_itr(self, **kwargs): + return self + + @property + def first_batch(self): + return "DUMMY" + + def end_of_epoch(self) -> bool: + return False + + def state_dict(self): + return None + + def load_state_dict(self, state_dict): + pass + + def setstate(self, value): + pass + + def getstate(self): + pass + + def close(self): + pass + +@dataclass +class PseudoConfig(FairseqDataclass): + tokens_per_sample: int = field( + default=1024, + metadata={"help": "max number of tokens per sample for LM dataset"}, + ) + max_target_positions: Optional[int] = field( + default=None, metadata={"help": "max number of tokens in the target sequence"} + ) + llama_model: Optional[str] = field( + default=None, + metadata={"help": "path to load tokenizer and config"}, + ) + tiktoken_model: Optional[str] = field( + default=None, + metadata={ + "help": "tiktoken model to tokenize the data" + }, + ) + batch_read_ahead: int = field( + default=10000, + metadata={"help": "batch read ahead size for infinibatch"}, + ) + pad_to_max_len: bool = field( + default=False, + metadata={"help": "pad each sentence to max length"}, + ) + absolute_path: bool = field( + default=False, + metadata={"help": "use absolute path in data config"}, + 
) + tokenizer_pad_to_multiple: int = field( + default=8, + metadata={"help": "pad to multiple of this value"}, + ) + seed: int = II("common.seed") + batch_size: Optional[int] = II("dataset.batch_size") + + +@register_task('pseudo', dataclass=PseudoConfig) +class PseudoTask(LegacyFairseqTask): + def __init__(self, args, tokenizer): + super().__init__(args) + self.cfg = args + self.tokenizer = tokenizer + + @classmethod + def setup_task(cls, cfg, **kwargs): + """Setup the task (e.g., load dictionaries). + + Args: + args (argparse.Namespace): parsed command-line arguments + """ + if cfg.llama_model is not None: + tokenizer = LLaMATokenizer(os.path.join(cfg.llama_model, "tokenizer.model")) + elif cfg.tiktoken_model is not None: + tokenizer = TiktokenTokenizer(cfg.tiktoken_model, cfg.tokenizer_pad_to_multiple) + else: + raise ValueError("No tokenizer model provided") + + return cls(cfg, tokenizer) + + def load_dataset(self, split, **kwargs): + pass + # self.datasets[split] = None + + def dataset(self, split): + return None + + def get_batch_iterator( + self, + dataset, + max_tokens=None, + max_sentences=None, + max_positions=None, + ignore_invalid_inputs=False, + required_batch_size_multiple=1, + seed=1, + num_shards=1, + shard_id=0, + num_workers=0, + epoch=1, + data_buffer_size=0, + disable_iterator_cache=False, + skip_remainder_batch=False, + grouped_shuffling=False, + update_epoch_batch_itr=False + ): + return PseudoIterator(max_sentences, self.cfg.tokens_per_sample, 10000) + + def train_step( + self, sample, model, criterion, optimizer, update_num, ignore_grad=False + ): + """ + Do forward and backward, and return the loss as computed by *criterion* + for the given *model* and *sample*. + + Args: + sample (dict): the mini-batch. The format is defined by the + :class:`~fairseq.data.FairseqDataset`. 
+            model (~fairseq.models.BaseFairseqModel): the model +            criterion (~fairseq.criterions.FairseqCriterion): the criterion +            optimizer (~fairseq.optim.FairseqOptimizer): the optimizer +            update_num (int): the current update +            ignore_grad (bool): multiply loss by 0 if this is set to True + +        Returns: +            tuple: +                - the loss +                - the sample size, which is used as the denominator for the +                  gradient +                - logging outputs to display while training +        """ +        model.train() +        model.set_num_updates(update_num) +        with torch.autograd.profiler.record_function("forward"): +            loss, sample_size, logging_output = criterion(model, sample) +        if ignore_grad: +            loss *= 0 +        with torch.autograd.profiler.record_function("backward"): +            optimizer.backward(loss) +        return loss, sample_size, logging_output + +    def valid_step(self, sample, model, criterion): +        model.eval() +        with torch.no_grad(): +            loss, sample_size, logging_output = criterion(model, sample) +        return loss, sample_size, logging_output + +    @property +    def target_dictionary(self): +        padding_idx = self.tokenizer.pad_id +        class Dict: +            def pad(self): +                return padding_idx +        dictionary = Dict() +        return dictionary \ No newline at end of file diff --git a/YOCO/yoco/train.py b/YOCO/yoco/train.py new file mode 100644 index 000000000..ee6615d87 --- /dev/null +++ b/YOCO/yoco/train.py @@ -0,0 +1,7 @@ +import models +import tasks +import criterions +from fairseq_cli.train import cli_main + +if __name__ == "__main__": +    cli_main() diff --git a/YOCO/yoco/validate.py b/YOCO/yoco/validate.py new file mode 100644 index 000000000..e3815ca6a --- /dev/null +++ b/YOCO/yoco/validate.py @@ -0,0 +1,294 @@ +#!/usr/bin/env python3 -u +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +""" +Validate a trained model on one or across multiple GPUs.
+""" +import models +import tasks +import criterions + +import argparse +import logging +import math +import os +import sys +from typing import Any, Callable, Dict, List, Optional, Tuple + +# We need to setup root logger before importing any fairseq libraries. +logging.basicConfig( + format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + level=os.environ.get("LOGLEVEL", "INFO").upper(), + stream=sys.stdout, +) +logger = logging.getLogger("fairseq_cli.train") + +import numpy as np +import torch +from omegaconf import DictConfig, OmegaConf + +from fairseq import checkpoint_utils, options, quantization_utils, tasks, utils +from fairseq.data import data_utils, iterators +from fairseq.data.plasma_utils import PlasmaStore +from fairseq.dataclass.configs import FairseqConfig +from fairseq.dataclass.utils import convert_namespace_to_omegaconf +from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap +from fairseq.distributed import utils as distributed_utils +from fairseq.file_io import PathManager +from fairseq.logging import meters, metrics, progress_bar + + +def main(cfg: FairseqConfig) -> None: + if isinstance(cfg, argparse.Namespace): + cfg = convert_namespace_to_omegaconf(cfg) + + utils.import_user_module(cfg.common) + + if ( + distributed_utils.is_master(cfg.distributed_training) + and "job_logging_cfg" in cfg + ): + # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126) + logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg)) + + assert ( + cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None + ), "Must specify batch size either with --max-tokens or --batch-size" + metrics.reset() + + if cfg.common.log_file is not None: + handler = logging.FileHandler(filename=cfg.common.log_file) + logger.addHandler(handler) + + np.random.seed(cfg.common.seed) + utils.set_torch_seed(cfg.common.seed) + + # if 
distributed_utils.is_master(cfg.distributed_training): + # checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir) + + # Print args + logger.info(cfg) + + if cfg.checkpoint.write_checkpoints_asynchronously: + try: + import iopath # noqa: F401 + except ImportError: + logging.exception( + "Asynchronous checkpoint writing is specified but iopath is " + "not installed: `pip install iopath`" + ) + return + + # Setup task, e.g., translation, language modeling, etc. + task = tasks.setup_task(cfg.task) + + assert cfg.criterion, "Please specify criterion to train a model" + + # Build model and criterion + if cfg.distributed_training.ddp_backend == "fully_sharded": + with fsdp_enable_wrap(cfg.distributed_training): + model = fsdp_wrap(task.build_model(cfg.model)) + else: + model = task.build_model(cfg.model) + criterion = task.build_criterion(cfg.criterion) + + tpu = cfg.common.tpu + cuda = torch.cuda.is_available() and not cfg.common.cpu and not tpu + if cuda: + device = torch.device("cuda") + elif tpu: + device = utils.get_tpu_device() + else: + device = torch.device("cpu") + if cfg.common.fp16: + criterion = criterion.half() + model = model.half() + elif cfg.common.bf16: + criterion = criterion.to(dtype=torch.bfloat16) + model = model.to(dtype=torch.bfloat16) + criterion = criterion.to(device) + model = model.to(device) + + logger.info(model) + logger.info("task: {}".format(task.__class__.__name__)) + logger.info("model: {}".format(model.__class__.__name__)) + logger.info("criterion: {}".format(criterion.__class__.__name__)) + logger.info( + "num. shared model params: {:,} (num. trained: {:,})".format( + sum( + p.numel() for p in model.parameters() if not getattr(p, "expert", False) + ), + sum( + p.numel() + for p in model.parameters() + if not getattr(p, "expert", False) and p.requires_grad + ), + ) + ) + + logger.info( + "num. expert model params: {} (num. 
trained: {})".format( + sum(p.numel() for p in model.parameters() if getattr(p, "expert", False)), + sum( + p.numel() + for p in model.parameters() + if getattr(p, "expert", False) and p.requires_grad + ), + ) + ) + + # Load valid dataset (we load training data below, based on the latest checkpoint) + # We load the valid dataset AFTER building the model + data_utils.raise_if_valid_subsets_unintentionally_ignored(cfg) + if cfg.dataset.combine_valid_subsets: + task.load_dataset("valid", combine=True, epoch=1) + else: + for valid_sub_split in cfg.dataset.valid_subset.split(","): + task.load_dataset(valid_sub_split, combine=False, epoch=1) + + # Load the latest checkpoint if one is available and restore the + # corresponding train iterator + # try: + # state_dict = torch.load(cfg.checkpoint.restore_file) + # model.load_state_dict(state_dict['model']) + # print(f"Loaded model from {cfg.checkpoint.restore_file}") + # except Exception as e: + # print(e) + # print(f"No checkpoint found from {cfg.checkpoint.restore_file}") + + valid_subsets = cfg.dataset.valid_subset.split(",") + logger.info("Start validating") + + validate( + cfg, task, model, criterion, valid_subsets, + ) + +@torch.no_grad() +def validate( + cfg: DictConfig, + task: tasks.FairseqTask, + model, + criterion, + subsets: List[str], +) -> List[Optional[float]]: + """Evaluate the model on the validation set(s) and return the losses.""" + if cfg.dataset.fixed_validation_seed is not None: + # set fixed seed for every validation + utils.set_torch_seed(cfg.dataset.fixed_validation_seed) + + valid_losses = [] + for subset in subsets: + logger.info('begin validation on "{}" subset'.format(subset)) + + # Initialize data iterator + itr = task.get_batch_iterator( + dataset=task.dataset(subset), + max_tokens=cfg.dataset.max_tokens_valid, + max_sentences=cfg.dataset.batch_size_valid, + required_batch_size_multiple=cfg.dataset.required_batch_size_multiple, + seed=cfg.common.seed, + 
num_workers=cfg.dataset.num_workers_valid, + # always pass a fixed "epoch" to keep validation data consistent + # across training epochs + epoch=1, + data_buffer_size=cfg.dataset.data_buffer_size, + ).next_epoch_itr( + shuffle=False, set_dataset_epoch=False # use a fixed valid set + ) + if cfg.common.tpu: + itr = utils.tpu_data_loader(itr) + progress = progress_bar.progress_bar( + itr, + log_format=cfg.common.log_format, + log_interval=cfg.common.log_interval, + prefix=f"valid on '{subset}' subset", + tensorboard_logdir=( + cfg.common.tensorboard_logdir + if distributed_utils.is_master(cfg.distributed_training) + else None + ), + default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), + wandb_project=( + cfg.common.wandb_project + if distributed_utils.is_master(cfg.distributed_training) + else None + ), + wandb_run_name=os.environ.get( + "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir) + ), + ) + + # create a new root metrics aggregator so validation metrics + # don't pollute other aggregators (e.g., train meters) + with metrics.aggregate(new_root=True) as agg: + logging_outputs = [] + for i, sample in enumerate(progress): + if cfg.dataset.max_valid_steps is not None and i > cfg.dataset.max_valid_steps: + break + sample = utils.move_to_cuda(sample) + _, _, inner_logging_outputs = task.valid_step( + sample, model, criterion + ) + logging_outputs.append(inner_logging_outputs) + task.reduce_metrics(logging_outputs, criterion) + + # with metrics.aggregate(new_root=True) as agg: + # for i, sample in enumerate(progress): + # if ( + # cfg.dataset.max_valid_steps is not None + # and i > cfg.dataset.max_valid_steps + # ): + # break + # trainer.valid_step(sample) + + stats = get_valid_stats(cfg, agg.get_smoothed_values()) + + progress.print(stats, tag=subset) + + valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric]) + return valid_losses + + +def get_valid_stats( + cfg: DictConfig, stats: Dict[str, Any] +) -> Dict[str, Any]: + if 
hasattr(checkpoint_utils.save_checkpoint, "best"): + key = "best_{0}".format(cfg.checkpoint.best_checkpoint_metric) + best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min + stats[key] = best_function( + checkpoint_utils.save_checkpoint.best, + stats[cfg.checkpoint.best_checkpoint_metric], + ) + return stats + + +def cli_main( + modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None +) -> None: + parser = options.get_training_parser() + args = options.parse_args_and_arch(parser, modify_parser=modify_parser) + + cfg = convert_namespace_to_omegaconf(args) + + if cfg.common.use_plasma_view: + server = PlasmaStore(path=cfg.common.plasma_path) + logger.info( + f"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}" + ) + + if args.profile: + with torch.cuda.profiler.profile(): + with torch.autograd.profiler.emit_nvtx(): + distributed_utils.call_main(cfg, main) + else: + distributed_utils.call_main(cfg, main) + + # if cfg.common.use_plasma_view: + # server.server.kill() + + +if __name__ == "__main__": + cli_main() \ No newline at end of file